mirror of
https://github.com/yunjey/pytorch-tutorial.git
synced 2025-07-04 16:12:12 +08:00
Update default path
This commit is contained in:
@ -69,8 +69,8 @@ def main(args):
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--image', type=str, required=True, help='input image for generating caption')
|
||||
parser.add_argument('--encoder_path', type=str, default='models/encoder-2-1000.ckpt', help='path for trained encoder')
|
||||
parser.add_argument('--decoder_path', type=str, default='models/decoder-2-1000.ckpt', help='path for trained decoder')
|
||||
parser.add_argument('--encoder_path', type=str, default='models/encoder-5-3000.pkl', help='path for trained encoder')
|
||||
parser.add_argument('--decoder_path', type=str, default='models/decoder-5-3000.pkl', help='path for trained decoder')
|
||||
parser.add_argument('--vocab_path', type=str, default='data/vocab.pkl', help='path for vocabulary wrapper')
|
||||
|
||||
# Model parameters (should be same as parameters in train.py)
|
||||
@ -78,4 +78,4 @@ if __name__ == '__main__':
|
||||
parser.add_argument('--hidden_size', type=int , default=512, help='dimension of lstm hidden states')
|
||||
parser.add_argument('--num_layers', type=int , default=1, help='number of layers in lstm')
|
||||
args = parser.parse_args()
|
||||
main(args)
|
||||
main(args)
|
||||
|
Reference in New Issue
Block a user