modified the code

yunjey
2017-04-13 19:49:10 +09:00
parent fd445c96b9
commit 0e59313b4b
5 changed files with 134 additions and 88 deletions


@@ -1,56 +1,55 @@
-from data import get_data_loader
-from vocab import Vocabulary
-from configuration import Config
+import argparse
+import torch
+import torch.nn as nn
+import numpy as np
+import os
+from data_loader import get_loader
+from build_vocab import Vocabulary
 from model import EncoderCNN, DecoderRNN
 from torch.autograd import Variable
 from torch.nn.utils.rnn import pack_padded_sequence
-import torch
-import torch.nn as nn
-import numpy as np
 import pickle
-import os
+from torchvision import transforms
 
-def main():
-    # Configuration for hyper-parameters
-    config = Config()
-
+def main(args):
     # Create model directory
-    if not os.path.exists(config.model_path):
-        os.makedirs(config.model_path)
+    if not os.path.exists(args.model_path):
+        os.makedirs(args.model_path)
 
     # Image preprocessing
-    transform = config.train_transform
+    transform = transforms.Compose([
+        transforms.RandomCrop(args.crop_size),
+        transforms.RandomHorizontalFlip(),
+        transforms.ToTensor(),
+        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
 
-    # Load vocabulary wrapper
-    with open(os.path.join(config.vocab_path, 'vocab.pkl'), 'rb') as f:
+    # Load vocabulary wrapper.
+    with open(args.vocab_path, 'rb') as f:
         vocab = pickle.load(f)
 
-    # Build data loader
-    image_path = os.path.join(config.image_path, 'train2014')
-    json_path = os.path.join(config.caption_path, 'captions_train2014.json')
-    train_loader = get_data_loader(image_path, json_path, vocab,
-                                   transform, config.batch_size,
-                                   shuffle=True, num_workers=config.num_threads)
-    total_step = len(train_loader)
-
-    # Build Models
-    encoder = EncoderCNN(config.embed_size)
-    decoder = DecoderRNN(config.embed_size, config.hidden_size,
-                         len(vocab), config.num_layers)
-
-    if torch.cuda.is_available():
+    # Build data loader
+    data_loader = get_loader(args.image_dir, args.caption_path, vocab,
+                             transform, args.batch_size,
+                             shuffle=True, num_workers=args.num_workers)
+
+    # Build the models
+    encoder = EncoderCNN(args.embed_size)
+    decoder = DecoderRNN(args.embed_size, args.hidden_size,
+                         len(vocab), args.num_layers)
+
+    if torch.cuda.is_available():
         encoder.cuda()
         decoder.cuda()
 
     # Loss and Optimizer
     criterion = nn.CrossEntropyLoss()
     params = list(decoder.parameters()) + list(encoder.resnet.fc.parameters())
-    optimizer = torch.optim.Adam(params, lr=config.learning_rate)
+    optimizer = torch.optim.Adam(params, lr=args.learning_rate)
 
     # Train the Models
-    for epoch in range(config.num_epochs):
-        for i, (images, captions, lengths) in enumerate(train_loader):
+    total_step = len(data_loader)
+    for epoch in range(args.num_epochs):
+        for i, (images, captions, lengths) in enumerate(data_loader):
             # Set mini-batch dataset
             images = Variable(images)
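
The first hunk above replaces the pre-built config.train_transform with an inline transforms.Compose pipeline. As a quick sanity check of what that pipeline produces, here is a minimal sketch (it assumes torchvision and Pillow are installed; the all-black dummy image is a stand-in for a real COCO photo and is not part of the repository's code):

    from PIL import Image
    from torchvision import transforms

    # Same pipeline as in the diff, with crop_size fixed at its default of 224.
    transform = transforms.Compose([
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    img = Image.new('RGB', (256, 256))         # dummy 256x256 image
    out = transform(img)
    print(out.shape)                           # torch.Size([3, 224, 224])
    print(out.min().item(), out.max().item())  # -1.0 -1.0 for an all-black input

The Normalize step rescales each channel from [0, 1] to [-1, 1].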
@@ -70,19 +69,50 @@ def main():
             optimizer.step()
 
             # Print log info
-            if i % config.log_step == 0:
+            if i % args.log_step == 0:
                 print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
-                      %(epoch, config.num_epochs, i, total_step,
+                      %(epoch, args.num_epochs, i, total_step,
                         loss.data[0], np.exp(loss.data[0])))
 
-            # Save the Model
-            if (i+1) % config.save_step == 0:
+            # Save the models
+            if (i+1) % args.save_step == 0:
                 torch.save(decoder.state_dict(),
-                           os.path.join(config.model_path,
+                           os.path.join(args.model_path,
                                         'decoder-%d-%d.pkl' %(epoch+1, i+1)))
                 torch.save(encoder.state_dict(),
-                           os.path.join(config.model_path,
+                           os.path.join(args.model_path,
                                         'encoder-%d-%d.pkl' %(epoch+1, i+1)))
 
 if __name__ == '__main__':
-    main()
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model_path', type=str, default='./models/',
+                        help='path for saving trained models')
+    parser.add_argument('--crop_size', type=int, default=224,
+                        help='size for randomly cropping images')
+    parser.add_argument('--vocab_path', type=str, default='./data/vocab.pkl',
+                        help='path for vocabulary wrapper')
+    parser.add_argument('--image_dir', type=str, default='./data/resized2014',
+                        help='directory for resized images')
+    parser.add_argument('--caption_path', type=str,
+                        default='./data/annotations/captions_train2014.json',
+                        help='path for train annotation json file')
+    parser.add_argument('--log_step', type=int, default=10,
+                        help='step size for printing log info')
+    parser.add_argument('--save_step', type=int, default=1000,
+                        help='step size for saving trained models')
+
+    # Model parameters
+    parser.add_argument('--embed_size', type=int, default=256,
+                        help='dimension of word embedding vectors')
+    parser.add_argument('--hidden_size', type=int, default=512,
+                        help='dimension of lstm hidden states')
+    parser.add_argument('--num_layers', type=int, default=1,
+                        help='number of layers in lstm')
+    parser.add_argument('--num_epochs', type=int, default=5)
+    parser.add_argument('--batch_size', type=int, default=128)
+    parser.add_argument('--num_workers', type=int, default=2)
+    parser.add_argument('--learning_rate', type=float, default=0.001)
+    args = parser.parse_args()
+    print(args)
+    main(args)
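
Since hyper-parameters now come from argparse flags rather than configuration.Config, the script is driven from the shell, e.g. python train.py --num_epochs 5 --batch_size 128, with the path defaults defined above. For a quick smoke test without a shell, parse_args also accepts an explicit argument list; a minimal sketch using two of the flags defined above (not the repository's code):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--num_epochs', type=int, default=5)
    parser.add_argument('--batch_size', type=int, default=128)

    # parse_args accepts an explicit list, which is handy in tests;
    # with no argument it falls back to sys.argv as in the commit above.
    args = parser.parse_args(['--num_epochs', '1'])
    print(args)  # Namespace(batch_size=128, num_epochs=1)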