train_word_embeddings.py (forked from kefirski/contiguous-succotash)
import argparse

import numpy as np
import torch as t
from torch.autograd import Variable
from torch.optim import SGD

from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from selfModules.neg import NEG_loss
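# Pre-trains word embeddings with a word2vec-style negative-sampling objective
# (NEG_loss) and saves the learned input embedding matrix to disk.
# Note: the script appears to target the older Variable-based PyTorch API; the
# loss indexing near the end of the loop assumes mean() returns a 1-element tensor.
# Example invocation (illustrative values):
#   python train_word_embeddings.py --num-iterations 500000 --batch-size 32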
if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='word2vec')
    parser.add_argument('--num-iterations', type=int, default=1000000, metavar='NI',
                        help='num iterations (default: 1000000)')
    parser.add_argument('--batch-size', type=int, default=10, metavar='BS',
                        help='batch size (default: 10)')
    parser.add_argument('--num-sample', type=int, default=5, metavar='NS',
                        help='num sample (default: 5)')
    parser.add_argument('--use-cuda', type=bool, default=True, metavar='CUDA',
                        help='use cuda (default: True)')
    args = parser.parse_args()
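    # The 'poem' corpus is loaded with word_is_char set, so the batch loader
    # appears to treat each character as a vocabulary unit (character-level embeddings).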
    prefix = 'poem'
    word_is_char = True
    batch_loader = BatchLoader('', prefix, word_is_char)
    params = Parameters(batch_loader.max_word_len,
                        batch_loader.max_seq_len,
                        batch_loader.words_vocab_size,
                        batch_loader.chars_vocab_size, word_is_char)

    neg_loss = NEG_loss(params.word_vocab_size, params.word_embed_size)
    if args.use_cuda and t.cuda.is_available():
        neg_loss = neg_loss.cuda()

    # NEG_loss is defined over two embedding matrices, each with shape [params.word_vocab_size, params.word_embed_size]
    optimizer = SGD(neg_loss.parameters(), 0.1)
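    # Training loop: each step draws a batch of (input, target) index pairs and
    # takes one SGD step on the negative-sampling loss.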
    for iteration in range(args.num_iterations):

        input_idx, target_idx = batch_loader.next_embedding_seq(args.batch_size)

        input = Variable(t.from_numpy(input_idx).long())
        target = Variable(t.from_numpy(target_idx).long())
        if args.use_cuda and t.cuda.is_available():
            input, target = input.cuda(), target.cuda()
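        # The NEG loss scores each (input, target) pair against args.num_sample
        # negative samples; .mean() reduces the returned per-pair losses to a scalar.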
        out = neg_loss(input, target, args.num_sample).mean()

        optimizer.zero_grad()
        out.backward()
        optimizer.step()

        if iteration % 500 == 0:
            out = out.cpu().data.numpy()[0]
            print('iteration = {}, loss = {}'.format(iteration, out))
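    # Persist the learned input-side embedding matrix for later reuse.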
    word_embeddings = neg_loss.input_embeddings()
    np.save('data/' + batch_loader.prefix + 'word_embeddings.npy', word_embeddings)