Commit 4ba7f14

fix bug
1 parent: 30c498f

File tree: 2 files changed (+20, -15 lines)


06-Natural Language Process/N-Gram.py

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 import torch
-from torch import nn, optim
 import torch.nn.functional as F
+from torch import nn, optim
 from torch.autograd import Variable
 
 CONTEXT_SIZE = 2

06-Natural Language Process/LSTM.py renamed to 06-Natural Language Process/seq-lstm.py

Lines changed: 19 additions & 14 deletions

@@ -1,14 +1,14 @@
 __author__ = 'SherlockLiao'
 
 import torch
+import torch.nn.functional as F
 from torch import nn, optim
 from torch.autograd import Variable
-import torch.nn.functional as F
 
-training_data = [
-    ("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
-    ("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
-]
+training_data = [("The dog ate the apple".split(),
+                  ["DET", "NN", "V", "DET", "NN"]),
+                 ("Everybody read that book".split(), ["NN", "V", "DET",
+                                                       "NN"])]
 
 word_to_idx = {}
 tag_to_idx = {}
@@ -38,12 +38,12 @@ def forward(self, x):
 
 
 class LSTMTagger(nn.Module):
-    def __init__(self, n_word, n_char, char_dim, n_dim, char_hidden,
-                 n_hidden, n_tag):
+    def __init__(self, n_word, n_char, char_dim, n_dim, char_hidden, n_hidden,
+                 n_tag):
         super(LSTMTagger, self).__init__()
         self.word_embedding = nn.Embedding(n_word, n_dim)
         self.char_lstm = CharLSTM(n_char, char_dim, char_hidden)
-        self.lstm = nn.LSTM(n_dim+char_hidden, n_hidden, batch_first=True)
+        self.lstm = nn.LSTM(n_dim + char_hidden, n_hidden, batch_first=True)
         self.linear1 = nn.Linear(n_hidden, n_tag)
 
     def forward(self, x, word):
@@ -54,11 +54,16 @@ def forward(self, x, word):
                 char_list.append(character_to_idx[letter.lower()])
             char_list = torch.LongTensor(char_list)
             char_list = char_list.unsqueeze(0)
-            tempchar = self.char_lstm(Variable(char_list).cuda())
+            if torch.cuda.is_available():
+                tempchar = self.char_lstm(Variable(char_list).cuda())
+            else:
+                tempchar = self.char_lstm(Variable(char_list))
             tempchar = tempchar.squeeze(0)
             char = torch.cat((char, tempchar.cpu().data), 0)
         char = char.squeeze(1)
-        char = Variable(char).cuda()
+        if torch.cuda.is_available():
+            char = char.cuda()
+        char = Variable(char)
         x = self.word_embedding(x)
         x = torch.cat((x, char), 1)
         x = x.unsqueeze(0)
@@ -69,8 +74,8 @@ def forward(self, x, word):
         return y
 
 
-model = LSTMTagger(len(word_to_idx), len(character_to_idx), 10,
-                   100, 50, 128, len(tag_to_idx))
+model = LSTMTagger(
+    len(word_to_idx), len(character_to_idx), 10, 100, 50, 128, len(tag_to_idx))
 if torch.cuda.is_available():
     model = model.cuda()
 criterion = nn.CrossEntropyLoss()
@@ -84,8 +89,8 @@ def make_sequence(x, dic):
 
 
 for epoch in range(300):
-    print('*'*10)
-    print('epoch {}'.format(epoch+1))
+    print('*' * 10)
+    print('epoch {}'.format(epoch + 1))
     running_loss = 0
     for data in training_data:
         word, tag = data
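The substantive change in this commit is the torch.cuda.is_available() guard around the two previously unconditional .cuda() calls, which suggests the bug being fixed was a crash on CPU-only machines; the remaining hunks are formatting only. A minimal sketch of the same idea in the post-0.4 PyTorch idiom, where Variable is merged into Tensor and a single torch.device replaces per-call guards (the model and tensor below are illustrative stand-ins, not taken from the repo):

import torch
from torch import nn

# Choose the device once; .to(device) leaves tensors in place on
# CPU-only machines, so the same script runs with or without a GPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = nn.LSTM(10, 20, batch_first=True).to(device)  # stand-in model
x = torch.randn(1, 5, 10, device=device)              # stand-in input batch

out, _ = model(x)
print(out.shape)  # torch.Size([1, 5, 20])

With this idiom, both branches of the commit's if/else collapse into a single .to(device) call, which is why the explicit availability check rarely appears around individual tensors in newer code.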
