Skip to content

Commit

Permalink
Merge pull request NTMC-Community#180 from faneshion/feature/dynamic_pool_div_0
Browse files Browse the repository at this point in the history

Feature/dynamic pool div 0
  • Loading branch information
pl8787 authored Jun 18, 2018
2 parents e8209d3 + 09e4f43 commit 9158e16
Show file tree
Hide file tree
Showing 9 changed files with 25 additions and 14 deletions.
2 changes: 1 addition & 1 deletion examples/QuoraQP/config/matchpyramid_quoraqp.config
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
"embed_size": 300,
"train_embed": true,
"embed_path": "./data/QuoraQP/embed_glove_d300_norm",
"vocab_size": 28159,
"vocab_size": 28211,
"target_mode": "classification",
"class_num": 2,
"text1_maxlen": 20,
Expand Down
20 changes: 16 additions & 4 deletions matchzoo/layers/DynamicMaxPooling.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,14 +46,26 @@ def compute_output_shape(self, input_shape):
def dynamic_pooling_index(len1, len2, max_len1, max_len2,
compress_ratio1 = 1, compress_ratio2 = 1):
def dpool_index_(batch_idx, len1_one, len2_one, max_len1, max_len2):
'''
        TODO: Here is the check that sentence lengths are positive,
        to make sure that the lengths of the input sentences are positive.
if len1_one == 0:
print("len1 = 0 at batch_idx = {}".format(batch_idx))
print("[Error:DynamicPooling] len1 = 0 at batch_idx = {}".format(batch_idx))
exit()
if len2_one == 0:
print("len2 = 0 at batch_idx = {}".format(batch_idx))
print("[Error:DynamicPooling] len2 = 0 at batch_idx = {}".format(batch_idx))
exit()
stride1 = 1.0 * max_len1 / len1_one
stride2 = 1.0 * max_len2 / len2_one
'''
if len1_one == 0:
stride1 = max_len1
else:
stride1 = 1.0 * max_len1 / len1_one

if len2_one == 0:
stride2 = max_len2
else:
stride2 = 1.0 * max_len2 / len2_one

idx1_one = [int(i / stride1) for i in range(max_len1)]
idx2_one = [int(i / stride2) for i in range(max_len2)]
mesh1, mesh2 = np.meshgrid(idx1_one, idx2_one)
Expand Down
2 changes: 1 addition & 1 deletion matchzoo/models/arci.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import keras.backend as K
from keras.models import Sequential, Model
from keras.layers import *
from keras.layers import Reshape, Embedding,Merge, Dot
from keras.layers import Reshape, Embedding, Dot
from keras.optimizers import Adam
from model import BasicModel

Expand Down
2 changes: 1 addition & 1 deletion matchzoo/models/arcii.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import keras.backend as K
from keras.models import Sequential, Model
from keras.layers import *
from keras.layers import Reshape, Embedding,Merge, Dot
from keras.layers import Reshape, Embedding, Dot
from keras.optimizers import Adam
from model import BasicModel

Expand Down
2 changes: 1 addition & 1 deletion matchzoo/models/bimpm.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import keras.backend as K
from keras.models import Sequential, Model
from keras.layers import *
from keras.layers import Reshape, Embedding, Merge, Dot
from keras.layers import Reshape, Embedding, Dot
from keras.optimizers import Adam
from model import BasicModel
from layers.DynamicMaxPooling import *
Expand Down
1 change: 0 additions & 1 deletion matchzoo/models/duet.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
from keras import regularizers
from keras.models import Sequential, Model
from keras.layers import *
#from keras.layers import Reshape, Embedding,Merge, Dot, Lambda
from keras.optimizers import Adam
from model import BasicModel
import tensorflow as tf
Expand Down
2 changes: 1 addition & 1 deletion matchzoo/models/matchpyramid.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import keras.backend as K
from keras.models import Sequential, Model
from keras.layers import *
from keras.layers import Reshape, Embedding, Merge, Dot
from keras.layers import Reshape, Embedding, Dot
from keras.optimizers import Adam
from model import BasicModel
from layers.DynamicMaxPooling import *
Expand Down
2 changes: 1 addition & 1 deletion matchzoo/models/mvlstm.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import keras.backend as K
from keras.models import Sequential, Model
from keras.layers import *
from keras.layers import Reshape, Embedding,Merge, Dot
from keras.layers import Reshape, Embedding, Dot
from keras.optimizers import Adam
from model import BasicModel
from utils.utility import *
Expand Down
6 changes: 3 additions & 3 deletions matchzoo/utils/rank_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,12 +98,12 @@ def convert_embed_2_numpy(embed_dict, max_size=0, embed=None):
if embed is None:
embed = np.zeros((max_size, feat_size), dtype=np.float32)

if len(embed_dict) != len(embed):
raise Exception("vocab_size %d is not equal to embed_size %d, change the vocab_size in the config!"
if len(embed_dict) > len(embed):
raise Exception("vocab_size %d is larger than embed_size %d, change the vocab_size in the config!"
% (len(embed_dict), len(embed)))

for k in embed_dict:
embed[k] = np.array(embed_dict[k])
print('Generate numpy embed: %s', str(embed.shape), end='\n')
print('Generate numpy embed:', str(embed.shape), end='\n')
return embed

0 comments on commit 9158e16

Please sign in to comment.