Commit 741110d

update 2.0 fleet api
MrChengmo committed Nov 5, 2020
1 parent 63a4d5b commit 741110d
Showing 16 changed files with 376 additions and 57 deletions.
File renamed without changes.
@@ -25,25 +25,22 @@ hyper_parameters:
    dense_feature_dim: 13
    fc_sizes: [400, 400, 400]

- mode: [local_train]
+ mode: [collective]
  runner:
  - name: ps_cpu
-   class: cluster_train
-   epochs: 10
+   class: local_cluster_train
+   epochs: 1
    device: cpu
    fleet_mode: ps
    save_checkpoint_interval: 1
    save_checkpoint_path: "increment_dnn"
-   print_interval: 1
+   print_interval: 10
    phases: [phase1]

- - name: ps_gpu
-   class: cluster_train
+ - name: collective
+   class: single
    epochs: 10
    device: gpu
-   fleet_mode: ps
-   save_checkpoint_interval: 1
-   save_checkpoint_path: "increment_dnn"
+   fleet_mode: collective
+   selected_gpus: "0,1"
    print_interval: 1
    phases: [phase1]

@@ -74,7 +71,7 @@ runner:
  phase:
  - name: phase1
    model: "{workspace}/model.py"
-   dataset_name: dataset_train
+   dataset_name: dataloader_train
    thread_num: 1

  - name: phase2
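The `mode` list at the top of the config selects which `runner` entries execute, so the switch to `mode: [collective]` activates the new `collective` runner. A minimal sketch (not part of this commit; the file path and key lookups are assumptions) of how that pairing resolves:

# Sketch: resolve the runners named in `mode` from a PaddleRec-style
# config.yaml and print their fleet settings.
import yaml

with open("config.yaml") as f:        # assumed path
    conf = yaml.safe_load(f)

runners = {r["name"]: r for r in conf["runner"]}
for name in conf["mode"]:             # e.g. ["collective"]
    r = runners[name]
    print(name, r["class"], r.get("fleet_mode"), r.get("selected_gpus"))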
File renamed without changes.
File renamed without changes.
@@ -69,7 +69,6 @@ def embedding_layer(input):

        sparse_embed_seq = list(map(embedding_layer, self.sparse_input))
        concated = paddle.concat(sparse_embed_seq + [self.dense_input], axis=1)
-       fluid.layers.Print(concated, message="concated")

        fc1 = paddle.static.nn.fc(
            x=concated,
@@ -78,7 +77,6 @@ def embedding_layer(input):
            name="fc1",
            weight_attr=paddle.ParamAttr(initializer=fluid.initializer.Normal(
                scale=1.0 / math.sqrt(concated.shape[1]))))
-       fluid.layers.Print(fc1, message="fc1")

        fc2 = paddle.static.nn.fc(
            x=fc1,
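Context for the two deletions above: `fluid.layers.Print` injects a runtime print op that dumps its tensor on every pass of the program, useful while debugging but noisy in benchmark runs. A minimal static-graph sketch of the behavior (illustrative only, not part of the diff):

# Sketch: fluid.layers.Print wraps a tensor with a print op; the value is
# dumped each time the program executes.
import paddle
import paddle.fluid as fluid

paddle.enable_static()
x = fluid.layers.fill_constant(shape=[2, 3], value=1.0, dtype="float32")
x = fluid.layers.Print(x, message="concated")  # same pattern as the removed lines

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
exe.run(fluid.default_main_program(), fetch_list=[x])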
File renamed without changes.
13 changes: 13 additions & 0 deletions benchmark/simnet_bow/dataset_generator.py
@@ -0,0 +1,13 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
147 changes: 147 additions & 0 deletions benchmark/simnet_bow/model.py
@@ -0,0 +1,147 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import paddle
import paddle.fluid as fluid

from paddlerec.core.utils import envs
from paddlerec.core.model import ModelBase


class Model(ModelBase):
def __init__(self, config):
ModelBase.__init__(self, config)

    def _init_hyper_parameters(self):
        # net() below relies on dict_dim / emb_dim / hid_dim / is_sparse /
        # margin; the key names are assumed to match config.yaml.
        self.dict_dim = envs.get_global_env("hyper_parameters.dict_dim")
        self.emb_dim = envs.get_global_env("hyper_parameters.emb_dim")
        self.hid_dim = envs.get_global_env("hyper_parameters.hid_dim")
        self.is_sparse = envs.get_global_env("hyper_parameters.is_sparse",
                                             True)
        self.margin = envs.get_global_env("hyper_parameters.margin", 0.1)
        self.learning_rate = envs.get_global_env(
            "hyper_parameters.optimizer.learning_rate")

def input_data(self, is_infer=False, **kwargs):
q = fluid.layers.data(
name="query", shape=[1], dtype="int64", lod_level=1)
pt = fluid.layers.data(
name="pos_title", shape=[1], dtype="int64", lod_level=1)
nt = fluid.layers.data(
name="neg_title", shape=[1], dtype="int64", lod_level=1)

inputs = [q, pt, nt]
return inputs

def net(self, input, is_infer=False):
        dict_dim = self.dict_dim
        emb_dim = self.emb_dim
        hid_dim = self.hid_dim
        is_sparse = self.is_sparse  # whether embeddings use sparse updates
        base_lr = self.learning_rate
        emb_lr = self.learning_rate * 3

q = input[0]
pt = input[1]
nt = input[2]

q_emb = fluid.layers.embedding(
input=q,
size=[dict_dim, emb_dim],
param_attr=fluid.ParamAttr(
name="__emb__", learning_rate=emb_lr),
is_sparse=is_sparse)
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = fluid.layers.softsign(q_sum)
# fc layer after conv
q_fc = fluid.layers.fc(input=q_ss,
size=hid_dim,
param_attr=fluid.ParamAttr(
name="__q_fc__",
learning_rate=base_lr,
initializer=fluid.initializer.Xavier()))
# embedding
pt_emb = fluid.layers.embedding(
input=pt,
size=[dict_dim, emb_dim],
param_attr=fluid.ParamAttr(
name="__emb__",
learning_rate=emb_lr,
initializer=fluid.initializer.Xavier()),
is_sparse=is_sparse)
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = fluid.layers.softsign(pt_sum)
# fc layer
pt_fc = fluid.layers.fc(input=pt_ss,
size=hid_dim,
param_attr=fluid.ParamAttr(
name="__fc__",
learning_rate=base_lr,
initializer=fluid.initializer.Xavier()),
bias_attr=fluid.ParamAttr(
name="__fc_b__",
initializer=fluid.initializer.Xavier()))

# embedding
nt_emb = fluid.layers.embedding(
input=nt,
size=[dict_dim, emb_dim],
param_attr=fluid.ParamAttr(
name="__emb__",
learning_rate=emb_lr,
initializer=fluid.initializer.Xavier()),
is_sparse=is_sparse)

# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = fluid.layers.softsign(nt_sum)
# fc layer
nt_fc = fluid.layers.fc(input=nt_ss,
size=hid_dim,
param_attr=fluid.ParamAttr(
name="__fc__",
learning_rate=base_lr,
initializer=fluid.initializer.Xavier()),
bias_attr=fluid.ParamAttr(
name="__fc_b__",
initializer=fluid.initializer.Xavier()))
        cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc)
        cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc)
        # pairwise hinge loss between positive and negative similarities
        avg_cost = self.get_loss(cos_q_pt, cos_q_nt)
        self._cost = avg_cost

    def get_loss(self, cos_q_pt, cos_q_nt):
        # hinge: max(0, margin - cos(q, pos) + cos(q, neg)), averaged over batch
        loss_op1 = fluid.layers.elementwise_sub(
            fluid.layers.fill_constant_batch_size_like(
                input=cos_q_pt,
                shape=[-1, 1],
                value=self.margin,
                dtype='float32'),
            cos_q_pt)
loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
loss_op3 = fluid.layers.elementwise_max(
fluid.layers.fill_constant_batch_size_like(
input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'),
loss_op2)
avg_cost = fluid.layers.mean(loss_op3)
return avg_cost

def optimizer(self):
optimizer = paddle.optimizer.SGD(self.learning_rate)
return optimizer

def infer_net(self):
pass
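
For reference, `get_loss` above is a pairwise hinge loss: mean(max(0, margin - cos(q, pos) + cos(q, neg))). A plain-numpy sketch of the same arithmetic (values are examples):

# Numpy sketch of the pairwise hinge loss computed by get_loss.
import numpy as np

margin = 0.1                                 # example value
cos_q_pt = np.array([[0.8], [0.3]])          # cos(query, positive title)
cos_q_nt = np.array([[0.2], [0.5]])          # cos(query, negative title)

loss = np.maximum(0.0, margin - cos_q_pt + cos_q_nt)  # hinge per sample
print(loss.mean())  # 0.15 -- first pair clears the margin, second does not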
File renamed without changes.
13 changes: 13 additions & 0 deletions benchmark/word2vec/dataset_generator.py
@@ -0,0 +1,13 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
140 changes: 140 additions & 0 deletions benchmark/word2vec/model.py
@@ -0,0 +1,140 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import paddle
import paddle.fluid as fluid

from paddlerec.core.utils import envs
from paddlerec.core.model import ModelBase


class Model(ModelBase):
def __init__(self, config):
ModelBase.__init__(self, config)

    def _init_hyper_parameters(self):
        # net() below relies on embedding_size / dict_size / nce_num /
        # is_sparse; the key names are assumed to match config.yaml.
        self.embedding_size = envs.get_global_env(
            "hyper_parameters.embedding_size")
        self.dict_size = envs.get_global_env("hyper_parameters.dict_size")
        self.nce_num = envs.get_global_env("hyper_parameters.nce_num")
        self.is_sparse = envs.get_global_env("hyper_parameters.is_sparse",
                                             True)
        self.learning_rate = envs.get_global_env(
            "hyper_parameters.optimizer.learning_rate")

def input_data(self, is_infer=False, **kwargs):

input_word = fluid.layers.data(
name="input_word", shape=[1], dtype='int64', lod_level=1)
true_word = fluid.layers.data(
name='true_label', shape=[1], dtype='int64', lod_level=1)
neg_word = fluid.layers.data(
name="neg_label", shape=[1], dtype='int64', lod_level=1)
inputs = [input_word, true_word, neg_word]
return inputs

    def net(self, input, is_infer=False):

        init_width = 0.5 / self.embedding_size
        input_emb = fluid.layers.embedding(
            input=input[0],
            is_sparse=self.is_sparse,
            size=[self.dict_size, self.embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb',
                initializer=fluid.initializer.Uniform(-init_width,
                                                      init_width)))

        true_emb_w = fluid.layers.embedding(
            input=input[1],
            is_sparse=self.is_sparse,
            size=[self.dict_size, self.embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb_w',
                initializer=fluid.initializer.Constant(value=0.0)))

        true_emb_b = fluid.layers.embedding(
            input=input[1],
            is_sparse=self.is_sparse,
            size=[self.dict_size, 1],
            param_attr=fluid.ParamAttr(
                name='emb_b',
                initializer=fluid.initializer.Constant(value=0.0)))

        neg_word_reshape = fluid.layers.reshape(input[2], shape=[-1, 1])
        neg_word_reshape.stop_gradient = True

        neg_emb_w = fluid.layers.embedding(
            input=neg_word_reshape,
            is_sparse=self.is_sparse,
            size=[self.dict_size, self.embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb_w', learning_rate=1.0))

        neg_emb_w_re = fluid.layers.reshape(
            neg_emb_w, shape=[-1, self.nce_num, self.embedding_size])

        neg_emb_b = fluid.layers.embedding(
            input=neg_word_reshape,
            is_sparse=self.is_sparse,
            size=[self.dict_size, 1],
            param_attr=fluid.ParamAttr(
                name='emb_b', learning_rate=1.0))

        neg_emb_b_vec = fluid.layers.reshape(
            neg_emb_b, shape=[-1, self.nce_num])

true_logits = fluid.layers.elementwise_add(
fluid.layers.reduce_sum(
fluid.layers.elementwise_mul(input_emb, true_emb_w),
dim=1,
keep_dim=True),
true_emb_b)

        input_emb_re = fluid.layers.reshape(
            input_emb, shape=[-1, 1, self.embedding_size])

        neg_matmul = fluid.layers.matmul(
            input_emb_re, neg_emb_w_re, transpose_y=True)
        neg_matmul_re = fluid.layers.reshape(
            neg_matmul, shape=[-1, self.nce_num])
neg_logits = fluid.layers.elementwise_add(neg_matmul_re, neg_emb_b_vec)
# nce loss

label_ones = fluid.layers.fill_constant_batch_size_like(
true_logits, shape=[-1, 1], value=1.0, dtype='float32')
        label_zeros = fluid.layers.fill_constant_batch_size_like(
            true_logits,
            shape=[-1, self.nce_num],
            value=0.0,
            dtype='float32')

true_xent = fluid.layers.sigmoid_cross_entropy_with_logits(true_logits,
label_ones)
neg_xent = fluid.layers.sigmoid_cross_entropy_with_logits(neg_logits,
label_zeros)
cost = fluid.layers.elementwise_add(
fluid.layers.reduce_sum(
true_xent, dim=1),
fluid.layers.reduce_sum(
neg_xent, dim=1))
        avg_cost = fluid.layers.reduce_mean(cost)
        self._cost = avg_cost

def optimizer(self):
optimizer = paddle.optimizer.Adam(self.learning_rate, lazy_mode=True)
return optimizer

def infer_net(self):
pass
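
The reshape/matmul chain in `net` above produces one logit per (center word, negative sample) pair. A numpy sketch of the shapes involved (sizes are example values):

# Numpy sketch of the negative-sampling logits and their shapes.
import numpy as np

batch, emb, nce = 4, 8, 5                    # example sizes
input_emb = np.random.rand(batch, emb)       # center-word embeddings
true_emb_w = np.random.rand(batch, emb)      # target-word weight rows
true_emb_b = np.random.rand(batch, 1)
neg_emb_w = np.random.rand(batch, nce, emb)  # after reshape to [-1, nce_num, emb]
neg_emb_b = np.random.rand(batch, nce)

# dot(center, target) + bias -> [batch, 1]
true_logits = (input_emb * true_emb_w).sum(axis=1, keepdims=True) + true_emb_b
# batched matmul against the nce negatives -> [batch, nce]
neg_logits = (input_emb[:, None, :] @ neg_emb_w.transpose(0, 2, 1)
              ).reshape(batch, nce) + neg_emb_b
print(true_logits.shape, neg_logits.shape)   # (4, 1) (4, 5)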
