Skip to content

Commit bb7b4c0

Browse files
remove boost::algorithm::ends_with, boost macros, and boost::lexical_cast APIs (#34310)
* replace boost::algorithm::ends_with with a self-defined ends_with function * remove BOOST macros in certain operators * remove boost::lexical_cast * add a test for string_helper * add more test cases for string_helper * modify the join_string function and its test cases * fix the build_strategy_test build failure * remove string_helper_test from parallel_UT_rule.py
1 parent 911c859 commit bb7b4c0

19 files changed

+333
-226
lines changed

paddle/fluid/distributed/common/sparse_sharding_merge.h

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121
#include <vector>
2222

2323
#include <ThreadPool.h>
24-
#include "boost/lexical_cast.hpp"
2524
#include "glog/logging.h"
2625
#include "paddle/fluid/distributed/common/utils.h"
2726
#include "paddle/fluid/framework/blocking_queue.h"
@@ -36,8 +35,6 @@ constexpr int Q_SIZE = 10000;
3635
constexpr int BUCKET = 10;
3736
constexpr char XEOF[] = "EOF";
3837

39-
using boost::lexical_cast;
40-
4138
inline double GetCurrentUS() {
4239
struct timeval time;
4340
gettimeofday(&time, NULL);
@@ -208,8 +205,10 @@ class ShardingMerge {
208205
for (int x = 0; x < embedding_dim; ++x) {
209206
float v = 0.0;
210207
try {
211-
v = lexical_cast<float>(values_str[x]);
212-
} catch (boost::bad_lexical_cast &e) {
208+
v = std::stof(values_str[x]);
209+
} catch (std::invalid_argument &e) {
210+
VLOG(0) << " get unexpected line: " << line;
211+
} catch (std::out_of_range &e) {
213212
VLOG(0) << " get unexpected line: " << line;
214213
}
215214
out->push_back(v);

paddle/fluid/distributed/index_dataset/index_wrapper.cc

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,6 @@ limitations under the License. */
1717
#include <vector>
1818
#include "paddle/fluid/framework/io/fs.h"
1919

20-
#include <boost/algorithm/string.hpp>
21-
#include <boost/lexical_cast.hpp>
2220
#include "paddle/fluid/distributed/index_dataset/index_wrapper.h"
2321

2422
namespace paddle {
@@ -65,7 +63,7 @@ int TreeIndex::Load(const std::string filename) {
6563
if (item.key() == ".tree_meta") {
6664
meta_.ParseFromString(item.value());
6765
} else {
68-
auto code = boost::lexical_cast<uint64_t>(item.key());
66+
auto code = std::stoull(item.key());
6967
IndexNode node;
7068
node.ParseFromString(item.value());
7169
PADDLE_ENFORCE_NE(node.id(), 0,

paddle/fluid/distributed/table/common_sparse_table.cc

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@
1515
#include "paddle/fluid/distributed/table/common_sparse_table.h"
1616
#include <sstream>
1717

18-
#include "boost/lexical_cast.hpp"
1918
#include "glog/logging.h"
2019
#include "paddle/fluid/platform/enforce.h"
2120

@@ -50,8 +49,11 @@ void CommonSparseTable::ProcessALine(const std::vector<std::string>& columns,
5049
float v = 0.0;
5150

5251
try {
53-
v = lexical_cast<float>(va);
54-
} catch (boost::bad_lexical_cast& e) {
52+
v = std::stof(va);
53+
} catch (std::invalid_argument& e) {
54+
VLOG(0) << "id: " << id << " get unexpected value: " << va
55+
<< " and be reset to: 0.0";
56+
} catch (std::out_of_range& e) {
5557
VLOG(0) << "id: " << id << " get unexpected value: " << va
5658
<< " and be reset to: 0.0";
5759
}
@@ -131,7 +133,7 @@ int64_t CommonSparseTable::LoadFromText(
131133

132134
while (std::getline(file, line)) {
133135
auto values = paddle::string::split_string<std::string>(line, "\t");
134-
auto id = lexical_cast<uint64_t>(values[0]);
136+
auto id = std::stoull(values[0]);
135137

136138
if (id % pserver_num != pserver_id) {
137139
VLOG(3) << "will not load " << values[0] << " from " << valuepath
@@ -150,10 +152,9 @@ int64_t CommonSparseTable::LoadFromText(
150152
VALUE* value_instant = block->GetValue(id);
151153

152154
if (values.size() == 5) {
153-
value_instant->count_ = lexical_cast<int>(values[1]);
154-
value_instant->unseen_days_ = lexical_cast<int>(values[2]);
155-
value_instant->is_entry_ =
156-
static_cast<bool>(lexical_cast<int>(values[3]));
155+
value_instant->count_ = std::stoi(values[1]);
156+
value_instant->unseen_days_ = std::stoi(values[2]);
157+
value_instant->is_entry_ = static_cast<bool>(std::stoi(values[3]));
157158
}
158159

159160
std::vector<float*> block_values = block->Get(id, meta.names, meta.dims);

paddle/fluid/distributed/table/common_sparse_table.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,6 @@
3333
#include "paddle/fluid/string/string_helper.h"
3434

3535
#define PSERVER_SAVE_SUFFIX ".shard"
36-
using boost::lexical_cast;
3736

3837
namespace paddle {
3938
namespace distributed {

paddle/fluid/distributed/table/ssd_sparse_table.cc

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -310,7 +310,7 @@ int64_t SSDSparseTable::LoadFromText(
310310

311311
while (std::getline(file, line)) {
312312
auto values = paddle::string::split_string<std::string>(line, "\t");
313-
auto id = lexical_cast<uint64_t>(values[0]);
313+
auto id = std::stoull(values[0]);
314314

315315
if (id % pserver_num != pserver_id) {
316316
VLOG(3) << "will not load " << values[0] << " from " << valuepath
@@ -329,10 +329,9 @@ int64_t SSDSparseTable::LoadFromText(
329329
VALUE* value_instant = block->GetValue(id);
330330

331331
if (values.size() == 5) {
332-
value_instant->count_ = lexical_cast<int>(values[1]);
333-
value_instant->unseen_days_ = lexical_cast<int>(values[2]);
334-
value_instant->is_entry_ =
335-
static_cast<bool>(lexical_cast<int>(values[3]));
332+
value_instant->count_ = std::stoi(values[1]);
333+
value_instant->unseen_days_ = std::stoi(values[2]);
334+
value_instant->is_entry_ = static_cast<bool>(std::stoi(values[3]));
336335
}
337336

338337
std::vector<float*> block_values = block->Get(id, meta.names, meta.dims);

paddle/fluid/framework/details/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,7 @@ if(NOT APPLE AND NOT WIN32 AND (WITH_GPU OR WITH_ROCM))
141141
endif()
142142
cc_library(build_strategy SRCS build_strategy.cc DEPS pass_builder ${IR_PASS_DEPS})
143143
cc_test(build_strategy_test SRCS build_strategy_test.cc
144-
DEPS build_strategy op_registry op_proto_maker graph)
144+
DEPS build_strategy op_registry op_proto_maker graph string_helper)
145145

146146
if (WITH_MKLDNN)
147147
target_link_libraries(build_strategy mkldnn_placement_pass)

paddle/fluid/framework/fleet/fleet_wrapper.cc

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -262,7 +262,7 @@ void FleetWrapper::HeterPushSparseVars(
262262
int64_t* ids = tensor->data<int64_t>();
263263
int slot = 0;
264264
if (dump_slot) {
265-
slot = boost::lexical_cast<int>(sparse_key_names[i]);
265+
slot = std::stoi(sparse_key_names[i]);
266266
}
267267
Variable* g_var = scope.FindVar(sparse_grad_names[i]);
268268
if (g_var == nullptr) {
@@ -915,12 +915,17 @@ void FleetWrapper::PushSparseVarsWithLabelAsync(
915915
int slot = 0;
916916
if (dump_slot) {
917917
try {
918-
slot = boost::lexical_cast<int>(sparse_key_names[i]);
919-
} catch (boost::bad_lexical_cast const& e) {
918+
slot = std::stoi(sparse_key_names[i]);
919+
} catch (std::invalid_argument const& e) {
920920
PADDLE_THROW(platform::errors::PreconditionNotMet(
921921
"sparse var's name: %s, doesn't support non-integer type name when "
922922
"dump_slot=True",
923923
sparse_key_names[i]));
924+
} catch (std::out_of_range const& e) {
925+
PADDLE_THROW(platform::errors::PreconditionNotMet(
926+
"sparse var's name: %s, integer type name out of range when "
927+
"dump_slot=True",
928+
sparse_key_names[i]));
924929
}
925930
}
926931
Variable* g_var = scope.FindVar(sparse_grad_names[i]);
@@ -1121,7 +1126,7 @@ void FleetWrapper::PushSparseFromTensorWithLabelAsync(
11211126
data[click_index] = static_cast<float>(fea_labels.at(input_idx));
11221127
}
11231128
if (dump_slot) {
1124-
int slot = boost::lexical_cast<int>(input_names[index]);
1129+
int slot = std::stoi(input_names[index]);
11251130
data[0] = static_cast<float>(slot);
11261131
}
11271132
++input_idx;

paddle/fluid/framework/ir/lock_free_optimize_pass.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,9 @@
1717
#include <string>
1818
#include <vector>
1919

20-
#include <boost/algorithm/string/predicate.hpp>
21-
2220
#include "paddle/fluid/framework/ir/graph.h"
2321
#include "paddle/fluid/framework/ir/pass.h"
22+
#include "paddle/fluid/string/string_helper.h"
2423

2524
namespace paddle {
2625
namespace framework {
@@ -109,7 +108,7 @@ class LockFreeOptimizePass : public Pass {
109108
"Input argument node cannot be nullptr."));
110109

111110
return node->NodeType() == Node::Type::kVariable &&
112-
boost::algorithm::ends_with(node->Name(), name);
111+
paddle::string::ends_with(node->Name(), name);
113112
}
114113

115114
inline bool IsVarNameContains(ir::Node* node, const std::string& name) const {

paddle/fluid/operators/expand_as_op.h

100755100644
Lines changed: 36 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -13,42 +13,12 @@ limitations under the License. */
1313

1414
#include <vector>
1515

16-
#include <boost/preprocessor/arithmetic/div.hpp>
17-
#include <boost/preprocessor/arithmetic/mod.hpp>
18-
#include <boost/preprocessor/comparison/greater.hpp>
19-
#include <boost/preprocessor/comparison/greater_equal.hpp>
20-
#include <boost/preprocessor/control/if.hpp>
21-
#include <boost/preprocessor/repetition/repeat.hpp>
2216
#include "paddle/fluid/framework/eigen.h"
2317
#include "paddle/fluid/framework/op_registry.h"
2418
#include "paddle/fluid/framework/operator.h"
2519
#include "paddle/fluid/operators/eigen/eigen_function.h"
2620

2721
#define MAX_RANK_SUPPORTED 6
28-
// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
29-
// Usage: BOOST_PP_REPEAT(count, macro, data).
30-
// This macro expands to the sequence:
31-
// macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
32-
// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
33-
// So the range of n is 0-5(which is count-1).
34-
// We want to generate case 1-6 instead of case 0-5.
35-
// So we need to change n to n + 1.
36-
#define EXPAND_AS_TEMPLATE(z, n, data) \
37-
case n + 1: { \
38-
ExpandAs<n + 1>(context); \
39-
break; \
40-
}
41-
#define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~)
42-
#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
43-
#define EXPAND_AS_GRAD_CASE(n) \
44-
case n + 1: { \
45-
ExpandAsBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
46-
break; \
47-
}
48-
#define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \
49-
BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), )
50-
#define REP_EXPAND_AS_GRAD_TEMPLATE(n) \
51-
BOOST_PP_REPEAT(n, EXPAND_AS_GRAD_TEMPLATE, ~)
5222

5323
namespace paddle {
5424
namespace operators {
@@ -67,7 +37,24 @@ class ExpandAsKernel : public framework::OpKernel<T> {
6737
void Compute(const framework::ExecutionContext& context) const override {
6838
auto rank = context.Input<Tensor>("X")->dims().size();
6939
switch (rank) {
70-
REP_EXPAND_AS_TEMPLATE(MAX_RANK_SUPPORTED)
40+
case 1:
41+
ExpandAs<1>(context);
42+
break;
43+
case 2:
44+
ExpandAs<2>(context);
45+
break;
46+
case 3:
47+
ExpandAs<3>(context);
48+
break;
49+
case 4:
50+
ExpandAs<4>(context);
51+
break;
52+
case 5:
53+
ExpandAs<5>(context);
54+
break;
55+
case 6:
56+
ExpandAs<6>(context);
57+
break;
7158
default:
7259
PADDLE_THROW(platform::errors::InvalidArgument(
7360
"Only support tensor with rank being between 1 and 6. But received "
@@ -165,7 +152,24 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
165152
"to %d, but the value received is %d.",
166153
MAX_RANK_SUPPORTED, dims));
167154
switch (dims) {
168-
REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
155+
case 1:
156+
ExpandAsBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
157+
break;
158+
case 2:
159+
ExpandAsBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
160+
break;
161+
case 3:
162+
ExpandAsBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
163+
break;
164+
case 4:
165+
ExpandAsBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
166+
break;
167+
case 5:
168+
ExpandAsBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
169+
break;
170+
case 6:
171+
ExpandAsBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
172+
break;
169173
default:
170174
PADDLE_THROW(platform::errors::InvalidArgument(
171175
"Only support tensor with rank being between 1 and 6. But "

paddle/fluid/operators/expand_as_v2_op.h

100755100644
Lines changed: 38 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -14,42 +14,12 @@ limitations under the License. */
1414
#include <algorithm>
1515
#include <vector>
1616

17-
#include <boost/preprocessor/arithmetic/div.hpp>
18-
#include <boost/preprocessor/arithmetic/mod.hpp>
19-
#include <boost/preprocessor/comparison/greater.hpp>
20-
#include <boost/preprocessor/comparison/greater_equal.hpp>
21-
#include <boost/preprocessor/control/if.hpp>
22-
#include <boost/preprocessor/repetition/repeat.hpp>
2317
#include "paddle/fluid/framework/eigen.h"
2418
#include "paddle/fluid/framework/op_registry.h"
2519
#include "paddle/fluid/framework/operator.h"
2620
#include "paddle/fluid/operators/eigen/eigen_function.h"
2721

2822
#define MAX_RANK_SUPPORTED 6
29-
// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
30-
// Usage: BOOST_PP_REPEAT(count, macro, data).
31-
// This macro expands to the sequence:
32-
// macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
33-
// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
34-
// So the range of n is 0-5(which is count-1).
35-
// We want to generate case 1-6 instead of case 0-5.
36-
// So we need to change n to n + 1.
37-
#define EXPAND_AS_TEMPLATE(z, n, data) \
38-
case n + 1: { \
39-
ExpandAs<n + 1>(context); \
40-
break; \
41-
}
42-
#define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~)
43-
#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
44-
#define EXPAND_AS_GRAD_CASE(n) \
45-
case n + 1: { \
46-
ExpandAsBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
47-
break; \
48-
}
49-
#define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \
50-
BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), )
51-
#define REP_EXPAND_AS_GRAD_TEMPLATE(n) \
52-
BOOST_PP_REPEAT(n, EXPAND_AS_GRAD_TEMPLATE, ~)
5323

5424
namespace paddle {
5525
namespace operators {
@@ -85,7 +55,26 @@ class ExpandAsV2Kernel : public framework::OpKernel<T> {
8555
"expand_as_v2 op must be less than or equal to %d.",
8656
target_rank, MAX_RANK_SUPPORTED));
8757

88-
switch (target_rank) { REP_EXPAND_AS_TEMPLATE(MAX_RANK_SUPPORTED) }
58+
switch (target_rank) {
59+
case 1:
60+
ExpandAs<1>(context);
61+
break;
62+
case 2:
63+
ExpandAs<2>(context);
64+
break;
65+
case 3:
66+
ExpandAs<3>(context);
67+
break;
68+
case 4:
69+
ExpandAs<4>(context);
70+
break;
71+
case 5:
72+
ExpandAs<5>(context);
73+
break;
74+
case 6:
75+
ExpandAs<6>(context);
76+
break;
77+
}
8978
}
9079

9180
protected:
@@ -186,7 +175,24 @@ class ExpandAsV2GradKernel : public framework::OpKernel<T> {
186175
"to %d, but the value received is %d.",
187176
MAX_RANK_SUPPORTED, dims));
188177
switch (dims) {
189-
REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
178+
case 1:
179+
ExpandAsBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
180+
break;
181+
case 2:
182+
ExpandAsBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
183+
break;
184+
case 3:
185+
ExpandAsBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
186+
break;
187+
case 4:
188+
ExpandAsBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
189+
break;
190+
case 5:
191+
ExpandAsBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
192+
break;
193+
case 6:
194+
ExpandAsBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
195+
break;
190196
default:
191197
PADDLE_THROW(platform::errors::InvalidArgument(
192198
"Only support tensor with rank being between 1 and 6. But "

0 commit comments

Comments
 (0)