@@ -160,11 +160,9 @@ class PnormCUDAKernel : public framework::OpKernel<T> {
    auto ndim = out_norm->dims();
    float porder = ctx.Attr<float>("porder");
    int axis = ctx.Attr<int>("axis");
-   bool asvector = ctx.Attr<bool>("asvector");
    if (axis < 0) axis = xdim.size() + axis;
    std::vector<int> reduce_axis = {axis};

-   auto& dev_ctx = ctx.cuda_device_context();
    auto stream = ctx.cuda_device_context().stream();

    using MT = typename details::MPTypeTrait<T>::Type;
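For context, the kernel accumulates in a wider "math" type selected by details::MPTypeTrait. A minimal sketch of that idea follows; the names float16 and MPTypeTraitSketch are illustrative stand-ins, not the actual Paddle definitions:

#include <cstdint>

// Stand-in for platform::float16; only the storage width matters here.
struct float16 { uint16_t bits; };

// Map a storage type T to the type used for on-device accumulation.
template <typename T>
struct MPTypeTraitSketch {
  using Type = T;  // float/double accumulate in their own precision
};

template <>
struct MPTypeTraitSketch<float16> {
  using Type = float;  // fp16 accumulates in float to limit rounding error
};

// Usage mirrors the kernel line above:
//   using MT = typename MPTypeTraitSketch<T>::Type;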
@@ -246,20 +244,14 @@ class PnormGradCUDAKernel : public framework::OpKernel<T> {
        ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* out_dx = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
    T* dx = out_dx->mutable_data<T>(ctx.GetPlace());
-   const T* x = in_x->data<T>();
-   const T* x_norm = in_norm->data<T>();
-   const T* norm_dy = in_norm_dy->data<T>();

    auto xdim = in_x->dims();
    float porder = ctx.Attr<float>("porder");
-   T eps = static_cast<T>(ctx.Attr<float>("epsilon"));
    int axis = ctx.Attr<int>("axis");
    bool reduce_all = ((axis < 0) || (in_norm->numel() == 1));
-   bool asvector = ctx.Attr<bool>("asvector");
    if (axis < 0) axis = xdim.size() + axis;
    const std::vector<int> dims = {axis};

-   auto& dev_ctx = ctx.cuda_device_context();
    auto& cuda_ctx = ctx.template device_context<DeviceContext>();

    if (porder == 0) {
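The raw pointers (x, x_norm, norm_dy) and eps deleted above fed the old elementwise gradient path. As a reminder of the math involved: for y = (sum_i |x_i|^p)^(1/p) with p > 0, the elementwise gradient is dL/dx_i = dy * sign(x_i) * |x_i|^(p-1) / y^(p-1). A hedged sketch of that rule, where pnorm_grad_elem is an illustrative helper and not a Paddle function:

#include <cmath>

// Elementwise p-norm gradient for p > 0, given the upstream gradient dy:
//   dL/dx_i = dy * sign(x_i) * |x_i|^(p-1) / y^(p-1)
float pnorm_grad_elem(float x, float y_norm, float dy, float p) {
  if (y_norm == 0.0f) return 0.0f;  // subgradient convention at the origin
  float sign = (x > 0.0f) ? 1.0f : ((x < 0.0f) ? -1.0f : 0.0f);
  return dy * sign * std::pow(std::fabs(x), p - 1.0f) /
         std::pow(y_norm, p - 1.0f);
}

For porder == 0, the branch that begins at the end of this hunk, the p-norm counts nonzero entries, so the gradient with respect to X is zero everywhere.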