remove unused parameters in optimizer tests (pytorch#18084)
Summary:
Pull Request resolved: pytorch#18084

The data_strategy parameter was not used in some of the unit tests for optimizers.

Reviewed By: hyuen

Differential Revision: D14487830

fbshipit-source-id: d757cd06aa2965f4c0570a4a18ba090b98820ef4
jspark1105 authored and facebook-github-bot committed Mar 16, 2019
Commit c7448aa (parent: be364ac)
Showing 3 changed files with 5 additions and 12 deletions.
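For context: Hypothesis's st.data() strategy injects an object whose only purpose is to let the test body draw further values interactively via .draw(). Per the summary, none of the tests below ever drew from data_strategy, so the argument was dead weight that Hypothesis still had to generate and shrink. A minimal sketch (not from this repository) of what st.data() is actually for:

    import hypothesis.strategies as st
    from hypothesis import given

    @given(xs=st.lists(st.integers(), min_size=1), data=st.data())
    def test_draws_dependent_value(xs, data):
        # st.data() exists to draw values that depend on earlier inputs,
        # here an index bounded by the length of the generated list
        i = data.draw(st.integers(min_value=0, max_value=len(xs) - 1))
        assert xs[i] in xs

A @given keyword with no corresponding .draw() call in the body, as in the tests below, contributes nothing and can simply be deleted.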
4 changes: 1 addition & 3 deletions caffe2/python/operator_test/adadelta_test.py
@@ -144,10 +144,8 @@ def ref_sparse(param, moment, moment_delta, indices, grad, lr, decay,
                              allow_nan=False, allow_infinity=False),
            decay=st.floats(min_value=0.01, max_value=0.99,
                            allow_nan=False, allow_infinity=False),
-           data_strategy=st.data(),
            **hu.gcs)
-    def test_sparse_adadelta_empty(self, inputs, lr, epsilon, decay,
-                                   data_strategy, gc, dc):
+    def test_sparse_adadelta_empty(self, inputs, lr, epsilon, decay, gc, dc):
         param, moment, moment_delta = inputs
         moment = np.abs(moment)
         lr = np.array([lr], dtype=np.float32)
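The decorator line and the function signature have to change together: Hypothesis matches @given keywords against the test's parameters by name, and (in recent Hypothesis versions, at least) a keyword with no matching parameter fails the test with InvalidArgument rather than being silently dropped. A hedged, self-contained sketch of that failure mode:

    import pytest
    import hypothesis.strategies as st
    from hypothesis import given
    from hypothesis.errors import InvalidArgument

    @given(x=st.integers(), data_strategy=st.data())
    def test_mismatched(x):  # signature dropped data_strategy; decorator did not
        pass

    def test_decorator_and_signature_must_agree():
        with pytest.raises(InvalidArgument):
            test_mismatched()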
9 changes: 3 additions & 6 deletions caffe2/python/operator_test/adagrad_test.py
@@ -134,10 +134,9 @@ def test_sparse_adagrad(self, inputs, lr, epsilon, gc, dc):
         epsilon=st.floats(
             min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
         ),
-        data_strategy=st.data(),
         **hu.gcs
     )
-    def test_sparse_adagrad_empty(self, inputs, lr, epsilon, data_strategy, gc, dc):
+    def test_sparse_adagrad_empty(self, inputs, lr, epsilon, gc, dc):
         param, momentum = inputs
         grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)

@@ -176,10 +175,9 @@ def test_sparse_adagrad_empty(self, inputs, lr, epsilon, data_strategy, gc, dc):
         epsilon=st.floats(
             min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
         ),
-        data_strategy=st.data(),
         **hu.gcs
     )
-    def test_row_wise_sparse_adagrad(self, inputs, lr, epsilon, data_strategy, gc, dc):
+    def test_row_wise_sparse_adagrad(self, inputs, lr, epsilon, gc, dc):
         adagrad_sparse_test_helper(
             self,
             inputs,
@@ -200,11 +198,10 @@ def test_row_wise_sparse_adagrad(self, inputs, lr, epsilon, data_strategy, gc, dc):
         epsilon=st.floats(
             min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
         ),
-        data_strategy=st.data(),
         **hu.gcs
     )
     def test_row_wise_sparse_adagrad_empty(
-        self, inputs, lr, epsilon, data_strategy, gc, dc
+        self, inputs, lr, epsilon, gc, dc
     ):
         param, momentum = inputs
         grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)
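The *_empty variants above all build the same fixture: a gradient with zero rows but the parameter's trailing shape, so the sparse op is exercised with an empty index set. A standalone sketch of that construction (the empty index array's dtype is assumed here, not taken from the diff):

    import numpy as np

    param = np.random.rand(4, 3).astype(np.float32)
    # zero rows, same trailing shape as param -> shape (0, 3)
    grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)
    indices = np.empty(shape=(0,), dtype=np.int64)
    assert grad.shape == (0, 3) and indices.size == 0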
4 changes: 1 addition & 3 deletions caffe2/python/operator_test/wngrad_test.py
@@ -182,10 +182,8 @@ def test_sparse_wngrad(self, inputs, seq_b, lr, epsilon, gc, dc):
                         allow_nan=False, allow_infinity=False),
            epsilon=st.floats(min_value=0.01, max_value=0.99,
                              allow_nan=False, allow_infinity=False),
-           data_strategy=st.data(),
            **hu.gcs_cpu_only)
-    def test_sparse_wngrad_empty(self, inputs, seq_b, lr, epsilon,
-                                 data_strategy, gc, dc):
+    def test_sparse_wngrad_empty(self, inputs, seq_b, lr, epsilon, gc, dc):
         param = inputs[0]
         seq_b = np.array([seq_b, ], dtype=np.float32)
         lr = np.array([lr], dtype=np.float32)
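Every signature keeps gc and dc because they come from unpacking hu.gcs (or hu.gcs_cpu_only) into @given. A rough stand-in, assuming gcs is essentially a dict of device strategies as in caffe2.python.hypothesis_test_util (names and contents approximated, not the real implementation):

    import hypothesis.strategies as st
    from hypothesis import given

    # stand-in for hu.gcs: a dict of strategies, so **gcs adds gc and dc
    gcs = dict(
        gc=st.sampled_from(["CPU", "CUDA"]),
        dc=st.lists(st.sampled_from(["CPU", "CUDA"]), min_size=1, max_size=2),
    )

    @given(
        lr=st.floats(min_value=0.01, max_value=0.99,
                     allow_nan=False, allow_infinity=False),
        **gcs
    )
    def test_device_params_come_from_gcs(lr, gc, dc):
        assert gc in ("CPU", "CUDA")

Removing data_strategy=st.data() leaves this pattern untouched, which is why only the one keyword and parameter disappear in each hunk.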
