New API and tests for missing operator overloading (#137)
- New API and tests for the operator overloading issue (#81)
- New gradient test.
- Reduce epochs in test.
  - Easier to run.
khatchad authored Jan 31, 2024
1 parent 25d2f80 commit 2e9b1da
Showing 8 changed files with 92 additions and 5 deletions.
@@ -217,7 +217,11 @@ public void testTf2()
    testTf2("tf2_test_dataset10.py", "add", 2, 2, 2, 3);
    testTf2("tf2_test_tensor_list.py", "add", 2, 2, 2, 3);
    testTf2("tf2_test_tensor_list2.py", "add", 0, 0);
-   testTf2("tf2_test_tensor_list3.py", "add", 0, 0);
+   testTf2(
+       "tf2_test_tensor_list3.py",
+       "add",
+       0,
+       0); // NOTE: Change to 2, 2, 2, 3 once https://github.com/wala/ML/issues/136 is fixed.
    testTf2("tf2_test_tensor_list4.py", "add", 0, 0);
    testTf2("tf2_test_tensor_list5.py", "add", 0, 0);
    testTf2("tf2_test_model_call.py", "SequentialModel.__call__", 1, 1, 3);
@@ -234,10 +238,10 @@ public void testTf2()
        "neural_network.py",
        "cross_entropy_loss",
        1,
-       4,
+       8,
        3); // NOTE: Change to 2 tensor parameters once https://github.com/wala/ML/issues/127 is
            // fixed. Values 2 and 3 will correspond to the tensor parameters.
-   testTf2("neural_network.py", "run_optimization", 2, 2, 2, 3);
+   testTf2("neural_network.py", "run_optimization", 2, 3, 2, 3);
    testTf2(
        "neural_network.py",
        "accuracy",
@@ -259,10 +263,19 @@ public void testTf2()
    testTf2("tf2_test_add5.py", "f", 1, 1, 2);
    testTf2("tf2_test_add6.py", "f", 1, 1, 2);
    testTf2("multigpu_training.py", "run_optimization", 2, 4, 2, 3);
+   testTf2(
+       "multigpu_training.py",
+       "average_gradients",
+       0,
+       0); // NOTE: Change to 1, 1, 2 once https://github.com/wala/ML/issues/136 is fixed.
    testTf2("tf2_test_reduce_mean.py", "f", 1, 1, 2);
    testTf2("tf2_test_reduce_mean.py", "g", 1, 1, 2);
    testTf2("tf2_test_reduce_mean.py", "h", 1, 1, 2);
    testTf2("tf2_test_gradient.py", "f", 1, 1, 2);
+   testTf2("tf2_test_gradient2.py", "f", 1, 1, 2);
+   testTf2("tf2_test_multiply.py", "f", 1, 1, 2);
+   testTf2("tf2_test_multiply2.py", "f", 1, 1, 2);
+   testTf2("tf2_test_sparse_softmax_cross_entropy_with_logits.py", "f", 1, 1, 2);
  }

  private void testTf2(
23 changes: 23 additions & 0 deletions com.ibm.wala.cast.python.ml/data/tensorflow.xml
@@ -75,12 +75,17 @@
      <putfield class="LRoot" field="conv3d" fieldType="LRoot" ref="nn" value="conv3d" />
      <new def="softmax" class="Ltensorflow/functions/softmax" />
      <putfield class="LRoot" field="softmax" fieldType="LRoot" ref="nn" value="softmax" />
+     <new def="sparse_softmax_cross_entropy_with_logits" class="Ltensorflow/functions/sparse_softmax_cross_entropy_with_logits" />
+     <putfield class="LRoot" field="sparse_softmax_cross_entropy_with_logits" fieldType="LRoot" ref="nn" value="sparse_softmax_cross_entropy_with_logits" />
      <new def="sigmoid" class="Ltensorflow/math/sigmoid" />
      <putfield class="LRoot" field="sigmoid" fieldType="LRoot" ref="nn" value="sigmoid" />
      <putfield class="LRoot" field="sigmoid" fieldType="LRoot" ref="math" value="sigmoid" />
      <new def="add" class="Ltensorflow/math/add" />
      <putfield class="LRoot" field="add" fieldType="LRoot" ref="x" value="add" />
      <putfield class="LRoot" field="add" fieldType="LRoot" ref="math" value="add" />
+     <new def="multiply" class="Ltensorflow/math/multiply" />
+     <putfield class="LRoot" field="multiply" fieldType="LRoot" ref="x" value="multiply" />
+     <putfield class="LRoot" field="multiply" fieldType="LRoot" ref="math" value="multiply" />
      <new def="reduce_mean" class="Ltensorflow/math/reduce_mean" />
      <putfield class="LRoot" field="reduce_mean" fieldType="LRoot" ref="x" value="reduce_mean" />
      <putfield class="LRoot" field="reduce_mean" fieldType="LRoot" ref="math" value="reduce_mean" />
@@ -285,6 +290,18 @@
          <return value="xx" />
        </method>
      </class>
+     <class name="multiply" allocatable="true">
+       <!-- https://www.tensorflow.org/versions/r2.9/api_docs/python/tf/multiply -->
+       <method name="read_data" descriptor="()LRoot;">
+         <new def="x" class="Ltensorflow/math/multiply" />
+         <return value="x" />
+       </method>
+       <method name="do" descriptor="()LRoot;" numArgs="4" paramNames="self x y name">
+         <!-- Even though tf.multiply() isn't a tensor "generator," it can convert its non-tensor arguments to tensors. -->
+         <call class="LRoot" name="read_data" descriptor="()LRoot;" type="virtual" arg0="arg0" def="xx" />
+         <return value="xx" />
+       </method>
+     </class>
      <class name="reduce_mean" allocatable="true">
        <!-- https://www.tensorflow.org/versions/r2.9/api_docs/python/tf/reduce_mean -->
        <method name="do" descriptor="()LRoot;" numArgs="5" paramNames="self input_tensor axis keepdims name">
@@ -682,6 +699,12 @@
          <return value="logits" />
        </method>
      </class>
+     <class name="sparse_softmax_cross_entropy_with_logits" allocatable="true">
+       <!-- https://www.tensorflow.org/versions/r2.9/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits -->
+       <method name="do" descriptor="()LRoot;" numArgs="4" paramNames="self labels logits name">
+         <return value="labels" />
+       </method>
+     </class>
    </package>
    <package name="tensorflow/estimator">
      <class name="Estimator" allocatable="true">
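
For context: the new sparse_softmax_cross_entropy_with_logits summary returns its labels argument, which appears to be a deliberate stand-in, since the real op yields one loss value per label, i.e., a tensor shaped like labels. A minimal sketch (not part of this commit) of that behavior under TensorFlow 2.x:

import tensorflow as tf

logits = tf.random.uniform([3, 4])  # 3 examples, 4 classes
labels = tf.constant([0, 3, 1])     # one class index per example
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
assert loss.shape == labels.shape   # one scalar loss per example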
4 changes: 2 additions & 2 deletions com.ibm.wala.cast.python.test/data/multigpu_training.py
@@ -42,11 +42,11 @@

# Training parameters.
learning_rate = 0.001
-training_steps = 1000
+training_steps = 1
# Split batch size equally between GPUs.
# Note: Reduce batch size if you encounter OOM Errors.
batch_size = 1024 * num_gpus
-display_step = 20
+display_step = 1

# Network parameters.
conv1_filters = 64 # number of filters for 1st conv layer.
14 changes: 14 additions & 0 deletions com.ibm.wala.cast.python.test/data/tf2_test_gradient2.py
@@ -0,0 +1,14 @@
# From https://www.tensorflow.org/versions/r2.9/api_docs/python/tf/GradientTape#gradient.

import tensorflow as tf


def f(a):
    pass


x = tf.ragged.constant([[1.0, 2.0], [3.0]])
with tf.GradientTape() as g:
    g.watch(x)
    y = tf.multiply(x, x)
f(g.gradient(y, x))
11 changes: 11 additions & 0 deletions com.ibm.wala.cast.python.test/data/tf2_test_multiply.py
@@ -0,0 +1,11 @@
# From https://www.tensorflow.org/versions/r2.9/api_docs/python/tf/math/multiply#for_example/

import tensorflow as tf


def f(a):
    pass


x = tf.constant(([1, 2, 3, 4]))
f(tf.math.multiply(x, x))
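
For reference, the overloaded-operator spelling of the same computation (the pattern the operator overloading issue #81 concerns) is x * x, since Tensor.__mul__ dispatches to tf.multiply. A minimal sketch, not part of this commit:

import tensorflow as tf


def f(a):
    pass


x = tf.constant([1, 2, 3, 4])
f(x * x)  # the * operator on tensors dispatches to tf.math.multiply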
10 changes: 10 additions & 0 deletions com.ibm.wala.cast.python.test/data/tf2_test_multiply2.py
@@ -0,0 +1,10 @@
# From https://www.tensorflow.org/versions/r2.9/api_docs/python/tf/math/multiply#for_example/

import tensorflow as tf


def f(a):
    pass


f(tf.math.multiply(7, 6))
14 changes: 14 additions & 0 deletions com.ibm.wala.cast.python.test/data/tf2_test_sparse_softmax_cross_entropy_with_logits.py
@@ -0,0 +1,14 @@
# from https://www.tensorflow.org/versions/r2.9/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits

import tensorflow as tf


def f(a):
    pass


logits = tf.constant(
    [[2.0, -5.0, 0.5, -0.1], [0.0, 0.0, 1.9, 1.4], [-100.0, 100.0, -100.0, -100.0]]
)
labels = tf.constant([0, 3, 1])
f(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits.numpy()))
2 changes: 2 additions & 0 deletions com.ibm.wala.cast.python.test/data/tf2_test_tensor_list3.py
@@ -1,3 +1,5 @@
+# Test https://github.com/wala/ML/issues/136.
+
import tensorflow as tf


