
Commit 0af059c

Squashed commit master (merge) to resolve conflicts
Parent: 72a035f

143 files changed: 2,155 additions, 1,162 deletions


.github/bot_config.yml

Lines changed: 0 additions & 1 deletion
@@ -16,4 +16,3 @@
 # A list of assignees
 assignees:
    - tilakrayal
-   - sushreebarsa

keras/activations.py

Lines changed: 30 additions & 44 deletions
@@ -63,12 +63,12 @@ def softmax(x, axis=-1):
     The input values in are the log-odds of the resulting probability.
 
     Args:
-      x : Input tensor.
-      axis: Integer, axis along which the softmax normalization is applied.
+        x : Input tensor.
+        axis: Integer, axis along which the softmax normalization is applied.
 
     Returns:
-      Tensor, output of softmax transformation (all values are non-negative
-      and sum to 1).
+        Tensor, output of softmax transformation (all values are non-negative
+        and sum to 1).
 
     Examples:
 
@@ -84,22 +84,7 @@ def softmax(x, axis=-1):
     >>> layer = tf.keras.layers.Dense(32,
     ...                               activation=tf.keras.activations.softmax)
     """
-    if x.shape.rank <= 1:
-        raise ValueError(
-            f"Cannot apply softmax to a tensor that is 1D. Received input: {x}"
-        )
-
-    if isinstance(axis, int):
-        output = tf.nn.softmax(x, axis=axis)
-    else:
-        # nn.softmax does not support tuple axis.
-        numerator = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
-        denominator = tf.reduce_sum(numerator, axis=axis, keepdims=True)
-        output = numerator / denominator
-
-    # Cache the logits to use for crossentropy loss.
-    output._keras_logits = x
-    return output
+    return backend.softmax(x, axis)
 
 
 @keras_export("keras.activations.elu")
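The removed branch existed because `tf.nn.softmax` accepts only an integer axis; for a tuple of axes the old code normalized manually, and that logic now lives in `backend.softmax`. A minimal NumPy sketch of the manual path (the function name here is illustrative, not from the Keras source):

    import numpy as np

    def softmax_over_axes(x, axes=(-2, -1)):
        # Subtract the max for numerical stability, then exponentiate and
        # normalize jointly over all requested axes -- the same
        # exp / reduce_sum pattern the removed branch used.
        e = np.exp(x - x.max(axis=axes, keepdims=True))
        return e / e.sum(axis=axes, keepdims=True)

    x = np.random.randn(2, 3, 4)
    print(softmax_over_axes(x).sum(axis=(-2, -1)))  # every entry is ~1.0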
@@ -138,11 +123,11 @@ def elu(x, alpha=1.0):
     Args:
         x: Input tensor.
         alpha: A scalar, slope of negative section. `alpha` controls the value
-          to which an ELU saturates for negative net inputs.
+            to which an ELU saturates for negative net inputs.
 
     Returns:
         The exponential linear unit (ELU) activation function: `x` if `x > 0`
-      and `alpha * (exp(x) - 1)` if `x < 0`.
+        and `alpha * (exp(x) - 1)` if `x < 0`.
 
 
     Reference:
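As a quick check of the formula in this docstring (`x` for positive inputs, `alpha * (exp(x) - 1)` otherwise), a small example:

    import tensorflow as tf

    x = tf.constant([-2.0, 0.0, 2.0])
    print(tf.keras.activations.elu(x).numpy())
    # [-0.86466473  0.          2.        ]   since exp(-2) - 1 ~= -0.8647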
@@ -196,9 +181,9 @@ def selu(x):
 
     Notes:
         - To be used together with the
-          `tf.keras.initializers.LecunNormal` initializer.
+            `tf.keras.initializers.LecunNormal` initializer.
         - To be used together with the dropout variant
-          `tf.keras.layers.AlphaDropout` (not regular dropout).
+            `tf.keras.layers.AlphaDropout` (not regular dropout).
 
     References:
         - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
@@ -275,7 +260,7 @@ def swish(x):
         The swish activation applied to `x` (see reference paper for details).
 
     Reference:
-      - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
+        - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
     """
     return tf.nn.silu(x)
 

@@ -307,16 +292,16 @@ def relu(x, alpha=0.0, max_value=None, threshold=0.0):
     Args:
         x: Input `tensor` or `variable`.
         alpha: A `float` that governs the slope for values lower than the
-          threshold.
+            threshold.
         max_value: A `float` that sets the saturation threshold (the largest
-          value the function will return).
+            value the function will return).
         threshold: A `float` giving the threshold value of the activation
-          function below which values will be damped or set to zero.
+            function below which values will be damped or set to zero.
 
     Returns:
-        A `Tensor` representing the input tensor,
-        transformed by the relu activation function.
-        Tensor will be of the same shape and dtype of input `x`.
+        A `Tensor` representing the input tensor, transformed by the relu
+        activation function. Tensor will be of the same shape and dtype of
+        input `x`.
     """
     return backend.relu(
         x, alpha=alpha, max_value=max_value, threshold=threshold
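The three keyword arguments documented above interact as follows; the expected outputs follow directly from the docstring's definitions:

    import tensorflow as tf

    foo = tf.constant([-10.0, -5.0, 0.0, 5.0, 10.0])
    print(tf.keras.activations.relu(foo).numpy())                 # [ 0.  0.  0.  5. 10.]
    print(tf.keras.activations.relu(foo, alpha=0.5).numpy())      # [-5.  -2.5  0.   5.  10. ]
    print(tf.keras.activations.relu(foo, max_value=5.0).numpy())  # [0. 0. 0. 5. 5.]
    print(tf.keras.activations.relu(foo, threshold=5.0).numpy())  # zeros below the threshold, then 10.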
@@ -358,8 +343,8 @@ def gelu(x, approximate=False):
         if `approximate` is `False`.
 
     Reference:
-      - [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
-    """
+        - [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
+    """  # noqa: E501
     return tf.nn.gelu(x, approximate)
 
 
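A brief comparison of the exact and tanh-approximate forms the docstring distinguishes; the two agree closely but not exactly:

    import tensorflow as tf

    x = tf.constant([-1.0, 0.0, 1.0])
    print(tf.keras.activations.gelu(x).numpy())                    # exact: x * P(X <= x), X ~ N(0, 1)
    print(tf.keras.activations.gelu(x, approximate=True).numpy())  # tanh approximation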

@@ -412,10 +397,7 @@ def sigmoid(x):
     Returns:
         Tensor with the sigmoid activation: `1 / (1 + exp(-x))`.
     """
-    output = tf.sigmoid(x)
-    # Cache the logits to use for crossentropy loss.
-    output._keras_logits = x
-    return output
+    return backend.sigmoid(x)
 
 
 @keras_export("keras.activations.exponential")
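Like the softmax change above, this moves the `_keras_logits` caching into the backend rather than dropping it: Keras crossentropy losses look for that private attribute so they can operate on the raw logits instead of `log(probs)`, which is numerically safer. A hedged sketch of the idea, assuming the backend version still sets the attribute:

    import tensorflow as tf

    logits = tf.constant([[3.0, -2.0, 0.5]])
    probs = tf.keras.activations.sigmoid(logits)
    # If the activation cached its inputs, a loss can recover them and use
    # the stable tf.nn.sigmoid_cross_entropy_with_logits path instead.
    cached = getattr(probs, "_keras_logits", None)  # internal attribute
    print(cached is not None)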
@@ -459,11 +441,11 @@ def hard_sigmoid(x):
         x: Input tensor.
 
     Returns:
-      The hard sigmoid activation, defined as:
+        The hard sigmoid activation, defined as:
 
-        - `if x < -2.5: return 0`
-        - `if x > 2.5: return 1`
-        - `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`
+            - `if x < -2.5: return 0`
+            - `if x > 2.5: return 1`
+            - `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`
     """
     return backend.hard_sigmoid(x)
 
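Evaluating the piecewise definition above at a few points:

    import tensorflow as tf

    x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0])
    print(tf.keras.activations.hard_sigmoid(x).numpy())
    # [0.  0.3 0.5 0.7 1. ]  -- 0.2 * x + 0.5, clipped to [0, 1]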

@@ -535,6 +517,8 @@ def serialize(activation, use_legacy_format=False):
 
     Args:
         activation : Function object.
+        use_legacy_format: Boolean, whether to use the legacy format for
+            serialization. Defaults to False.
 
     Returns:
         String denoting the name attribute of the input function
@@ -608,9 +592,11 @@ def deserialize(name, custom_objects=None, use_legacy_format=False):
     """Returns activation function given a string identifier.
 
     Args:
-      name: The name of the activation function.
-      custom_objects: Optional `{function_name: function_obj}`
-        dictionary listing user-provided activation functions.
+        name: The name of the activation function.
+        custom_objects: Optional `{function_name: function_obj}`
+            dictionary listing user-provided activation functions.
+        use_legacy_format: Boolean, whether to use the legacy format for
+            deserialization. Defaults to False.
 
     Returns:
         Corresponding activation function.
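A short sketch of the two functions documented above; `my_act` is a made-up user function, shown only to illustrate `custom_objects`:

    import tensorflow as tf

    def my_act(x):
        return tf.nn.relu(x) * 0.9  # hypothetical user-defined activation

    relu_fn = tf.keras.activations.deserialize("relu")
    custom_fn = tf.keras.activations.deserialize(
        "my_act", custom_objects={"my_act": my_act}
    )
    name = tf.keras.activations.serialize(relu_fn)  # back to an identifier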

keras/api/BUILD

Lines changed: 1 addition & 0 deletions
@@ -5,6 +5,7 @@ load("//keras/api:api_gen.bzl", "gen_api_init_files")
 load("//keras/api:api_init_files.bzl", "KERAS_API_INIT_FILES", "KERAS_API_INIT_FILES_V1")
 
 package(
+    # copybara:uncomment default_applicable_licenses = ["//keras:license"],
     default_visibility = [
         "//keras:friends",
         "//third_party/py/tensorflow:__subpackages__",

keras/api/api_gen.bzl

Lines changed: 1 addition & 1 deletion
@@ -119,7 +119,7 @@ def gen_api_init_files(
             _make_cmd(api_gen_binary_target, flags, loading = "default"),
         ),
         srcs = srcs,
-        exec_tools = [":" + api_gen_binary_target],
+        tools = [":" + api_gen_binary_target],
         visibility = ["//visibility:public"],
     )
 

keras/api/golden/BUILD

Lines changed: 1 addition & 0 deletions
@@ -1,6 +1,7 @@
 # TensorFlow API backwards compatibility test goldens.
 
 package(
+    # copybara:uncomment default_applicable_licenses = ["//keras:license"],
     default_visibility = ["//visibility:public"],
     licenses = ["notice"],  # Apache 2.0
 )

keras/api/golden/v1/tensorflow.keras.-model.pbtxt

Lines changed: 4 additions & 0 deletions
@@ -36,6 +36,10 @@ tf_class {
     name: "dynamic"
     mtype: "<type \'property\'>"
   }
+  member {
+    name: "enable_tune_steps_per_execution"
+    mtype: "<type \'property\'>"
+  }
   member {
     name: "inbound_nodes"
     mtype: "<type \'property\'>"
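The same `enable_tune_steps_per_execution` member is added to the Sequential, LinearModel, and WideDeepModel goldens below; it records a new property on `tf.keras.Model`. A hedged sketch of reading it (the default value and its tie to `steps_per_execution` autotuning are assumptions, not confirmed by this diff):

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    model.compile(optimizer="sgd", loss="mse")
    # New read-only property recorded in the API goldens; presumably
    # reports whether steps_per_execution autotuning is active.
    print(model.enable_tune_steps_per_execution)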

keras/api/golden/v1/tensorflow.keras.-sequential.pbtxt

Lines changed: 4 additions & 0 deletions
@@ -38,6 +38,10 @@ tf_class {
     name: "dynamic"
     mtype: "<type \'property\'>"
   }
+  member {
+    name: "enable_tune_steps_per_execution"
+    mtype: "<type \'property\'>"
+  }
   member {
     name: "inbound_nodes"
     mtype: "<type \'property\'>"

keras/api/golden/v1/tensorflow.keras.experimental.-linear-model.pbtxt

Lines changed: 4 additions & 0 deletions
@@ -37,6 +37,10 @@ tf_class {
     name: "dynamic"
     mtype: "<type \'property\'>"
   }
+  member {
+    name: "enable_tune_steps_per_execution"
+    mtype: "<type \'property\'>"
+  }
   member {
     name: "inbound_nodes"
     mtype: "<type \'property\'>"

keras/api/golden/v1/tensorflow.keras.experimental.-wide-deep-model.pbtxt

Lines changed: 4 additions & 0 deletions
@@ -37,6 +37,10 @@ tf_class {
     name: "dynamic"
     mtype: "<type \'property\'>"
   }
+  member {
+    name: "enable_tune_steps_per_execution"
+    mtype: "<type \'property\'>"
+  }
   member {
     name: "inbound_nodes"
     mtype: "<type \'property\'>"

keras/api/golden/v1/tensorflow.keras.models.-linear-model.pbtxt

Lines changed: 4 additions & 0 deletions
@@ -37,6 +37,10 @@ tf_class {
     name: "dynamic"
     mtype: "<type \'property\'>"
   }
+  member {
+    name: "enable_tune_steps_per_execution"
+    mtype: "<type \'property\'>"
+  }
   member {
     name: "inbound_nodes"
     mtype: "<type \'property\'>"
