keras/src/backend/numpy/core.py (20 additions, 4 deletions)
@@ -1,3 +1,5 @@
+import warnings
+
 import numpy as np
 
 from keras.src import tree
@@ -285,7 +287,21 @@ def unstack(x, num=None, axis=0):
     return [x[i] for i in range(x.shape[0])]
 
 
-def custom_gradient(fun):
-    raise NotImplementedError(
-        "`custom_gradient` is not supported with numpy backend"
-    )
+class custom_gradient:
+    """Decorator for custom gradients.
+
+    Args:
+        fun: Forward pass function.
+    """
+
+    def __init__(self, fun):
+        warnings.warn(
+            "`custom_gradient` for the numpy backend acts as a pass-through to "
+            "support the forward pass. No gradient computation or modification "
+            "takes place."
+        )
+        self.fun = fun
+
+    def __call__(self, *args, **kwargs):
+        outputs, _ = self.fun(*args, **kwargs)
+        return outputs
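With this change, `custom_gradient` on the numpy backend becomes a pass-through: the decorated function still returns the `(output, grad_fn)` pair the other backends expect, but only the forward output is propagated and `grad_fn` is discarded. A minimal usage sketch (not part of the diff; it assumes the import path of the file above and the usual two-tuple convention):

```python
import numpy as np

from keras.src.backend.numpy.core import custom_gradient


@custom_gradient  # the warning fires here, at decoration time
def log1pexp(x):
    e = np.exp(x)

    def grad(upstream):
        # Never invoked on the numpy backend; no gradients are computed.
        return upstream * (1.0 - 1.0 / (1.0 + e))

    # Forward output plus gradient function, per the usual convention.
    return np.log(1.0 + e), grad


y = log1pexp(np.array([0.0, 1.0]))  # returns only the forward output
```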
keras/src/backend/numpy/numpy.py (2 additions, 0 deletions)
@@ -65,6 +65,8 @@ def matmul(x1, x2):
         dtype = "int32"
     else:
         dtype = dtypes.result_type(x1.dtype, x2.dtype)
+    x1 = x1.astype(dtype)
+    x2 = x2.astype(dtype)
     return np.matmul(x1, x2).astype(dtype)
 
 
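The two added `astype` calls make the operands carry the promoted dtype into `np.matmul` itself, rather than casting only the result. An illustration of the presumed motivation (hypothetical values, not from the PR): with low-precision integer inputs, numpy accumulates in the input dtype and wraps around before the trailing `.astype(dtype)` can help.

```python
import numpy as np

a = np.full((1, 4), 100, dtype="int8")
b = np.full((4, 1), 100, dtype="int8")

# Accumulates in int8, wraps around, then casts the already-wrong result.
wrapped = np.matmul(a, b).astype("int32")

# Casting the operands first keeps the accumulation in int32.
exact = np.matmul(a.astype("int32"), b.astype("int32"))

print(wrapped)  # [[64]]   (40000 wrapped modulo 256)
print(exact)    # [[40000]]
```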
keras/src/layers/core/dense_test.py (0 additions, 2 deletions)
@@ -334,7 +334,6 @@ def test_enable_lora_when_already_enabled(self):
 
     # Test quantization-related (int8 and float8) methods
 
-    @pytest.mark.requires_trainable_backend
     def test_quantize_int8(self):
         layer = layers.Dense(units=16)
         layer.build((None, 8))
@@ -764,7 +763,6 @@ def test_quantize_float8_fitting(self):
             len(model.non_trainable_weights),
         )
 
-    @pytest.mark.requires_trainable_backend
     def test_quantize_float8_inference(self):
         config = dict(units=16)
         layer = layers.Dense(**config)
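Dropping `@pytest.mark.requires_trainable_backend` here, and in `einsum_dense_test.py` below, lets these quantization tests run under the numpy backend as well: int8/float8 inference only needs the forward pass that the pass-through `custom_gradient` now supports. A rough sketch of the path the un-gated tests exercise (assuming the public `keras` API; shapes taken from the test above):

```python
import os

os.environ["KERAS_BACKEND"] = "numpy"  # must be set before importing keras

import numpy as np
from keras import layers

layer = layers.Dense(units=16)
layer.build((None, 8))
layer.quantize("int8")  # int8 kernel plus scales; forward-only from here on

y = layer(np.random.random((2, 8)))
print(y.shape)  # (2, 16)
```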
keras/src/layers/core/einsum_dense_test.py (0 additions, 3 deletions)
@@ -382,7 +382,6 @@ def test_lora_rank_argument(self):
 
     # Test quantization-related (int8 and float8) methods
 
-    @pytest.mark.requires_trainable_backend
     def test_quantize_int8(self):
         layer = layers.EinsumDense(
             equation="ab,bcd->acd",
@@ -471,7 +470,6 @@ def test_quantize_int8(self):
         ("btd,ndh->btnh", "btd,ndh->btnh", (None, 2, 8), (1, 2, 4)),
         ("btd,df->btf", "btd,df->btf", (None, 4), (1, 2, 4)),
     )
-    @pytest.mark.requires_trainable_backend
     def test_quantize_int8_with_specific_equations(
         self, equation, output_shape, input_shape
     ):
@@ -903,7 +901,6 @@ def test_quantize_float8_fitting(self):
             len(model.non_trainable_weights),
         )
 
-    @pytest.mark.requires_trainable_backend
     def test_quantize_float8_inference(self):
         config = dict(
             equation="ab,bcd->acd",