Fixes for reference before assignment… #18640

Open: wants to merge 11 commits into base: master
1 change: 1 addition & 0 deletions examples/demo_jax_distributed.py
@@ -287,6 +287,7 @@ def train_step(train_state, x, y):
 print("\nTraining:")
 data_iter = iter(train_data)
 for epoch in range(EPOCHS):
+    loss_value = None  # default
     for i in tqdm(range(STEPS_PER_EPOCH)):
         x, y = next(data_iter)
         sharded_x = jax.device_put(x.numpy(), data_sharding)
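The recurring bug class in this PR: a name bound only inside a loop (or only in some branches) is read afterwards, which raises UnboundLocalError when the binding path never ran. Binding a default first makes the later read safe. A minimal standalone sketch of this first pattern, with hypothetical names, not code from the PR:

def last_loss(steps):
    loss_value = None  # default, so the read below is always bound
    for step in range(steps):
        loss_value = 1.0 / (step + 1)  # stand-in for a real train step
    return loss_value

print(last_loss(3))  # 0.333...
print(last_loss(0))  # None; without the default this raises UnboundLocalError

The same shape explains the `training_logs = {}` additions to the JAX and torch trainers below: the epoch loop can run zero times, and the variable is consumed after it.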
4 changes: 2 additions & 2 deletions guides/custom_train_step_in_jax.py
@@ -124,7 +124,7 @@ def train_step(self, state, data):
         )

         # Update metrics.
-        new_metrics_vars = []
+        new_metrics_vars, logs = [], []
         for metric in self.metrics:
             this_metric_vars = metrics_variables[
                 len(new_metrics_vars) : len(new_metrics_vars)
@@ -314,7 +314,7 @@ def test_step(self, state, data):
         loss = self.compute_loss(x, y, y_pred)

         # Update metrics.
-        new_metrics_vars = []
+        new_metrics_vars, logs = [], []
         for metric in self.metrics:
             this_metric_vars = metrics_variables[
                 len(new_metrics_vars) : len(new_metrics_vars)
1 change: 1 addition & 0 deletions guides/distributed_training_with_jax.py
@@ -252,6 +252,7 @@ def get_replicated_train_state(devices):
 # Custom training loop
 for epoch in range(num_epochs):
     data_iter = iter(train_data)
+    loss_value = None  # default
     for data in data_iter:
         x, y = data
         sharded_x = jax.device_put(x.numpy(), data_sharding)
2 changes: 2 additions & 0 deletions keras/src/activations/activations.py
@@ -83,6 +83,8 @@ def static_call(x, negative_slope=0.0, max_value=None, threshold=0.0):
                 negative_part = backend.nn.relu(-x + threshold)
             else:
                 negative_part = backend.nn.relu(-x)
+        else:
+            negative_part = 1

         clip_max = max_value is not None
         if threshold != 0:
4 changes: 4 additions & 0 deletions keras/src/applications/densenet.py
@@ -289,6 +289,8 @@ def DenseNet(
                     cache_subdir="models",
                     file_hash="1ceb130c1ea1b78c3bf6114dbdfd8807",
                 )
+            else:
+                raise ValueError("weights_path undefined")
         else:
             if blocks == [6, 12, 24, 16]:
                 weights_path = file_utils.get_file(
@@ -311,6 +313,8 @@ def DenseNet(
                     cache_subdir="models",
                     file_hash="c13680b51ded0fb44dff2d8f86ac8bb1",
                 )
+            else:
+                raise ValueError("weights_path undefined")
         model.load_weights(weights_path)
     elif weights is not None:
         model.load_weights(weights)
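The second recurring pattern (also used in attention.py and normalization.py below): when an if/elif chain binds a variable that later code consumes unconditionally, add a final else that raises, so an unhandled case fails with a clear message instead of a NameError at the point of use. A condensed sketch with hypothetical file names:

def resolve_weights_path(blocks):
    if blocks == [6, 12, 24, 16]:
        weights_path = "densenet121_weights.h5"  # hypothetical name
    elif blocks == [6, 12, 32, 32]:
        weights_path = "densenet169_weights.h5"  # hypothetical name
    else:
        # Without this, an unknown `blocks` value surfaces later as
        # "NameError: weights_path" at the load_weights call site.
        raise ValueError("weights_path undefined")
    return weights_path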
1 change: 1 addition & 0 deletions keras/src/backend/jax/trainer.py
@@ -397,6 +397,7 @@ def fit(

         self.make_train_function()
         self.stop_training = False
+        training_logs = {}
         callbacks.on_train_begin()
         initial_epoch = self._initial_epoch or initial_epoch
         for epoch in range(initial_epoch, epochs):
1 change: 1 addition & 0 deletions keras/src/backend/torch/trainer.py
@@ -236,6 +236,7 @@ def fit(
         )

         self.stop_training = False
+        training_logs = {}
         self.make_train_function()
         callbacks.on_train_begin()
         initial_epoch = self._initial_epoch or initial_epoch
2 changes: 1 addition & 1 deletion keras/src/callbacks/reduce_lr_on_plateau.py
@@ -138,7 +138,7 @@ def on_epoch_end(self, epoch, logs=None):
                     self.model.optimizer.learning_rate = new_lr
                     if self.verbose > 0:
                         io_utils.print_msg(
-                            f"\nEpoch {epoch +1}: "
+                            f"\nEpoch {epoch + 1}: "
                             "ReduceLROnPlateau reducing "
                             f"learning rate to {new_lr}."
                         )
2 changes: 2 additions & 0 deletions keras/src/layers/attention/attention.py
@@ -134,6 +134,8 @@ def _calculate_scores(self, query, key):
             scores = self.concat_score_weight * ops.sum(
                 ops.tanh(q_reshaped + k_reshaped), axis=-1
             )
+        else:
+            raise ValueError("scores not computed")

         return scores
7 changes: 2 additions & 5 deletions keras/src/layers/preprocessing/center_crop_test.py
@@ -164,16 +164,13 @@ def test_input_smaller_than_crop_box(self, size, data_format):
     def test_tf_data_compatibility(self):
         if backend.config.image_data_format() == "channels_last":
             input_shape = (2, 10, 12, 3)
-            output_shape = (2, 8, 9, 3)
         else:
             input_shape = (2, 3, 10, 12)
-            output_shape = (2, 3, 8, 9)
         layer = layers.CenterCrop(8, 9)
         input_data = np.random.random(input_shape)
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
-        self.assertEqual(tuple(output.shape), output_shape)
+        output = next(iter(ds)).numpy()
+        self.assertEqual(list(output.shape), [2, 8, 9, 3])

     def test_list_compatibility(self):
         if backend.config.image_data_format() == "channels_last":
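The test-suite changes all apply one transformation: `for output in ds.take(1): ...` leaves `output` unbound when the dataset is empty, so an assertion placed after the loop can die with a confusing NameError rather than a real test failure; `next(iter(ds))` binds the first element directly and raises StopIteration at the true point of failure. A sketch of both forms over a plain iterable standing in for a tf.data.Dataset:

batches = []  # imagine a dataset that unexpectedly yields nothing

# Before: with an empty dataset the loop body never runs, so `output`
# stays unbound and the assertion raises a NameError instead of a
# useful failure:
#     for output in batches:
#         output = output * 2
#     assert output == 2

# After: bind the first element explicitly; emptiness now fails fast
# with StopIteration right here.
batches = [1, 2, 3]
output = next(iter(batches)) * 2
assert output == 2

The remaining preprocessing-test hunks below (and the feature_space.py change) are the same rewrite applied file by file.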
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/feature_space.py
@@ -513,8 +513,7 @@ def adapt(self, dataset):
             preprocessor = self.preprocessors[name]
             # TODO: consider adding an adapt progress bar.
             # Sample 1 element to check the rank
-            for x in feature_dataset.take(1):
-                pass
+            x = next(iter(feature_dataset))
             if len(x.shape) == 0:
                 # The dataset yields unbatched scalars; batch it.
                 feature_dataset = feature_dataset.batch(32)
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/hashed_crossing_test.py
@@ -86,8 +86,7 @@ def test_tf_data_compatibility(self):
             .batch(5)
             .map(lambda x1, x2: layer((x1, x2)))
         )
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(np.array([1, 4, 1, 1, 3]), output)

     def test_upsupported_shape_input_fails(self):
5 changes: 3 additions & 2 deletions keras/src/layers/preprocessing/hashing_test.py
@@ -60,8 +60,7 @@ def test_tf_data_compatibility(self):
         layer = layers.Hashing(num_bins=3)
         inp = [["A"], ["B"], ["C"], ["D"], ["E"]]
         ds = tf.data.Dataset.from_tensor_slices(inp).batch(5).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, np.array([[1], [0], [1], [1], [2]]))

     @parameterized.named_parameters(
@@ -306,6 +305,8 @@ def test_count_output(self, input_value, expected_output, output_shape):
             symbolic_sample_shape = ()
         elif input_array.ndim == 2:
             symbolic_sample_shape = (None,)
+        else:
+            raise TypeError("Unknown `symbolic_sample_shape`")
         inputs = layers.Input(shape=symbolic_sample_shape, dtype="int32")
         layer = layers.Hashing(num_bins=3, output_mode="count")
         outputs = layer(inputs)
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/integer_lookup_test.py
@@ -102,6 +102,5 @@ def test_tf_data_compatibility(self):
         )
         input_data = [2, 3, 4, 5]
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(4).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, np.array([2, 3, 4, 0]))
2 changes: 2 additions & 0 deletions keras/src/layers/preprocessing/normalization.py
@@ -277,6 +277,8 @@ def adapt(self, data):
                     batch_var + (batch_mean - new_total_mean) ** 2
                 ) * batch_weight
                 total_mean = new_total_mean
+        else:
+            raise NotImplementedError(type(data))

         self.adapt_mean.assign(total_mean)
         self.adapt_variance.assign(total_var)
2 changes: 2 additions & 0 deletions keras/src/layers/preprocessing/normalization_test.py
@@ -65,6 +65,8 @@ def test_normalization_adapt(self, input_type):
             data = backend.convert_to_tensor(x)
         elif input_type == "tf.data":
             data = tf_data.Dataset.from_tensor_slices(x).batch(8)
+        else:
+            raise NotImplementedError(input_type)

         layer = layers.Normalization()
         layer.adapt(data)
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/random_contrast_test.py
@@ -43,5 +43,4 @@ def test_tf_data_compatibility(self):
         layer = layers.RandomContrast(factor=0.5, seed=1337)
         input_data = np.random.random((2, 8, 8, 3))
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output.numpy()
+        next(iter(ds)).numpy()
7 changes: 2 additions & 5 deletions keras/src/layers/preprocessing/random_crop_test.py
@@ -92,12 +92,9 @@ def test_tf_data_compatibility(self):
         layer = layers.RandomCrop(8, 9)
         if backend.config.image_data_format() == "channels_last":
             input_shape = (2, 10, 12, 3)
-            output_shape = (2, 8, 9, 3)
         else:
             input_shape = (2, 3, 10, 12)
-            output_shape = (2, 3, 8, 9)
         input_data = np.random.random(input_shape)
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
-        self.assertEqual(tuple(output.shape), output_shape)
+        output = next(iter(ds)).numpy()
+        self.assertEqual(list(output.shape), [2, 8, 9, 3])
6 changes: 2 additions & 4 deletions keras/src/layers/preprocessing/random_flip_test.py
@@ -135,8 +135,7 @@ def test_tf_data_compatibility(self):
         input_data = np.array([[[2, 3, 4]], [[5, 6, 7]]])
         expected_output = np.array([[[5, 6, 7]], [[2, 3, 4]]])
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, expected_output)
         # Test 4D input: shape (2, 2, 1, 3)
         layer = layers.RandomFlip("vertical", seed=42)
@@ -159,6 +158,5 @@ def test_tf_data_compatibility(self):
             ]
         )
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, expected_output)
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/random_rotation_test.py
@@ -73,6 +73,5 @@ def test_tf_data_compatibility(self):
                 [4, 3, 2, 1, 0],
             ]
         ).reshape(input_shape[1:])
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(expected_output, output)
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/random_translation_test.py
@@ -327,5 +327,4 @@ def test_tf_data_compatibility(self):
         layer = layers.RandomTranslation(0.2, 0.1)
         input_data = np.random.random((1, 4, 4, 3))
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(1).map(layer)
-        for output in ds.take(1):
-            output.numpy()
+        next(iter(ds)).numpy()
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/random_zoom_test.py
@@ -119,8 +119,7 @@ def test_tf_data_compatibility(self):
                 [0, 0, 0, 0, 0],
             ]
         ).reshape(input_shape)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(expected_output, output)

     def test_dynamic_shape(self):
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/rescaling_test.py
@@ -72,8 +72,7 @@ def test_tf_data_compatibility(self):
         layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
         x = np.random.random((3, 10, 10, 3)) * 255
         ds = tf_data.Dataset.from_tensor_slices(x).batch(3).map(layer)
-        for output in ds.take(1):
-            output.numpy()
+        next(iter(ds)).numpy()

     def test_rescaling_with_channels_first_and_vector_scale(self):
         config = backend.image_data_format()
6 changes: 2 additions & 4 deletions keras/src/layers/preprocessing/resizing_test.py
@@ -186,8 +186,7 @@ def test_tf_data_compatibility(self):
         layer = layers.Resizing(8, 9)
         input_data = np.random.random(input_shape)
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertEqual(tuple(output.shape), output_shape)

     @pytest.mark.skipif(
@@ -210,6 +209,5 @@ def test_tf_data_compatibility_sequential(self):
             .batch(2)
             .map(Sequential([layer]))
         )
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertEqual(tuple(output.shape), output_shape)
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/string_lookup_test.py
@@ -77,8 +77,7 @@ def test_tf_data_compatibility(self):
         )
         input_data = ["b", "c", "d"]
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, np.array([2, 3, 0]))

     @pytest.mark.skipif(not backend.backend() == "tensorflow", reason="tf only")
6 changes: 2 additions & 4 deletions keras/src/layers/preprocessing/text_vectorization_test.py
@@ -95,8 +95,7 @@ def test_tf_data_compatibility(self):
         )
         input_data = [["foo qux bar"], ["qux baz"]]
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))

         # Test adapt flow
@@ -107,8 +106,7 @@ def test_tf_data_compatibility(self):
         )
         layer.adapt(input_data)
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output.numpy()
+        next(iter(ds)).numpy()

     @pytest.mark.skipif(
         backend.backend() != "tensorflow", reason="Requires string tensors."
2 changes: 2 additions & 0 deletions keras/src/legacy/backend.py
@@ -1279,6 +1279,8 @@ def relu(x, alpha=0.0, max_value=None, threshold=0.0):
             negative_part = tf.nn.relu(-x + threshold)
         else:
             negative_part = tf.nn.relu(-x)
+    else:
+        negative_part = 1

     clip_max = max_value is not None
5 changes: 3 additions & 2 deletions keras/src/models/model.py
@@ -393,6 +393,7 @@ def quantize(self, mode):
     def build_from_config(self, config):
         if not config:
             return
+        status = False
         if "input_shape" in config:
             # Case: all inputs are in the first arg (possibly nested).
             if utils.is_default(self.build):
@@ -404,7 +405,7 @@ def build_from_config(self, config):
                     self.build(config["input_shape"])
                     status = True
                 except:
-                    status = False
+                    pass
             self._build_shapes_dict = config

         elif "shapes_dict" in config:
@@ -416,7 +417,7 @@ def build_from_config(self, config):
                     self.build(**config["shapes_dict"])
                     status = True
                 except:
-                    status = False
+                    pass
             self._build_shapes_dict = config["shapes_dict"]

         if not status:
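Hoisting `status = False` above the branch chain makes the trailing `if not status:` check safe on every path, including configs that carry neither "input_shape" nor "shapes_dict"; the except blocks can then simply `pass` instead of re-assigning the flag. A condensed sketch of the control flow, with hypothetical names:

def try_build(config, build):
    status = False  # bound on every path through the branches below
    if "input_shape" in config:
        try:
            build(config["input_shape"])
            status = True
        except Exception:
            pass  # status stays False
    elif "shapes_dict" in config:
        try:
            build(**config["shapes_dict"])
            status = True
        except Exception:
            pass
    if not status:  # previously a NameError for unrecognized configs
        print("could not rebuild from config")

try_build({}, build=print)  # neither key present: prints the fallback message

The `num_outputs = 1  # default` hoists in compile_utils.py below follow the same recipe.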
1 change: 1 addition & 0 deletions keras/src/models/sequential.py
@@ -351,6 +351,7 @@ def from_config(cls, config, custom_objects=None):
             model.add(layer)
         if (
             not model._functional
+            and "build_input_shape" in locals()
             and build_input_shape
             and isinstance(build_input_shape, (tuple, list))
         ):
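The added guard uses the `"name" in locals()` idiom: it asks whether the name was ever bound in the current scope before evaluating it, which avoids an UnboundLocalError when the deserialized config never set `build_input_shape`. A minimal sketch of the idiom, hypothetical names:

def maybe_build(config):
    if "build_input_shape" in config:
        build_input_shape = config["build_input_shape"]
    # Only touch the name if some branch above actually bound it.
    if "build_input_shape" in locals() and build_input_shape:
        print("building with", build_input_shape)

maybe_build({"build_input_shape": (None, 4)})  # builds
maybe_build({})                                # silently skips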
2 changes: 1 addition & 1 deletion keras/src/ops/function.py
@@ -318,7 +318,7 @@ def map_graph(inputs, outputs):
                         "The following previous operations were accessed "
                         f"without issue: {operations_with_complete_input}"
                     )
-                operations_with_complete_input.append(operation.name)
+                operations_with_complete_input.append(node.operation.name)

             for x in tree.flatten(node.outputs):
                 computable_tensors.add(x)
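This one is a plain scoping fix rather than a missing default: the old line read `operation`, a name not reliably bound at that point in `map_graph` (depending on the surrounding code it is either unbound or a stale binding from an earlier block), while the intended value hangs off the node being visited. A toy illustration of the stale-binding variant, hypothetical names:

class Node:
    def __init__(self, operation):
        self.operation = operation

names = []
for operation in ["a", "b"]:  # the loop variable leaks past the loop
    pass

for node in [Node("c"), Node("d")]:
    # Bug: `operation` still refers to "b" from the loop above.
    # names.append(operation)       # would record "b", "b"
    names.append(node.operation)    # records "c", "d"

print(names)  # ['c', 'd']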
2 changes: 2 additions & 0 deletions keras/src/optimizers/base_optimizer.py
@@ -847,6 +847,8 @@ def get_config(self):
             learning_rate = serialization_lib.serialize_keras_object(
                 self._learning_rate
             )
+        else:
+            learning_rate = 0.5

         config = {
             "name": self.name,
4 changes: 2 additions & 2 deletions keras/src/trainers/compile_utils.py
@@ -170,6 +170,7 @@ def variables(self):
         return vars

     def build(self, y_true, y_pred):
+        num_outputs = 1  # default
         if self.output_names:
             output_names = self.output_names
         elif isinstance(y_pred, dict):
@@ -182,7 +183,6 @@ def build(self, y_true, y_pred):
                 output_names = None
         else:
             output_names = None
-        num_outputs = 1
         if output_names:
             num_outputs = len(output_names)
@@ -430,6 +430,7 @@ def __init__(
         super().__init__(name="compile_loss", reduction=reduction)

     def build(self, y_true, y_pred):
+        num_outputs = 1  # default
         if self.output_names:
             output_names = self.output_names
         elif isinstance(y_pred, dict):
@@ -442,7 +443,6 @@ def build(self, y_true, y_pred):
                 output_names = None
         else:
             output_names = None
-        num_outputs = 1
         if output_names:
             num_outputs = len(output_names)