class DataAdapter(object):
"""Base class for input data adapters.
    The purpose of a DataAdapter is to provide a unified interface to
iterate over input data provided in a variety of formats -- such as
NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc.
"""
def get_numpy_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields NumPy
arrays.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_tf_dataset(self):
"""Get a `tf.data.Dataset` instance for the DataAdapter.
        Note that the returned dataset does not repeat across epochs, so the
        caller might need to create a new iterator for the same dataset at
        the beginning of each epoch. This behavior might change in the
        future.
        Returns:
            A `tf.data.Dataset`. The caller might use the dataset in
            different contexts, e.g. calling `iter(dataset)` in eager mode
            to get the values directly, or, in graph mode, providing the
            iterator tensor to the Keras model function.
"""
raise NotImplementedError
def get_jax_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields JAX arrays.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_torch_dataloader(self):
"""Get a Torch `DataLoader` for the `DataAdapter`.
Returns:
A Torch `DataLoader`.
"""
raise NotImplementedError
@property
def num_batches(self):
"""Return the size (number of batches) for the dataset created.
        For certain types of input data, the number of batches is known,
        e.g. for NumPy data the size is `number_of_elements / batch_size`.
        For a dataset or a Python generator, the size is unknown since it
        may or may not have an end state.
        Returns:
            int, the number of batches for the dataset, or None if it is
            unknown. The caller can use this to control the training loop,
            show a progress bar, or handle an unexpected `StopIteration`
            error.
"""
raise NotImplementedError
@property
def batch_size(self):
"""Return the batch size of the dataset created.
        For certain types of input data, the batch size is known, and even
        required, e.g. for NumPy arrays. For a dataset, the batch size is
        unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
@property
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@property
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
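# Hedged illustration (not part of the original file): a minimal sketch of a
# concrete adapter over an in-memory list of NumPy batches. The class name and
# data layout below are made up for demonstration; the real adapters also
# implement the TF/JAX/torch conversion methods and richer validation.
#
#   import numpy as np
#
#   class ListDataAdapter(DataAdapter):
#       def __init__(self, batches):
#           # `batches` is e.g. [(x_batch, y_batch), ...] of NumPy arrays.
#           self._batches = list(batches)
#
#       def get_numpy_iterator(self):
#           return iter(self._batches)
#
#       @property
#       def num_batches(self):
#           return len(self._batches)
#
#       @property
#       def batch_size(self):
#           return len(self._batches[0][0]) if self._batches else None
#
#       @property
#       def has_partial_batch(self):
#           return False
#
#       @property
#       def partial_batch_size(self):
#           return None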
# ==== Source file: keras/keras/trainers/data_adapters/data_adapter.py ====
import numpy as np
from keras.api_export import keras_export
from keras.utils import dataset_utils
from keras.utils.module_utils import tensorflow as tf
from keras.utils.module_utils import tensorflow_io as tfio
ALLOWED_FORMATS = (".wav",)
@keras_export("keras.utils.audio_dataset_from_directory")
def audio_dataset_from_directory(
directory,
labels="inferred",
label_mode="int",
class_names=None,
batch_size=32,
sampling_rate=None,
output_sequence_length=None,
ragged=False,
shuffle=True,
seed=None,
validation_split=None,
subset=None,
follow_links=False,
):
"""Generates a `tf.data.Dataset` from audio files in a directory.
If your directory structure is:
```
main_directory/
...class_a/
......a_audio_1.wav
......a_audio_2.wav
...class_b/
......b_audio_1.wav
......b_audio_2.wav
```
Then calling `audio_dataset_from_directory(main_directory,
labels='inferred')`
will return a `tf.data.Dataset` that yields batches of audio files from
the subdirectories `class_a` and `class_b`, together with labels
0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).
Only `.wav` files are supported at this time.
Args:
directory: Directory where the data is located.
If `labels` is `"inferred"`, it should contain subdirectories,
each containing audio files for a class. Otherwise, the directory
structure is ignored.
labels: Either "inferred" (labels are generated from the directory
structure), `None` (no labels), or a list/tuple of integer labels
of the same size as the number of audio files found in
the directory. Labels should be sorted according to the
alphanumeric order of the audio file paths
(obtained via `os.walk(directory)` in Python).
label_mode: String describing the encoding of `labels`. Options are:
- `"int"`: means that the labels are encoded as integers (e.g. for
`sparse_categorical_crossentropy` loss).
- `"categorical"` means that the labels are encoded as a categorical
vector (e.g. for `categorical_crossentropy` loss)
- `"binary"` means that the labels (there can be only 2)
are encoded as `float32` scalars with values 0
or 1 (e.g. for `binary_crossentropy`).
- `None` (no labels).
        class_names: Only valid if `labels` is `"inferred"`.
This is the explicit list of class names
(must match names of subdirectories). Used to control the order
of the classes (otherwise alphanumerical order is used).
batch_size: Size of the batches of data. Default: 32. If `None`,
the data will not be batched
(the dataset will yield individual samples).
sampling_rate: Audio sampling rate (in samples per second).
output_sequence_length: Maximum length of an audio sequence. Audio files
longer than this will be truncated to `output_sequence_length`.
If set to `None`, then all sequences in the same batch will
be padded to the
length of the longest sequence in the batch.
ragged: Whether to return a Ragged dataset (where each sequence has its
own length). Defaults to `False`.
shuffle: Whether to shuffle the data. Defaults to `True`.
If set to `False`, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
validation_split: Optional float between 0 and 1, fraction of data to
reserve for validation.
subset: Subset of the data to return. One of `"training"`,
`"validation"` or `"both"`. Only used if `validation_split` is set.
        follow_links: Whether to visit subdirectories pointed to by symlinks.
Defaults to `False`.
Returns:
A `tf.data.Dataset` object.
- If `label_mode` is `None`, it yields `string` tensors of shape
`(batch_size,)`, containing the contents of a batch of audio files.
- Otherwise, it yields a tuple `(audio, labels)`, where `audio`
has shape `(batch_size, sequence_length, num_channels)` and `labels`
follows the format described
below.
Rules regarding labels format:
- if `label_mode` is `int`, the labels are an `int32` tensor of shape
`(batch_size,)`.
- if `label_mode` is `binary`, the labels are a `float32` tensor of
1s and 0s of shape `(batch_size, 1)`.
- if `label_mode` is `categorical`, the labels are a `float32` tensor
of shape `(batch_size, num_classes)`, representing a one-hot
encoding of the class index.
"""
if labels not in ("inferred", None):
if not isinstance(labels, (list, tuple)):
raise ValueError(
"The `labels` argument should be a list/tuple of integer "
"labels, of the same size as the number of audio files in "
"the target directory. If you wish to infer the labels from "
"the subdirectory names in the target directory,"
' pass `labels="inferred"`. '
"If you wish to get a dataset that only contains audio samples "
f"(no labels), pass `labels=None`. Received: labels={labels}"
)
if class_names:
raise ValueError(
"You can only pass `class_names` if "
f'`labels="inferred"`. Received: labels={labels}, and '
f"class_names={class_names}"
)
if label_mode not in {"int", "categorical", "binary", None}:
raise ValueError(
'`label_mode` argument must be one of "int", "categorical", '
'"binary", '
f"or None. Received: label_mode={label_mode}"
)
if ragged and output_sequence_length is not None:
raise ValueError(
"Cannot set both `ragged` and `output_sequence_length`"
)
if sampling_rate is not None:
if not isinstance(sampling_rate, int):
raise ValueError(
"`sampling_rate` should have an integer value. "
f"Received: sampling_rate={sampling_rate}"
)
if sampling_rate <= 0:
raise ValueError(
"`sampling_rate` should be higher than 0. "
f"Received: sampling_rate={sampling_rate}"
)
if not tfio.available:
raise ImportError(
"To use the argument `sampling_rate`, you should install "
"tensorflow_io. You can install it via `pip install "
"tensorflow-io`."
)
if labels is None or label_mode is None:
labels = None
label_mode = None
dataset_utils.check_validation_split_arg(
validation_split, subset, shuffle, seed
)
if seed is None:
seed = np.random.randint(1e6)
file_paths, labels, class_names = dataset_utils.index_directory(
directory,
labels,
formats=ALLOWED_FORMATS,
class_names=class_names,
shuffle=shuffle,
seed=seed,
follow_links=follow_links,
)
if label_mode == "binary" and len(class_names) != 2:
raise ValueError(
'When passing `label_mode="binary"`, there must be exactly 2 '
f"class_names. Received: class_names={class_names}"
)
if subset == "both":
train_dataset, val_dataset = get_training_and_validation_dataset(
file_paths=file_paths,
labels=labels,
validation_split=validation_split,
directory=directory,
label_mode=label_mode,
class_names=class_names,
sampling_rate=sampling_rate,
output_sequence_length=output_sequence_length,
ragged=ragged,
)
train_dataset = prepare_dataset(
dataset=train_dataset,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
class_names=class_names,
output_sequence_length=output_sequence_length,
ragged=ragged,
)
val_dataset = prepare_dataset(
dataset=val_dataset,
batch_size=batch_size,
shuffle=False,
seed=seed,
class_names=class_names,
output_sequence_length=output_sequence_length,
ragged=ragged,
)
return train_dataset, val_dataset
else:
dataset = get_dataset(
file_paths=file_paths,
labels=labels,
directory=directory,
validation_split=validation_split,
subset=subset,
label_mode=label_mode,
class_names=class_names,
sampling_rate=sampling_rate,
output_sequence_length=output_sequence_length,
ragged=ragged,
)
dataset = prepare_dataset(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
class_names=class_names,
output_sequence_length=output_sequence_length,
ragged=ragged,
)
return dataset
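# Hedged usage sketch (not part of the original file): the directory layout,
# path, and parameter values below are hypothetical. With `subset="both"` the
# call returns a (train, validation) pair, and `class_names` is attached to
# each returned dataset by `prepare_dataset` below.
#
#   train_ds, val_ds = audio_dataset_from_directory(
#       "path/to/audio_dir",           # one sub-directory of .wav files per class
#       validation_split=0.2,
#       subset="both",
#       seed=1337,
#       batch_size=32,
#       output_sequence_length=16000,  # pad/truncate clips (1 second at 16 kHz)
#   )
#   print(train_ds.class_names)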
def prepare_dataset(
dataset,
batch_size,
shuffle,
seed,
class_names,
output_sequence_length,
ragged,
):
dataset = dataset.prefetch(tf.data.AUTOTUNE)
if batch_size is not None:
if shuffle:
dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
if output_sequence_length is None and not ragged:
dataset = dataset.padded_batch(
batch_size, padded_shapes=([None, None], [])
)
else:
dataset = dataset.batch(batch_size)
else:
if shuffle:
dataset = dataset.shuffle(buffer_size=1024, seed=seed)
# Users may need to reference `class_names`.
dataset.class_names = class_names
return dataset
def get_training_and_validation_dataset(
file_paths,
labels,
validation_split,
directory,
label_mode,
class_names,
sampling_rate,
output_sequence_length,
ragged,
):
(
file_paths_train,
labels_train,
) = dataset_utils.get_training_or_validation_split(
file_paths, labels, validation_split, "training"
)
if not file_paths_train:
raise ValueError(
f"No training audio files found in directory {directory}. "
f"Allowed format(s): {ALLOWED_FORMATS}"
)
file_paths_val, labels_val = dataset_utils.get_training_or_validation_split(
file_paths, labels, validation_split, "validation"
)
if not file_paths_val:
raise ValueError(
f"No validation audio files found in directory {directory}. "
f"Allowed format(s): {ALLOWED_FORMATS}"
)
train_dataset = paths_and_labels_to_dataset(
file_paths=file_paths_train,
labels=labels_train,
label_mode=label_mode,
num_classes=len(class_names) if class_names else 0,
sampling_rate=sampling_rate,
output_sequence_length=output_sequence_length,
ragged=ragged,
)
val_dataset = paths_and_labels_to_dataset(
file_paths=file_paths_val,
labels=labels_val,
label_mode=label_mode,
num_classes=len(class_names) if class_names else 0,
sampling_rate=sampling_rate,
output_sequence_length=output_sequence_length,
ragged=ragged,
)
return train_dataset, val_dataset
def get_dataset(
file_paths,
labels,
directory,
validation_split,
subset,
label_mode,
class_names,
sampling_rate,
output_sequence_length,
ragged,
):
file_paths, labels = dataset_utils.get_training_or_validation_split(
file_paths, labels, validation_split, subset
)
if not file_paths:
raise ValueError(
f"No audio files found in directory {directory}. "
f"Allowed format(s): {ALLOWED_FORMATS}"
)
dataset = paths_and_labels_to_dataset(
file_paths=file_paths,
labels=labels,
label_mode=label_mode,
num_classes=len(class_names) if class_names else 0,
sampling_rate=sampling_rate,
output_sequence_length=output_sequence_length,
ragged=ragged,
)
return dataset
def read_and_decode_audio(
path, sampling_rate=None, output_sequence_length=None
):
"""Reads and decodes audio file."""
audio = tf.io.read_file(path)
if output_sequence_length is None:
output_sequence_length = -1
audio, default_audio_rate = tf.audio.decode_wav(
contents=audio, desired_samples=output_sequence_length
)
if sampling_rate is not None:
# default_audio_rate should have dtype=int64
default_audio_rate = tf.cast(default_audio_rate, tf.int64)
audio = tfio.audio.resample(
input=audio, rate_in=default_audio_rate, rate_out=sampling_rate
)
return audio
def paths_and_labels_to_dataset(
file_paths,
labels,
label_mode,
num_classes,
sampling_rate,
output_sequence_length,
ragged,
):
"""Constructs a fixed-size dataset of audio and labels."""
path_ds = tf.data.Dataset.from_tensor_slices(file_paths)
audio_ds = path_ds.map(
lambda x: read_and_decode_audio(
x, sampling_rate, output_sequence_length
),
num_parallel_calls=tf.data.AUTOTUNE,
)
if ragged:
audio_ds = audio_ds.map(
lambda x: tf.RaggedTensor.from_tensor(x),
num_parallel_calls=tf.data.AUTOTUNE,
)
if label_mode:
label_ds = dataset_utils.labels_to_dataset(
labels, label_mode, num_classes
)
audio_ds = tf.data.Dataset.zip((audio_ds, label_ds))
return audio_ds
# ==== Source file: keras/keras/utils/audio_dataset_utils.py ====
from keras import backend
def is_in_jax_tracing_scope(x=None):
if backend.backend() == "jax":
if x is None:
x = backend.numpy.ones(())
if x.__class__.__name__ == "DynamicJaxprTracer":
return True
return False
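# Hedged usage sketch (not part of the original file): a hypothetical helper
# that branches on whether it is being traced by JAX (e.g. under `jax.jit`),
# so that Python side effects are skipped while tracing.
#
#   def debug_print(x):
#       if not is_in_jax_tracing_scope(x):
#           print("value:", x)  # only runs eagerly, not during tracing
#       return x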
# ==== Source file: keras/keras/utils/jax_utils.py ====
import math
import re
import shutil
import rich
import rich.console
import rich.markup
# See https://github.com/keras-team/keras/issues/448
# for below imports
import rich.table
import tree
from keras import backend
from keras.utils import dtype_utils
from keras.utils import io_utils
def count_params(weights):
shapes = [v.shape for v in weights]
return int(sum(math.prod(p) for p in shapes))
def weight_memory_size(weights):
"""Compute the memory footprint for weights based on their dtypes.
Args:
        weights: An iterable containing the weights for which to compute
            the memory size.
Returns:
The total memory size (in Bytes) of the weights.
"""
unique_weights = {id(w): w for w in weights}.values()
total_memory_size = 0
for w in unique_weights:
weight_shape = math.prod(w.shape)
dtype = backend.standardize_dtype(w.dtype)
per_param_size = dtype_utils.dtype_size(dtype)
total_memory_size += weight_shape * per_param_size
return total_memory_size / 8
def readable_memory_size(weight_memory_size):
"""Convert the weight memory size (Bytes) to a readable string."""
units = ["B", "KB", "MB", "GB", "TB", "PB"]
scale = 1024
for unit in units:
if weight_memory_size / scale < 1:
return "{:.2f} {}".format(weight_memory_size, unit)
else:
weight_memory_size /= scale
return "{:.2f} {}".format(weight_memory_size, units[-1])
def highlight_number(x):
"""Themes numbers in a summary using rich markup.
We use a separate color for `None`s, e.g. in a layer shape.
"""
if x is None:
return f"[color(45)]{x}[/]"
else:
return f"[color(34)]{x}[/]"
def highlight_symbol(x):
"""Themes keras symbols in a summary using rich markup."""
return f"[color(33)]{x}[/]"
def bold_text(x, color=None):
"""Bolds text using rich markup."""
if color:
return f"[bold][color({color})]{x}[/][/]"
return f"[bold]{x}[/]"
def format_layer_shape(layer):
if not layer._inbound_nodes:
return "?"
def format_shape(shape):
highlighted = [highlight_number(x) for x in shape]
return "(" + ", ".join(highlighted) + ")"
for i in range(len(layer._inbound_nodes)):
outputs = layer._inbound_nodes[i].output_tensors
output_shapes = tree.map_structure(
lambda x: format_shape(x.shape), outputs
)
if len(output_shapes) == 1:
return output_shapes[0]
out = str(output_shapes)
out = out.replace("'", "")
return out
def print_summary(
model,
line_length=None,
positions=None,
print_fn=None,
expand_nested=False,
show_trainable=False,
layer_range=None,
):
"""Prints a summary of a model.
Args:
model: Keras model instance.
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
        positions: Relative or absolute positions of log elements in each
            line. If not provided, defaults to `[0.45, 0.80, 1.0]` for
            sequential-like models and `[0.3, 0.56, 0.74, 1.0]` otherwise.
print_fn: Print function to use.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
It defaults to `print` (prints to stdout).
expand_nested: Whether to expand the nested models.
If not provided, defaults to `False`.
show_trainable: Whether to show if a layer is trainable.
If not provided, defaults to `False`.
layer_range: List or tuple containing two strings,
the starting layer name and ending layer name (both inclusive),
indicating the range of layers to be printed in the summary. The
strings could also be regexes instead of an exact name. In this
case, the starting layer will be the first layer that matches
`layer_range[0]` and the ending layer will be the last element that
matches `layer_range[1]`. By default (`None`) all
layers in the model are included in the summary.
"""
from keras.models import Functional
from keras.models import Sequential
if not print_fn and not io_utils.is_interactive_logging_enabled():
print_fn = io_utils.print_msg
if isinstance(model, Sequential):
sequential_like = True
layers = model.layers
elif not isinstance(model, Functional):
# We treat subclassed models as a simple sequence of layers, for logging
# purposes.
sequential_like = True
layers = model.layers
else:
layers = model._operations
sequential_like = True
nodes_by_depth = model._nodes_by_depth.values()
nodes = []
for v in nodes_by_depth:
if (len(v) > 1) or (
len(v) == 1 and len(tree.flatten(v[0].input_tensors)) > 1
):
# if the model has multiple nodes
# or if the nodes have multiple inbound_layers
# the model is no longer sequential
sequential_like = False
break
nodes += v
if sequential_like:
# search for shared layers
for layer in model.layers:
flag = False
for node in layer._inbound_nodes:
if node in nodes:
if flag:
sequential_like = False
break
else:
flag = True
if not sequential_like:
break
if sequential_like:
default_line_length = 88
positions = positions or [0.45, 0.80, 1.0]
# header names for the different log elements
header = ["Layer (type)", "Output Shape", "Param #"]
alignment = ["left", "left", "right"]
else:
default_line_length = 108
positions = positions or [0.3, 0.56, 0.74, 1.0]
# header names for the different log elements
header = ["Layer (type)", "Output Shape", "Param #", "Connected to"]
alignment = ["left", "left", "right", "left"]
relevant_nodes = []
for v in model._nodes_by_depth.values():
relevant_nodes += v
if show_trainable:
default_line_length += 12
positions = [p * 0.90 for p in positions] + [1.0]
header.append("Trainable")
alignment.append("center")
# Compute columns widths
default_line_length = min(
default_line_length, shutil.get_terminal_size().columns - 4
)
line_length = line_length or default_line_length
column_widths = []
current = 0
for pos in positions:
width = int(pos * line_length) - current
if width < 4:
raise ValueError("Insufficient console width to print summary.")
column_widths.append(width)
current += width
# Render summary as a rich table.
columns = []
# Right align parameter counts.
for i, name in enumerate(header):
column = rich.table.Column(
name,
justify=alignment[i],
width=column_widths[i],
)
columns.append(column)
table = rich.table.Table(*columns, width=line_length, show_lines=True)
def get_connections(layer):
connections = ""
for node in layer._inbound_nodes:
if relevant_nodes and node not in relevant_nodes:
# node is not part of the current network
continue
for kt in node.input_tensors:
keras_history = kt._keras_history
inbound_layer = keras_history.operation
node_index = highlight_number(keras_history.node_index)
tensor_index = highlight_number(keras_history.tensor_index)
if connections:
connections += ", "
connections += (
f"{inbound_layer.name}[{node_index}][{tensor_index}]"
)
if not connections:
connections = "-"
return connections
def get_layer_fields(layer, prefix=""):
output_shape = format_layer_shape(layer)
name = prefix + layer.name
cls_name = layer.__class__.__name__
name = rich.markup.escape(name)
name += f" ({highlight_symbol(rich.markup.escape(cls_name))})"
if not hasattr(layer, "built"):
params = highlight_number(0)
elif not layer.built:
params = highlight_number(0) + " (unbuilt)"
else:
params = highlight_number(f"{layer.count_params():,}")
fields = [name, output_shape, params]
if not sequential_like:
fields.append(get_connections(layer))
if show_trainable:
if layer.weights:
fields.append(
bold_text("Y", color=34)
if layer.trainable
else bold_text("N", color=9)
)
else:
fields.append(bold_text("-"))
return fields
def print_layer(layer, nested_level=0):
if nested_level:
prefix = " " * nested_level + "└" + " "
else:
prefix = ""
fields = get_layer_fields(layer, prefix=prefix)
rows = [fields]
if expand_nested and hasattr(layer, "layers") and layer.layers:
nested_layers = layer.layers
nested_level += 1
for i in range(len(nested_layers)):
rows.extend(
print_layer(nested_layers[i], nested_level=nested_level)
)
return rows
# Render all layers to the rich table.
layer_range = get_layer_index_bound_by_layer_name(layers, layer_range)
for layer in layers[layer_range[0] : layer_range[1]]:
for row in print_layer(layer):
table.add_row(*row)
# After the table, append information about parameter count and size.
if hasattr(model, "_collected_trainable_weights"):
trainable_count = count_params(model._collected_trainable_weights)
trainable_memory_size = weight_memory_size(
model._collected_trainable_weights
)
else:
trainable_count = count_params(model.trainable_weights)
trainable_memory_size = weight_memory_size(model.trainable_weights)
non_trainable_count = count_params(model.non_trainable_weights)
non_trainable_memory_size = weight_memory_size(model.non_trainable_weights)
if model.compiled and model.optimizer and model.optimizer.built:
optimizer_weight_count = count_params(model.optimizer.variables)
optimizer_memory_size = weight_memory_size(model.optimizer.variables)
optimizer_built = True
else:
optimizer_weight_count = 0
optimizer_memory_size = 0
optimizer_built = False
total_count = trainable_count + non_trainable_count + optimizer_weight_count
total_memory_size = (
trainable_memory_size
+ non_trainable_memory_size
+ optimizer_memory_size
)
# Create a rich console for printing. Capture for non-interactive logging.
if print_fn:
console = rich.console.Console(
highlight=False, force_terminal=False, color_system=None
)
console.begin_capture()
else:
console = rich.console.Console(highlight=False)
    # Print to the console.
console.print(bold_text(f'Model: "{rich.markup.escape(model.name)}"'))
console.print(table)
console.print(
bold_text(" Total params: ")
+ highlight_number(f"{total_count:,}")
+ f" ({readable_memory_size(total_memory_size)})"
)
console.print(
bold_text(" Trainable params: ")
+ highlight_number(f"{trainable_count:,}")
+ f" ({readable_memory_size(trainable_memory_size)})"
)
console.print(
bold_text(" Non-trainable params: ")
+ highlight_number(f"{non_trainable_count:,}")
+ f" ({readable_memory_size(non_trainable_memory_size)})"
)
if optimizer_built:
console.print(
bold_text(" Optimizer params: ")
+ highlight_number(f"{optimizer_weight_count:,}")
+ f" ({readable_memory_size(optimizer_memory_size)})"
)
# Output captured summary for non-interactive logging.
if print_fn:
print_fn(console.end_capture(), line_break=False)
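# Hedged usage sketch (not part of the original file): `print_summary` is
# normally reached through `Model.summary()`, which forwards these arguments.
# The layer names in `layer_range` are hypothetical and, per the docstring,
# may be regexes.
#
#   model.summary(
#       expand_nested=True,
#       show_trainable=True,
#       layer_range=["conv1.*", "block3_.*"],
#   )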
def get_layer_index_bound_by_layer_name(layers, layer_range=None):
"""Get the layer indexes from the model based on layer names.
The layer indexes can be used to slice the model into sub models for
display.
Args:
model: `Model` instance.
layer_names: a list or tuple of 2 strings, the starting layer name and
ending layer name (both inclusive) for the result. All layers will
be included when `None` is provided.
Returns:
The index value of layer based on its unique name (layer_names).
Output will be [first_layer_index, last_layer_index + 1].
"""
if layer_range is not None:
if len(layer_range) != 2:
raise ValueError(
"layer_range must be a list or tuple of length 2. Received: "
f"layer_range = {layer_range} of length {len(layer_range)}"
)
if not isinstance(layer_range[0], str) or not isinstance(
layer_range[1], str
):
raise ValueError(
"layer_range should contain string type only. "
f"Received: {layer_range}"
)
else:
return [0, len(layers)]
lower_index = [
idx
for idx, layer in enumerate(layers)
if re.match(layer_range[0], layer.name)
]
upper_index = [
idx
for idx, layer in enumerate(layers)
if re.match(layer_range[1], layer.name)
]
if not lower_index or not upper_index:
raise ValueError(
"Passed layer_names do not match the layer names in the model. "
f"Received: {layer_range}"
)
if min(lower_index) > max(upper_index):
return [min(upper_index), max(lower_index) + 1]
return [min(lower_index), max(upper_index) + 1]
# ==== Source file: keras/keras/utils/summary_utils.py ====
#!/bin/bash
isort --sl tf_keras
black --line-length 80 tf_keras
flake8 tf_keras
# ==== Source file: tf-keras/shell/format.sh ====
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TF-Keras applications."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl import flags
from absl.testing import parameterized
from tf_keras.applications import convnext
from tf_keras.applications import densenet
from tf_keras.applications import efficientnet
from tf_keras.applications import efficientnet_v2
from tf_keras.applications import inception_resnet_v2
from tf_keras.applications import inception_v3
from tf_keras.applications import mobilenet
from tf_keras.applications import mobilenet_v2
from tf_keras.applications import mobilenet_v3
from tf_keras.applications import nasnet
from tf_keras.applications import regnet
from tf_keras.applications import resnet
from tf_keras.applications import resnet_rs
from tf_keras.applications import resnet_v2
from tf_keras.applications import vgg16
from tf_keras.applications import vgg19
from tf_keras.applications import xception
from tf_keras.utils import data_utils
from tf_keras.utils import image_utils
ARG_TO_MODEL = {
"resnet": (resnet, [resnet.ResNet50, resnet.ResNet101, resnet.ResNet152]),
"resnet_v2": (
resnet_v2,
[resnet_v2.ResNet50V2, resnet_v2.ResNet101V2, resnet_v2.ResNet152V2],
),
"vgg16": (vgg16, [vgg16.VGG16]),
"vgg19": (vgg19, [vgg19.VGG19]),
"xception": (xception, [xception.Xception]),
"inception_v3": (inception_v3, [inception_v3.InceptionV3]),
"inception_resnet_v2": (
inception_resnet_v2,
[inception_resnet_v2.InceptionResNetV2],
),
"mobilenet": (mobilenet, [mobilenet.MobileNet]),
"mobilenet_v2": (mobilenet_v2, [mobilenet_v2.MobileNetV2]),
"mobilenet_v3_small": (mobilenet_v3, [mobilenet_v3.MobileNetV3Small]),
"mobilenet_v3_large": (mobilenet_v3, [mobilenet_v3.MobileNetV3Large]),
"convnext": (
convnext,
[
convnext.ConvNeXtTiny,
convnext.ConvNeXtSmall,
convnext.ConvNeXtBase,
convnext.ConvNeXtLarge,
convnext.ConvNeXtXLarge,
],
),
"densenet": (
densenet,
[densenet.DenseNet121, densenet.DenseNet169, densenet.DenseNet201],
),
"nasnet_mobile": (nasnet, [nasnet.NASNetMobile]),
"nasnet_large": (nasnet, [nasnet.NASNetLarge]),
"efficientnet": (
efficientnet,
[
efficientnet.EfficientNetB0,
efficientnet.EfficientNetB1,
efficientnet.EfficientNetB2,
efficientnet.EfficientNetB3,
efficientnet.EfficientNetB4,
efficientnet.EfficientNetB5,
efficientnet.EfficientNetB6,
efficientnet.EfficientNetB7,
],
),
"efficientnet_v2": (
efficientnet_v2,
[
efficientnet_v2.EfficientNetV2B0,
efficientnet_v2.EfficientNetV2B1,
efficientnet_v2.EfficientNetV2B2,
efficientnet_v2.EfficientNetV2B3,
efficientnet_v2.EfficientNetV2S,
efficientnet_v2.EfficientNetV2M,
efficientnet_v2.EfficientNetV2L,
],
),
"resnet_rs": (
resnet_rs,
[
resnet_rs.ResNetRS50,
resnet_rs.ResNetRS101,
resnet_rs.ResNetRS152,
resnet_rs.ResNetRS200,
resnet_rs.ResNetRS270,
resnet_rs.ResNetRS350,
resnet_rs.ResNetRS420,
],
),
"regnet": (
regnet,
[
regnet.RegNetX002,
regnet.RegNetX004,
regnet.RegNetX006,
regnet.RegNetX008,
regnet.RegNetX016,
regnet.RegNetX032,
regnet.RegNetX040,
regnet.RegNetX064,
regnet.RegNetX080,
regnet.RegNetX120,
regnet.RegNetX160,
regnet.RegNetX320,
regnet.RegNetY002,
regnet.RegNetY004,
regnet.RegNetY006,
regnet.RegNetY008,
regnet.RegNetY016,
regnet.RegNetY032,
regnet.RegNetY040,
regnet.RegNetY064,
regnet.RegNetY080,
regnet.RegNetY120,
regnet.RegNetY160,
regnet.RegNetY320,
],
),
}
TEST_IMAGE_PATH = (
"https://storage.googleapis.com/tensorflow/"
"keras-applications/tests/elephant.jpg"
)
_IMAGENET_CLASSES = 1000
# Add a flag to define which application module file is tested.
# This is set as an 'arg' in the build target to guarantee that
# it only triggers the tests of the application models in the module
# if that module file has been modified.
FLAGS = flags.FLAGS
flags.DEFINE_string("module", None, "Application module used in this test.")
def _get_elephant(target_size):
# For models that don't include a Flatten step,
# the default is to accept variable-size inputs
# even when loading ImageNet weights (since it is possible).
# In this case, default to 299x299.
if target_size[0] is None:
target_size = (299, 299)
test_image = data_utils.get_file("elephant.jpg", TEST_IMAGE_PATH)
img = image_utils.load_img(test_image, target_size=tuple(target_size))
x = image_utils.img_to_array(img)
return np.expand_dims(x, axis=0)
class ApplicationsLoadWeightTest(tf.test.TestCase, parameterized.TestCase):
def assertShapeEqual(self, shape1, shape2):
if len(shape1) != len(shape2):
raise AssertionError(
f"Shapes are different rank: {shape1} vs {shape2}"
)
if shape1 != shape2:
raise AssertionError(f"Shapes differ: {shape1} vs {shape2}")
def test_application_pretrained_weights_loading(self):
app_module = ARG_TO_MODEL[FLAGS.module][0]
apps = ARG_TO_MODEL[FLAGS.module][1]
for app in apps:
try:
model = app(weights="imagenet")
except Exception:
self.skipTest("TODO(b/227700184): Re-enable.")
self.assertShapeEqual(model.output_shape, (None, _IMAGENET_CLASSES))
x = _get_elephant(model.input_shape[1:3])
x = app_module.preprocess_input(x)
preds = model.predict(x)
names = [p[1] for p in app_module.decode_predictions(preds)[0]]
# Test correct label is in top 3 (weak correctness test).
self.assertIn("African_elephant", names[:3])
if __name__ == "__main__":
tf.test.main()
# ==== Source file: tf-keras/tf_keras/applications/applications_load_weight_test.py ====
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet-RS models for TF-Keras.
Reference:
- [Revisiting ResNets: Improved Training and Scaling Strategies](
https://arxiv.org/pdf/2103.07579.pdf)
"""
import sys
from typing import Callable
from typing import Dict
from typing import List
from typing import Union
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import layers
from tf_keras.applications import imagenet_utils
from tf_keras.engine import training
from tf_keras.utils import data_utils
from tf_keras.utils import layer_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_URL = (
"https://storage.googleapis.com/tensorflow/keras-applications/resnet_rs/"
)
WEIGHT_HASHES = {
"resnet-rs-101-i160.h5": "544b3434d00efc199d66e9058c7f3379",
"resnet-rs-101-i160_notop.h5": "82d5b90c5ce9d710da639d6216d0f979",
"resnet-rs-101-i192.h5": "eb285be29ab42cf4835ff20a5e3b5d23",
"resnet-rs-101-i192_notop.h5": "f9a0f6b85faa9c3db2b6e233c4eebb5b",
"resnet-rs-152-i192.h5": "8d72a301ed8a6f11a47c4ced4396e338",
"resnet-rs-152-i192_notop.h5": "5fbf7ac2155cb4d5a6180ee9e3aa8704",
"resnet-rs-152-i224.h5": "31a46a92ab21b84193d0d71dd8c3d03b",
"resnet-rs-152-i224_notop.h5": "dc8b2cba2005552eafa3167f00dc2133",
"resnet-rs-152-i256.h5": "ba6271b99bdeb4e7a9b15c05964ef4ad",
"resnet-rs-152-i256_notop.h5": "fa79794252dbe47c89130f65349d654a",
"resnet-rs-200-i256.h5": "a76930b741884e09ce90fa7450747d5f",
"resnet-rs-200-i256_notop.h5": "bbdb3994718dfc0d1cd45d7eff3f3d9c",
"resnet-rs-270-i256.h5": "20d575825ba26176b03cb51012a367a8",
"resnet-rs-270-i256_notop.h5": "2c42ecb22e35f3e23d2f70babce0a2aa",
"resnet-rs-350-i256.h5": "f4a039dc3c421321b7fc240494574a68",
"resnet-rs-350-i256_notop.h5": "6e44b55025bbdff8f51692a023143d66",
"resnet-rs-350-i320.h5": "7ccb858cc738305e8ceb3c0140bee393",
"resnet-rs-350-i320_notop.h5": "ab0c1f9079d2f85a9facbd2c88aa6079",
"resnet-rs-420-i320.h5": "ae0eb9bed39e64fc8d7e0db4018dc7e8",
"resnet-rs-420-i320_notop.h5": "fe6217c32be8305b1889657172b98884",
"resnet-rs-50-i160.h5": "69d9d925319f00a8bdd4af23c04e4102",
"resnet-rs-50-i160_notop.h5": "90daa68cd26c95aa6c5d25451e095529",
}
DEPTH_TO_WEIGHT_VARIANTS = {
50: [160],
101: [160, 192],
152: [192, 224, 256],
200: [256],
270: [256],
350: [256, 320],
420: [320],
}
BLOCK_ARGS = {
50: [
{"input_filters": 64, "num_repeats": 3},
{"input_filters": 128, "num_repeats": 4},
{"input_filters": 256, "num_repeats": 6},
{"input_filters": 512, "num_repeats": 3},
],
101: [
{"input_filters": 64, "num_repeats": 3},
{"input_filters": 128, "num_repeats": 4},
{"input_filters": 256, "num_repeats": 23},
{"input_filters": 512, "num_repeats": 3},
],
152: [
{"input_filters": 64, "num_repeats": 3},
{"input_filters": 128, "num_repeats": 8},
{"input_filters": 256, "num_repeats": 36},
{"input_filters": 512, "num_repeats": 3},
],
200: [
{"input_filters": 64, "num_repeats": 3},
{"input_filters": 128, "num_repeats": 24},
{"input_filters": 256, "num_repeats": 36},
{"input_filters": 512, "num_repeats": 3},
],
270: [
{"input_filters": 64, "num_repeats": 4},
{"input_filters": 128, "num_repeats": 29},
{"input_filters": 256, "num_repeats": 53},
{"input_filters": 512, "num_repeats": 4},
],
350: [
{"input_filters": 64, "num_repeats": 4},
{"input_filters": 128, "num_repeats": 36},
{"input_filters": 256, "num_repeats": 72},
{"input_filters": 512, "num_repeats": 4},
],
420: [
{"input_filters": 64, "num_repeats": 4},
{"input_filters": 128, "num_repeats": 44},
{"input_filters": 256, "num_repeats": 87},
{"input_filters": 512, "num_repeats": 4},
],
}
CONV_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {
"scale": 2.0,
"mode": "fan_out",
"distribution": "truncated_normal",
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
[Revisiting ResNets: Improved Training and Scaling Strategies](
https://arxiv.org/pdf/2103.07579.pdf)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each TF-Keras Application expects a specific kind of input
    preprocessing. For ResNetRS, by default input preprocessing is included as a
part of the model (as a `Rescaling` layer), and thus
`tf.keras.applications.resnet_rs.preprocess_input` is actually a
pass-through function. In this use case, ResNetRS models expect their inputs
to be float tensors of pixels with values in the [0-255] range.
At the same time, preprocessing as a part of the model (i.e. `Rescaling`
layer) can be disabled by setting `include_preprocessing` argument to False.
With preprocessing disabled ResNetRS models expect their inputs to be float
tensors of pixels with values in the [-1, 1] range.
Args:
depth: Depth of ResNet network.
input_shape: optional shape tuple. It should have exactly 3 inputs
channels, and width and height should be no smaller than 32.
E.g. (200, 200, 3) would be one valid value.
bn_momentum: Momentum parameter for Batch Normalization layers.
bn_epsilon: Epsilon parameter for Batch Normalization layers.
activation: activation function.
se_ratio: Squeeze and Excitation layer ratio.
dropout_rate: dropout rate before final classifier layer.
drop_connect_rate: dropout rate at skip connections.
include_top: whether to include the fully-connected layer at the top of
the network.
block_args: list of dicts, parameters to construct block modules.
model_name: name of the model.
pooling: optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
weights: one of `None` (random initialization), `'imagenet'`
(pre-training on ImageNet), or the path to the weights file to be
loaded. Note: one model can have multiple imagenet variants
depending on input shape it was trained with. For input_shape
224x224 pass `imagenet-i224` as argument. By default, highest input
shape weights are downloaded.
input_tensor: optional TF-Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
include_preprocessing: Boolean, whether to include the preprocessing
layer (`Rescaling`) at the bottom of the network. Note: Input image
is normalized by ImageNet mean and standard deviation.
Defaults to `True`.
Returns:
A `keras.Model` instance.
"""
def Conv2DFixedPadding(filters, kernel_size, strides, name=None):
"""Conv2D block with fixed padding."""
if name is None:
counter = backend.get_uid("conv_")
name = f"conv_{counter}"
def apply(inputs):
if strides > 1:
inputs = fixed_padding(inputs, kernel_size)
return layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding="same" if strides == 1 else "valid",
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name,
)(inputs)
return apply
def STEM(
bn_momentum: float = 0.0,
bn_epsilon: float = 1e-5,
activation: str = "relu",
name=None,
):
"""ResNet-D type STEM block."""
if name is None:
counter = backend.get_uid("stem_")
name = f"stem_{counter}"
def apply(inputs):
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
# First stem block
x = Conv2DFixedPadding(
filters=32, kernel_size=3, strides=2, name=name + "_stem_conv_1"
)(inputs)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + "_stem_batch_norm_1",
)(x)
x = layers.Activation(activation, name=name + "_stem_act_1")(x)
# Second stem block
x = Conv2DFixedPadding(
filters=32, kernel_size=3, strides=1, name=name + "_stem_conv_2"
)(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + "_stem_batch_norm_2",
)(x)
x = layers.Activation(activation, name=name + "_stem_act_2")(x)
# Final Stem block:
x = Conv2DFixedPadding(
filters=64, kernel_size=3, strides=1, name=name + "_stem_conv_3"
)(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + "_stem_batch_norm_3",
)(x)
x = layers.Activation(activation, name=name + "_stem_act_3")(x)
# Replace stem max pool:
x = Conv2DFixedPadding(
filters=64, kernel_size=3, strides=2, name=name + "_stem_conv_4"
)(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + "_stem_batch_norm_4",
)(x)
x = layers.Activation(activation, name=name + "_stem_act_4")(x)
return x
return apply
def SE(
in_filters: int, se_ratio: float = 0.25, expand_ratio: int = 1, name=None
):
"""Squeeze and Excitation block."""
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
if name is None:
counter = backend.get_uid("se_")
name = f"se_{counter}"
def apply(inputs):
x = layers.GlobalAveragePooling2D(name=name + "_se_squeeze")(inputs)
if bn_axis == 1:
se_shape = (x.shape[-1], 1, 1)
else:
se_shape = (1, 1, x.shape[-1])
x = layers.Reshape(se_shape, name=name + "_se_reshape")(x)
num_reduced_filters = max(1, int(in_filters * 4 * se_ratio))
x = layers.Conv2D(
filters=num_reduced_filters,
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
use_bias=True,
activation="relu",
name=name + "_se_reduce",
)(x)
x = layers.Conv2D(
filters=4
* in_filters
* expand_ratio, # Expand ratio is 1 by default
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
use_bias=True,
activation="sigmoid",
name=name + "_se_expand",
)(x)
return layers.multiply([inputs, x], name=name + "_se_excite")
return apply
def BottleneckBlock(
filters: int,
strides: int,
use_projection: bool,
bn_momentum: float = 0.0,
bn_epsilon: float = 1e-5,
activation: str = "relu",
se_ratio: float = 0.25,
survival_probability: float = 0.8,
name=None,
):
"""Bottleneck block variant for residual networks with BN."""
if name is None:
counter = backend.get_uid("block_0_")
name = f"block_0_{counter}"
def apply(inputs):
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
shortcut = inputs
if use_projection:
filters_out = filters * 4
if strides == 2:
shortcut = layers.AveragePooling2D(
pool_size=(2, 2),
strides=(2, 2),
padding="same",
name=name + "_projection_pooling",
)(inputs)
shortcut = Conv2DFixedPadding(
filters=filters_out,
kernel_size=1,
strides=1,
name=name + "_projection_conv",
)(shortcut)
else:
shortcut = Conv2DFixedPadding(
filters=filters_out,
kernel_size=1,
strides=strides,
name=name + "_projection_conv",
)(inputs)
shortcut = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + "_projection_batch_norm",
)(shortcut)
# First conv layer:
x = Conv2DFixedPadding(
filters=filters, kernel_size=1, strides=1, name=name + "_conv_1"
)(inputs)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + "batch_norm_1",
)(x)
x = layers.Activation(activation, name=name + "_act_1")(x)
# Second conv layer:
x = Conv2DFixedPadding(
filters=filters,
kernel_size=3,
strides=strides,
name=name + "_conv_2",
)(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + "_batch_norm_2",
)(x)
x = layers.Activation(activation, name=name + "_act_2")(x)
# Third conv layer:
x = Conv2DFixedPadding(
filters=filters * 4, kernel_size=1, strides=1, name=name + "_conv_3"
)(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + "_batch_norm_3",
)(x)
if 0 < se_ratio < 1:
x = SE(filters, se_ratio=se_ratio, name=name + "_se")(x)
# Drop connect
if survival_probability:
x = layers.Dropout(
survival_probability,
noise_shape=(None, 1, 1, 1),
name=name + "_drop",
)(x)
x = layers.Add()([x, shortcut])
return layers.Activation(activation, name=name + "_output_act")(x)
return apply
def BlockGroup(
filters,
strides,
num_repeats,
se_ratio: float = 0.25,
bn_epsilon: float = 1e-5,
bn_momentum: float = 0.0,
activation: str = "relu",
survival_probability: float = 0.8,
name=None,
):
"""Create one group of blocks for the ResNet model."""
if name is None:
counter = backend.get_uid("block_group_")
name = f"block_group_{counter}"
def apply(inputs):
# Only the first block per block_group uses projection shortcut and
# strides.
x = BottleneckBlock(
filters=filters,
strides=strides,
use_projection=True,
se_ratio=se_ratio,
bn_epsilon=bn_epsilon,
bn_momentum=bn_momentum,
activation=activation,
survival_probability=survival_probability,
name=name + "_block_0_",
)(inputs)
for i in range(1, num_repeats):
x = BottleneckBlock(
filters=filters,
strides=1,
use_projection=False,
se_ratio=se_ratio,
activation=activation,
bn_epsilon=bn_epsilon,
bn_momentum=bn_momentum,
survival_probability=survival_probability,
name=name + f"_block_{i}_",
)(x)
return x
return apply
def get_survival_probability(init_rate, block_num, total_blocks):
"""Get survival probability based on block number and initial rate."""
return init_rate * float(block_num) / total_blocks
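# Hedged worked example (not part of the original file): with
# drop_connect_rate=0.1 and 4 block groups (total_blocks = 4 + 1 = 5, as set
# up in ResNetRS below), the per-group rates are:
#
#   get_survival_probability(0.1, 2, 5)  # 0.04  (BlockGroup2)
#   get_survival_probability(0.1, 3, 5)  # 0.06  (BlockGroup3)
#   get_survival_probability(0.1, 4, 5)  # 0.08  (BlockGroup4)
#   get_survival_probability(0.1, 5, 5)  # 0.10  (BlockGroup5)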
def allow_bigger_recursion(target_limit: int):
"""Increase default recursion limit to create larger models."""
current_limit = sys.getrecursionlimit()
if current_limit < target_limit:
sys.setrecursionlimit(target_limit)
def fixed_padding(inputs, kernel_size):
"""Pad the input along the spatial dimensions independently of input
size."""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
# Use ZeroPadding as to avoid TFOpLambda layer
padded_inputs = layers.ZeroPadding2D(
padding=((pad_beg, pad_end), (pad_beg, pad_end))
)(inputs)
return padded_inputs
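# Hedged worked example (not part of the original file): for kernel_size=3,
# pad_total=2 is split as (1, 1) on each spatial side, so a batch of shape
# (N, 224, 224, C) becomes (N, 226, 226, C) before the strided "valid"
# convolution in Conv2DFixedPadding above. For an even kernel_size=2 the
# split is asymmetric: (0, 1).
#
#   fixed_padding(tf.zeros((1, 224, 224, 3)), kernel_size=3).shape
#   # -> (1, 226, 226, 3)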
def ResNetRS(
depth: int,
input_shape=None,
bn_momentum=0.0,
bn_epsilon=1e-5,
activation: str = "relu",
se_ratio=0.25,
dropout_rate=0.25,
drop_connect_rate=0.2,
include_top=True,
block_args: List[Dict[str, int]] = None,
model_name="resnet-rs",
pooling=None,
weights="imagenet",
input_tensor=None,
classes=1000,
classifier_activation: Union[str, Callable] = "softmax",
include_preprocessing=True,
):
"""Build Resnet-RS model, given provided parameters.
Args:
depth: Depth of ResNet network.
input_shape: optional shape tuple. It should have exactly 3 inputs
channels, and width and height should be no smaller than 32. E.g.
(200, 200, 3) would be one valid value.
bn_momentum: Momentum parameter for Batch Normalization layers.
bn_epsilon: Epsilon parameter for Batch Normalization layers.
activation: activation function.
se_ratio: Squeeze and Excitation layer ratio.
dropout_rate: dropout rate before final classifier layer.
drop_connect_rate: dropout rate at skip connections.
include_top: whether to include the fully-connected layer at the top of
the network.
block_args: list of dicts, parameters to construct block modules.
model_name: name of the model.
pooling: optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional layer.
- `avg` means that global average pooling will be applied to the
output of the last convolutional layer, and thus the output of the
model will be a 2D tensor.
- `max` means that global max pooling will be applied.
weights: one of `None` (random initialization), `'imagenet'`
(pre-training on ImageNet), or the path to the weights file to be
        loaded. Note: one model can have multiple imagenet variants depending
on input shape it was trained with. For input_shape 224x224 pass
`imagenet-i224` as argument. By default, highest input shape weights
are downloaded.
input_tensor: optional TF-Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
include_preprocessing: Boolean, whether to include the preprocessing
layer (`Rescaling`) at the bottom of the network. Note - Input image
is normalized by ImageNet mean and standard deviation.
Defaults to `True`.
Returns:
A `tf.keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`, or invalid input
shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
# Validate parameters
available_weight_variants = DEPTH_TO_WEIGHT_VARIANTS[depth]
if weights == "imagenet":
max_input_shape = max(available_weight_variants)
# `imagenet` argument without explicit weights input size.
# Picking weights trained with biggest available shape
weights = f"{weights}-i{max_input_shape}"
weights_allow_list = [f"imagenet-i{x}" for x in available_weight_variants]
if not (
weights in {*weights_allow_list, None} or tf.io.gfile.exists(weights)
):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), `'imagenet'` "
"(pre-training on ImageNet, with highest available input shape),"
" or the path to the weights file to be loaded. "
f"For ResNetRS{depth} the following weight variants are "
f"available {weights_allow_list} (default=highest)."
f" Received weights={weights}"
)
if weights in weights_allow_list and include_top and classes != 1000:
raise ValueError(
"If using `weights` as `'imagenet'` or any "
f"of {weights_allow_list} "
"with `include_top` as true, `classes` should be 1000. "
f"Received classes={classes}"
)
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
# Define input tensor
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
x = img_input
if include_preprocessing:
num_channels = input_shape[bn_axis - 1]
x = layers.Rescaling(scale=1.0 / 255)(x)
if num_channels == 3:
x = layers.Normalization(
mean=[0.485, 0.456, 0.406],
variance=[0.229**2, 0.224**2, 0.225**2],
axis=bn_axis,
)(x)
# Build stem
x = STEM(
bn_momentum=bn_momentum, bn_epsilon=bn_epsilon, activation=activation
)(x)
# Build blocks
if block_args is None:
block_args = BLOCK_ARGS[depth]
for i, args in enumerate(block_args):
survival_probability = get_survival_probability(
init_rate=drop_connect_rate,
block_num=i + 2,
total_blocks=len(block_args) + 1,
)
x = BlockGroup(
filters=args["input_filters"],
activation=activation,
strides=(1 if i == 0 else 2),
num_repeats=args["num_repeats"],
se_ratio=se_ratio,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
survival_probability=survival_probability,
name=f"BlockGroup{i + 2}_",
)(x)
# Build head:
if include_top:
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate, name="top_dropout")(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes, activation=classifier_activation, name="predictions"
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D(name="max_pool")(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name=model_name)
# Download weights
if weights in weights_allow_list:
        weights_input_shape = weights.split("-")[-1]  # e.g. "i160"
weights_name = f"{model_name}-{weights_input_shape}"
if not include_top:
weights_name += "_notop"
filename = f"{weights_name}.h5"
download_url = BASE_WEIGHTS_URL + filename
weights_path = data_utils.get_file(
fname=filename,
origin=download_url,
cache_subdir="models",
file_hash=WEIGHT_HASHES[filename],
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export(
"keras.applications.resnet_rs.ResNetRS50", "keras.applications.ResNetRS50"
)
def ResNetRS50(
include_top=True,
weights="imagenet",
classes=1000,
input_shape=None,
input_tensor=None,
pooling=None,
classifier_activation="softmax",
include_preprocessing=True,
):
"""Build ResNet-RS50 model."""
return ResNetRS(
depth=50,
include_top=include_top,
drop_connect_rate=0.0,
dropout_rate=0.25,
weights=weights,
classes=classes,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
classifier_activation=classifier_activation,
model_name="resnet-rs-50",
include_preprocessing=include_preprocessing,
)
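# Hedged usage sketch (not part of the original file): classification with the
# default configuration. Assumes the pretrained weights can be downloaded; the
# input tensor is random and only illustrates the expected shapes. With
# include_preprocessing=True the model expects pixels in the [0, 255] range.
#
#   model = ResNetRS50(weights="imagenet")
#   images = tf.random.uniform((1, 224, 224, 3), maxval=255)
#   preds = model(images)  # shape (1, 1000)
#   decode_predictions(preds.numpy(), top=3)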
@keras_export(
"keras.applications.resnet_rs.ResNetRS101", "keras.applications.ResNetRS101"
)
def ResNetRS101(
include_top=True,
weights="imagenet",
classes=1000,
input_shape=None,
input_tensor=None,
pooling=None,
classifier_activation="softmax",
include_preprocessing=True,
):
"""Build ResNet-RS101 model."""
return ResNetRS(
depth=101,
include_top=include_top,
drop_connect_rate=0.0,
dropout_rate=0.25,
weights=weights,
classes=classes,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
classifier_activation=classifier_activation,
model_name="resnet-rs-101",
include_preprocessing=include_preprocessing,
)
@keras_export(
"keras.applications.resnet_rs.ResNetRS152", "keras.applications.ResNetRS152"
)
def ResNetRS152(
include_top=True,
weights="imagenet",
classes=1000,
input_shape=None,
input_tensor=None,
pooling=None,
classifier_activation="softmax",
include_preprocessing=True,
):
"""Build ResNet-RS152 model."""
return ResNetRS(
depth=152,
include_top=include_top,
drop_connect_rate=0.0,
dropout_rate=0.25,
weights=weights,
classes=classes,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
classifier_activation=classifier_activation,
model_name="resnet-rs-152",
include_preprocessing=include_preprocessing,
)
@keras_export(
"keras.applications.resnet_rs.ResNetRS200", "keras.applications.ResNetRS200"
)
def ResNetRS200(
include_top=True,
weights="imagenet",
classes=1000,
input_shape=None,
input_tensor=None,
pooling=None,
classifier_activation="softmax",
include_preprocessing=True,
):
"""Build ResNet-RS200 model."""
return ResNetRS(
depth=200,
include_top=include_top,
drop_connect_rate=0.1,
dropout_rate=0.25,
weights=weights,
classes=classes,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
classifier_activation=classifier_activation,
model_name="resnet-rs-200",
include_preprocessing=include_preprocessing,
)
@keras_export(
"keras.applications.resnet_rs.ResNetRS270", "keras.applications.ResNetRS270"
)
def ResNetRS270(
include_top=True,
weights="imagenet",
classes=1000,
input_shape=None,
input_tensor=None,
pooling=None,
classifier_activation="softmax",
include_preprocessing=True,
):
"""Build ResNet-RS-270 model."""
allow_bigger_recursion(1300)
return ResNetRS(
depth=270,
include_top=include_top,
drop_connect_rate=0.1,
dropout_rate=0.25,
weights=weights,
classes=classes,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
classifier_activation=classifier_activation,
model_name="resnet-rs-270",
include_preprocessing=include_preprocessing,
)
@keras_export(
"keras.applications.resnet_rs.ResNetRS350", "keras.applications.ResNetRS350"
)
def ResNetRS350(
include_top=True,
weights="imagenet",
classes=1000,
input_shape=None,
input_tensor=None,
pooling=None,
classifier_activation="softmax",
include_preprocessing=True,
):
"""Build ResNet-RS350 model."""
allow_bigger_recursion(1500)
return ResNetRS(
depth=350,
include_top=include_top,
drop_connect_rate=0.1,
dropout_rate=0.4,
weights=weights,
classes=classes,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
classifier_activation=classifier_activation,
model_name="resnet-rs-350",
include_preprocessing=include_preprocessing,
)
@keras_export(
"keras.applications.resnet_rs.ResNetRS420", "keras.applications.ResNetRS420"
)
def ResNetRS420(
include_top=True,
weights="imagenet",
classes=1000,
input_shape=None,
input_tensor=None,
pooling=None,
classifier_activation="softmax",
include_preprocessing=True,
):
"""Build ResNet-RS420 model."""
allow_bigger_recursion(1800)
return ResNetRS(
depth=420,
include_top=include_top,
dropout_rate=0.4,
drop_connect_rate=0.1,
weights=weights,
classes=classes,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
classifier_activation=classifier_activation,
model_name="resnet-rs-420",
include_preprocessing=include_preprocessing,
)
@keras_export("keras.applications.resnet_rs.preprocess_input")
def preprocess_input(x, data_format=None):
"""A placeholder method for backward compatibility.
    The preprocessing logic has been included in the ResNetRS model
    implementation. Users are no longer required to call this method to
    normalize the input data. This method does nothing and is only kept as a
    placeholder to align the API surface between the old and new versions of
    the model.
Args:
x: A floating point `numpy.array` or a `tf.Tensor`.
data_format: Optional data format of the image tensor/array. `None` means
the global setting `tf.keras.backend.image_data_format()` is used
(unless you changed it, it uses "channels_last").
Defaults to `None`.
Returns:
Unchanged `numpy.array` or `tf.Tensor`.
"""
return x
@keras_export("keras.applications.resnet_rs.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
ResNetRS50.__doc__ = BASE_DOCSTRING.format(name="ResNetRS50")
ResNetRS101.__doc__ = BASE_DOCSTRING.format(name="ResNetRS101")
ResNetRS152.__doc__ = BASE_DOCSTRING.format(name="ResNetRS152")
ResNetRS200.__doc__ = BASE_DOCSTRING.format(name="ResNetRS200")
ResNetRS270.__doc__ = BASE_DOCSTRING.format(name="ResNetRS270")
ResNetRS350.__doc__ = BASE_DOCSTRING.format(name="ResNetRS350")
ResNetRS420.__doc__ = BASE_DOCSTRING.format(name="ResNetRS420")
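# Illustrative usage (a minimal sketch, not part of the original module): the
# helper below shows how one of the builders defined above might be exercised.
# The function name, input shape, and class count are assumptions made for
# demonstration only; `weights=None` avoids downloading the ImageNet
# checkpoint.
def _example_resnet_rs50_forward_pass():
    import numpy as np
    # Randomly initialized ResNet-RS50 with a 10-way classifier head.
    model = ResNetRS50(weights=None, input_shape=(224, 224, 3), classes=10)
    # `include_preprocessing=True` (the default) means the model expects raw
    # [0, 255] pixel values and handles normalization internally.
    images = np.random.uniform(0, 255, size=(2, 224, 224, 3)).astype("float32")
    preds = model.predict(images)
    return preds.shape  # Expected: (2, 10)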
| tf-keras/tf_keras/applications/resnet_rs.py/0 | {
"file_path": "tf-keras/tf_keras/applications/resnet_rs.py",
"repo_id": "tf-keras",
"token_count": 15247
} | 206 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark tests for CPU performance of TF-Keras models."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.benchmarks import benchmark_util
# Loss function and optimizer.
_LOSS = "binary_crossentropy"
_OPTIMIZER = "rmsprop"
class KerasModelCPUBenchmark(
tf.test.Benchmark, metaclass=tf.__internal__.test.ParameterizedBenchmark
):
"""Required Arguments for measure_performance.
x: Input data, it could be Numpy or load from tfds.
y: Target data. If `x` is a dataset, generator instance,
`y` should not be specified.
loss: Loss function for model.
optimizer: Optimizer for model.
Other details can see in `measure_performance()` method of
benchmark_util.
"""
    # The parameters of each benchmark are given as a tuple:
# (benchmark_name_suffix, batch_size, run_iters).
# benchmark_name_suffix: The suffix of the benchmark test name with
# convention `{bs}_{batch_size}`.
# batch_size: Integer. Number of samples per gradient update.
# run_iters: Integer. Number of iterations to run the
# performance measurement.
_benchmark_parameters = [
("bs_32", 32, 3),
("bs_64", 64, 2),
("bs_128", 128, 2),
("bs_256", 256, 1),
("bs_512", 512, 1),
]
def _mnist_mlp(self):
"""Simple MLP model."""
model = keras.Sequential()
model.add(
keras.layers.Dense(512, activation="relu", input_shape=(784,))
)
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(512, activation="relu"))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(10, activation="softmax"))
return model
def _mnist_convnet(self):
"""Simple Convnet model."""
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation="relu",
input_shape=(28, 28, 1),
)
)
model.add(keras.layers.Conv2D(64, (3, 3), activation="relu"))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(keras.layers.Dropout(0.25))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(128, activation="relu"))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(10, activation="softmax"))
return model
def _imdb_lstm(self):
"""Simple LSTM model."""
model = keras.Sequential()
model.add(keras.layers.Embedding(20000, 128))
model.add(keras.layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(keras.layers.Dense(1, activation="sigmoid"))
return model
def benchmark_mnist_mlp(self, batch_size, run_iters):
"""Benchmark for MLP model on synthetic mnist data."""
mlp_x = np.random.random((5000, 784))
mlp_y = np.random.random((5000, 10))
metrics, wall_time, extras = benchmark_util.measure_performance(
self._mnist_mlp,
x=mlp_x,
y=mlp_y,
batch_size=batch_size,
run_iters=run_iters,
optimizer=_OPTIMIZER,
loss=_LOSS,
)
self.report_benchmark(
iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras
)
def benchmark_mnist_convnet(self, batch_size, run_iters):
"""Benchmark for Convnet model on synthetic mnist data."""
convnet_x = np.random.random((5000, 28, 28, 1))
convnet_y = np.random.random((5000, 10))
metrics, wall_time, extras = benchmark_util.measure_performance(
self._mnist_convnet,
x=convnet_x,
y=convnet_y,
batch_size=batch_size,
run_iters=run_iters,
optimizer=_OPTIMIZER,
loss=_LOSS,
)
self.report_benchmark(
iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras
)
def benchmark_imdb_lstm(self, batch_size, run_iters):
"""Benchmark for LSTM model on synthetic imdb review dataset."""
lstm_x = np.random.randint(0, 1999, size=(2500, 100))
lstm_y = np.random.random((2500, 1))
metrics, wall_time, extras = benchmark_util.measure_performance(
self._imdb_lstm,
x=lstm_x,
y=lstm_y,
batch_size=batch_size,
run_iters=run_iters,
optimizer=_OPTIMIZER,
loss=_LOSS,
)
self.report_benchmark(
iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras
)
if __name__ == "__main__":
tf.test.main()
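# Illustrative note (an assumption, not part of the original test file): the
# benchmarks above are normally driven by `tf.test.main()`, and the
# `ParameterizedBenchmark` metaclass expands each method over
# `_benchmark_parameters`. A direct, manual invocation of one configuration
# might look like:
#     KerasModelCPUBenchmark().benchmark_mnist_mlp(batch_size=32, run_iters=3)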
| tf-keras/tf_keras/benchmarks/keras_cpu_benchmark_test.py/0 | {
"file_path": "tf-keras/tf_keras/benchmarks/keras_cpu_benchmark_test.py",
"repo_id": "tf-keras",
"token_count": 2425
} | 207 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark tests for TF-Keras metrics memory consumption."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
try:
import memory_profiler
except ImportError:
memory_profiler = None
class KerasMetricMemoryBenchmark(tf.test.Benchmark):
# This test is added to measure the memory footprint for
# metrics_utils._update_confusion_matrix_variables_optimized().
def benchmark_auc_memory_usage(self):
if memory_profiler is None:
self.skipTest("Skip test since memory_profiler is not available.")
with tf.compat.forward_compatibility_horizon(2021, 6, 9):
self.y_true = np.random.randint(2, size=(1024, 1024))
self.y_pred = np.random.rand(1024, 1024)
memory_usage_1 = memory_profiler.memory_usage(
(self.even_thresholds_auc)
)
memory_usage_2 = memory_profiler.memory_usage(
(self.uneven_thresholds_auc)
)
            # memory_usage is a list of samples taken while running the
            # function. The net memory consumption is approximately
            # max(usage) - min(usage).
memory_usage_1 = max(memory_usage_1) - min(memory_usage_1)
memory_usage_2 = max(memory_usage_2) - min(memory_usage_2)
metrics = {
"even_threshold_memory_usage": memory_usage_1,
"uneven_threshold_memory_usage": memory_usage_2,
}
self.report_benchmark(iters=1, metrics=metrics)
def even_thresholds_auc(self):
auc = keras.metrics.AUC(num_thresholds=200)
self.assertTrue(auc._thresholds_distributed_evenly)
auc(self.y_true, self.y_pred)
def uneven_thresholds_auc(self):
num_thresholds = 200
thresholds = [x / (num_thresholds - 1) for x in range(num_thresholds)]
thresholds[100] += 1 / 200
thresholds = thresholds[1:-1]
auc = keras.metrics.AUC(thresholds=thresholds)
self.assertFalse(auc._thresholds_distributed_evenly)
self.assertEqual(auc.num_thresholds, num_thresholds)
auc(self.y_true, self.y_pred)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/benchmarks/metrics_memory_benchmark_test.py/0 | {
"file_path": "tf-keras/tf_keras/benchmarks/metrics_memory_benchmark_test.py",
"repo_id": "tf-keras",
"token_count": 1146
} | 208 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Callbacks: utilities called at certain points during model training."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import callbacks
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=["keras.callbacks.TensorBoard"])
class TensorBoard(callbacks.TensorBoard):
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Args:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
batch_size: size of batch of inputs to feed to the network for
histograms computation.
write_images: whether to write model weights to visualize as image in
TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding
layers will be saved. If set to 0, embeddings won't be computed. Data
to be visualized in TensorBoard's Embedding tab must be passed as
`embeddings_data`.
      embeddings_layer_names: a list of names of layers to keep an eye on.
        If None or an empty list, all embedding layers will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name
in which metadata for this embedding layer is saved.
[Here are details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
        about the metadata file format. If the same metadata file is
        used for all embedding layers, a single string can be passed.
embeddings_data: data to be embedded at layers specified in
`embeddings_layer_names`. Numpy array (if the model has a single
input) or list of Numpy arrays (if the model has multiple inputs).
Learn more about embeddings [in this guide](
https://www.tensorflow.org/programmers_guide/embedding).
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The
same applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
samples. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
@compatibility(eager)
Using the `TensorBoard` callback will work when eager execution is enabled,
with the restriction that outputting histogram summaries of weights and
gradients is not supported. Consequently, `histogram_freq` will be ignored.
@end_compatibility
"""
def __init__(
self,
log_dir="./logs",
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None,
update_freq="epoch",
profile_batch=2,
):
# Don't call super's init since it is an eager-only version.
callbacks.Callback.__init__(self)
self.log_dir = log_dir
self.histogram_freq = histogram_freq
if self.histogram_freq and tf.executing_eagerly():
logging.warning(
UserWarning(
"Weight and gradient histograms not supported for eager"
"execution, setting `histogram_freq` to `0`."
)
)
self.histogram_freq = 0
self.merged = None
self.write_graph = write_graph
self.write_grads = write_grads
self.write_images = write_images
self.batch_size = batch_size
self._current_batch = 0
self._total_batches_seen = 0
self._total_val_batches_seen = 0
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata
self.embeddings_data = embeddings_data
if update_freq == "batch":
self.update_freq = 1
else:
self.update_freq = update_freq
self._samples_seen = 0
self._samples_seen_at_last_write = 0
# TODO(fishx): Add a link to the full profiler tutorial.
self._profile_batch = profile_batch
# True when the profiler was successfully started by this callback.
# We track the status here to make sure callbacks do not interfere with
# each other. The callback will only stop the profiler it started.
self._profiler_started = False
# TensorBoard should only write summaries on the chief when in a
# Multi-Worker setting.
self._chief_worker_only = True
def _init_writer(self, model):
"""Sets file writer."""
if tf.executing_eagerly():
self.writer = tf.summary.create_file_writer(self.log_dir)
if not model.run_eagerly and self.write_graph:
with self.writer.as_default():
tf.summary.graph(backend.get_graph())
elif self.write_graph:
self.writer = tf.compat.v1.summary.FileWriter(
self.log_dir, backend.get_graph()
)
else:
self.writer = tf.compat.v1.summary.FileWriter(self.log_dir)
def _make_histogram_ops(self, model):
"""Defines histogram ops when histogram_freq > 0."""
# only make histogram summary op if it hasn't already been made
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
mapped_weight_name = weight.name.replace(":", "_")
tf.compat.v1.summary.histogram(mapped_weight_name, weight)
if self.write_images:
w_img = tf.compat.v1.squeeze(weight)
shape = tuple(w_img.shape)
if len(shape) == 2: # dense layer kernel case
if shape[0] > shape[1]:
w_img = tf.compat.v1.transpose(w_img)
shape = tuple(w_img.shape)
w_img = tf.reshape(
w_img, [1, shape[0], shape[1], 1]
)
elif len(shape) == 3: # convnet case
if backend.image_data_format() == "channels_last":
# switch to channels_first to display
# every kernel as a separate image
w_img = tf.compat.v1.transpose(
w_img, perm=[2, 0, 1]
)
shape = tuple(w_img.shape)
w_img = tf.reshape(
w_img, [shape[0], shape[1], shape[2], 1]
)
elif len(shape) == 1: # bias case
w_img = tf.reshape(w_img, [1, shape[0], 1, 1])
else:
# not possible to handle 3D convnets etc.
continue
shape = tuple(w_img.shape)
assert len(shape) == 4 and shape[-1] in [1, 3, 4]
tf.compat.v1.summary.image(mapped_weight_name, w_img)
if self.write_grads:
for weight in layer.trainable_weights:
mapped_weight_name = weight.name.replace(":", "_")
grads = model.optimizer.get_gradients(
model.total_loss, weight
)
def is_indexed_slices(grad):
return type(grad).__name__ == "IndexedSlices"
grads = [
grad.values if is_indexed_slices(grad) else grad
for grad in grads
]
tf.compat.v1.summary.histogram(
f"{mapped_weight_name}_grad", grads
)
if hasattr(layer, "output"):
if isinstance(layer.output, list):
for i, output in enumerate(layer.output):
tf.compat.v1.summary.histogram(
f"{layer.name}_out_{i}", output
)
else:
tf.compat.v1.summary.histogram(
f"{layer.name}_out", layer.output
)
def set_model(self, model):
"""Sets TF-Keras model and creates summary ops."""
self.model = model
self._init_writer(model)
# histogram summaries only enabled in graph mode
if not tf.executing_eagerly():
self._make_histogram_ops(model)
self.merged = tf.compat.v1.summary.merge_all()
# If both embedding_freq and embeddings_data are available, we will
# visualize embeddings.
if self.embeddings_freq and self.embeddings_data is not None:
# Avoid circular dependency.
from tf_keras.engine import (
training_utils_v1,
)
self.embeddings_data = training_utils_v1.standardize_input_data(
self.embeddings_data, model.input_names
)
# If embedding_layer_names are not provided, get all of the
# embedding layers from the model.
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name
for layer in self.model.layers
if type(layer).__name__ == "Embedding"
]
self.assign_embeddings = []
embeddings_vars = {}
self.batch_id = batch_id = tf.compat.v1.placeholder(tf.int32)
self.step = step = tf.compat.v1.placeholder(tf.int32)
for layer in self.model.layers:
if layer.name in embeddings_layer_names:
embedding_input = self.model.get_layer(layer.name).output
embedding_size = np.prod(embedding_input.shape[1:])
embedding_input = tf.reshape(
embedding_input, (step, int(embedding_size))
)
shape = (
self.embeddings_data[0].shape[0],
int(embedding_size),
)
embedding = tf.Variable(
tf.zeros(shape), name=layer.name + "_embedding"
)
embeddings_vars[layer.name] = embedding
batch = tf.compat.v1.assign(
embedding[batch_id : batch_id + step], embedding_input
)
self.assign_embeddings.append(batch)
self.saver = tf.compat.v1.train.Saver(
list(embeddings_vars.values())
)
# Create embeddings_metadata dictionary
if isinstance(self.embeddings_metadata, str):
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings_vars.keys()
}
else:
# If embedding_metadata is already a dictionary
embeddings_metadata = self.embeddings_metadata
try:
# isort: off
from tensorboard.plugins import projector
except ImportError:
raise ImportError(
"Failed to import TensorBoard. Please make sure that "
                    "TensorBoard integration is complete."
)
# TODO(psv): Add integration tests to test embedding visualization
# with TensorBoard callback. We are unable to write a unit test for
# this because TensorBoard dependency assumes TensorFlow package is
# installed.
config = projector.ProjectorConfig()
for layer_name, tensor in embeddings_vars.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
if (
embeddings_metadata is not None
and layer_name in embeddings_metadata
):
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def _fetch_callback(self, summary):
self.writer.add_summary(summary, self._total_val_batches_seen)
self._total_val_batches_seen += 1
def _write_custom_summaries(self, step, logs=None):
"""Writes metrics out as custom scalar summaries.
Args:
step: the global step to use for TensorBoard.
logs: dict. Keys are scalar summary names, values are
NumPy scalars.
"""
logs = logs or {}
if tf.executing_eagerly():
# use v2 summary ops
with self.writer.as_default(), tf.summary.record_if(True):
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
tf.summary.scalar(name, value, step=step)
else:
# use FileWriter from v1 summary
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary = tf.compat.v1.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
self.writer.add_summary(summary, step)
self.writer.flush()
def on_train_batch_begin(self, batch, logs=None):
if self._total_batches_seen == self._profile_batch - 1:
self._start_profiler()
def on_train_batch_end(self, batch, logs=None):
return self.on_batch_end(batch, logs)
def on_test_begin(self, logs=None):
pass
def on_test_end(self, logs=None):
pass
def on_batch_end(self, batch, logs=None):
"""Writes scalar summaries for metrics on every training batch.
Performs profiling if current batch is in profiler_batches.
"""
# Don't output batch_size and batch number as TensorBoard summaries
logs = logs or {}
self._samples_seen += logs.get("size", 1)
samples_seen_since = (
self._samples_seen - self._samples_seen_at_last_write
)
if (
self.update_freq != "epoch"
and samples_seen_since >= self.update_freq
):
batch_logs = {
("batch_" + k): v
for k, v in logs.items()
if k not in ["batch", "size", "num_steps"]
}
self._write_custom_summaries(self._total_batches_seen, batch_logs)
self._samples_seen_at_last_write = self._samples_seen
self._total_batches_seen += 1
self._stop_profiler()
def on_train_begin(self, logs=None):
pass
def on_epoch_begin(self, epoch, logs=None):
"""Add histogram op to Model eval_function callbacks, reset batch
count."""
# check if histogram summary should be run for this epoch
if self.histogram_freq and epoch % self.histogram_freq == 0:
# add the histogram summary op if it should run this epoch
self.model._make_test_function()
if self.merged not in self.model.test_function.fetches:
self.model.test_function.fetches.append(self.merged)
self.model.test_function.fetch_callbacks[
self.merged
] = self._fetch_callback
def on_epoch_end(self, epoch, logs=None):
"""Checks if summary ops should run next epoch, logs scalar
summaries."""
# don't output batch_size and
# batch number as TensorBoard summaries
logs = {
("epoch_" + k): v
for k, v in logs.items()
if k not in ["batch", "size", "num_steps"]
}
if self.update_freq == "epoch":
step = epoch
else:
step = self._samples_seen
self._write_custom_summaries(step, logs)
# pop the histogram summary op after each epoch
if self.histogram_freq:
if self.merged in self.model.test_function.fetches:
self.model.test_function.fetches.remove(self.merged)
if self.merged in self.model.test_function.fetch_callbacks:
self.model.test_function.fetch_callbacks.pop(self.merged)
if self.embeddings_data is None and self.embeddings_freq:
raise ValueError(
"To visualize embeddings, embeddings_data must be provided."
)
if self.embeddings_freq and self.embeddings_data is not None:
if epoch % self.embeddings_freq == 0:
# We need a second forward-pass here because we're passing
                # the `embeddings_data` explicitly. This design allows passing
                # arbitrary data as `embeddings_data` and results from the fact
# that we need to know the size of the `tf.Variable`s which
# hold the embeddings in `set_model`. At this point, however,
# the `validation_data` is not yet set.
embeddings_data = self.embeddings_data
n_samples = embeddings_data[0].shape[0]
i = 0
sess = backend.get_session()
while i < n_samples:
step = min(self.batch_size, n_samples - i)
batch = slice(i, i + step)
if isinstance(self.model.input, list):
feed_dict = {
model_input: embeddings_data[idx][batch]
for idx, model_input in enumerate(self.model.input)
}
else:
feed_dict = {
self.model.input: embeddings_data[0][batch]
}
feed_dict.update({self.batch_id: i, self.step: step})
if not isinstance(backend.learning_phase(), int):
feed_dict[backend.learning_phase()] = False
sess.run(self.assign_embeddings, feed_dict=feed_dict)
self.saver.save(
sess,
os.path.join(self.log_dir, "keras_embedding.ckpt"),
epoch,
)
i += self.batch_size
def on_train_end(self, logs=None):
self._stop_profiler()
self.writer.close()
def _start_profiler(self):
"""Starts the profiler if currently inactive."""
if self._profiler_started:
return
try:
tf.profiler.experimental.start(logdir=self.log_dir)
self._profiler_started = True
except tf.errors.AlreadyExistsError as e:
# Profiler errors should not be fatal.
logging.error("Failed to start profiler: %s", e.message)
def _stop_profiler(self):
"""Stops the profiler if currently active."""
if not self._profiler_started:
return
try:
tf.profiler.experimental.stop()
except tf.errors.UnavailableError as e:
# Profiler errors should not be fatal.
logging.error("Failed to stop profiler: %s", e.message)
finally:
self._profiler_started = False
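# Illustrative usage (a minimal sketch, not part of the original module): how
# this v1 `TensorBoard` callback is typically wired into `Model.fit`. The
# helper name, the tiny model, and the synthetic data below are assumptions
# made purely for demonstration.
def _example_tensorboard_usage(log_dir="/tmp/tb_logs"):
    import tf_keras as keras
    model = keras.Sequential(
        [
            keras.layers.Dense(8, activation="relu", input_shape=(4,)),
            keras.layers.Dense(1, activation="sigmoid"),
        ]
    )
    model.compile(optimizer="rmsprop", loss="binary_crossentropy")
    x = np.random.rand(64, 4)
    y = np.random.randint(0, 2, size=(64, 1))
    # `profile_batch=0` disables profiling; with the default
    # `update_freq="epoch"`, scalar summaries are written once per epoch.
    callback = TensorBoard(log_dir=log_dir, update_freq="epoch", profile_batch=0)
    model.fit(x, y, epochs=2, batch_size=16, callbacks=[callback])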
| tf-keras/tf_keras/callbacks_v1.py/0 | {
"file_path": "tf-keras/tf_keras/callbacks_v1.py",
"repo_id": "tf-keras",
"token_count": 10754
} | 209 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to distributed training."""
import functools
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import callbacks
from tf_keras import metrics as metrics_module
from tf_keras import optimizers
from tf_keras.distribute import distribute_coordinator_utils as dc
from tf_keras.distribute import distributed_training_utils as dist_utils
from tf_keras.engine import training_utils_v1
from tf_keras.optimizers.legacy import optimizer_v2
from tf_keras.utils import tf_contextlib
from tf_keras.utils.mode_keys import ModeKeys
# isort: off
from tensorflow.python.platform import tf_logging as logging
def set_weights(distribution_strategy, dist_model, weights):
"""Sets the weights of the replicated models.
The weights of the replicated models are set to the weights of the original
model. The weights of the replicated model are Mirrored variables and hence
we need to use the `update` call within a DistributionStrategy scope.
Args:
distribution_strategy: DistributionStrategy used to distribute training
and validation.
dist_model: The replicated models on the different devices.
weights: The weights of the original model.
"""
assign_ops = []
for layer in dist_model.layers:
num_param = len(layer.weights)
layer_weights = weights[:num_param]
for sw, w in zip(layer.weights, layer_weights):
if tf.compat.v1.executing_eagerly_outside_functions():
sw.assign(w)
else:
assign_ops.append(distribution_strategy.unwrap(sw.assign(w)))
weights = weights[num_param:]
if not tf.compat.v1.executing_eagerly_outside_functions():
backend.get_session(assign_ops).run(assign_ops)
def unwrap_values(
distribution_strategy,
grouped_inputs,
grouped_outputs,
grouped_updates=None,
grouped_session_args=None,
with_loss_tensor=False,
):
"""Unwrap the list of values contained in the PerReplica parameters.
This function calls `flatten_per_replica_values` to parse each of the input
parameters into a list of values on the different devices. If we set
`with_loss_tensor` to be True, we also call `reduce` on the list of losses
on the different devices to give us one loss tensor.
Args:
distribution_strategy: DistributionStrategy used to distribute training
and validation.
grouped_inputs: PerReplica inputs returned from the train or test function
that we ran on each device.
grouped_outputs: PerReplica outputs returned from the train or test
function that we ran on each device.
grouped_updates: PerReplica updates returned from the train or test
function that we ran on each device.
grouped_session_args: PerReplica session args returned from the train or
test function that we ran on each device.
with_loss_tensor: Boolean that indicates if we need to add the reduced
loss tensor as one of the outputs.
Returns:
Values of each of the PerReplica parameters.
"""
# Unwrap per device values returned from each model's train function.
# This will be used to construct the main train function.
all_inputs = flatten_per_replica_values(
distribution_strategy, grouped_inputs
)
all_outputs = unwrap_outputs(
distribution_strategy, grouped_outputs, with_loss_tensor
)
if grouped_updates:
all_updates = flatten_per_replica_values(
distribution_strategy, grouped_updates
)
else:
all_updates = None
all_session_args = {}
if grouped_session_args:
grouped_feed_dict = grouped_session_args.get("feed_dict")
if grouped_feed_dict:
all_session_args["feed_dict"] = flatten_per_replica_values(
distribution_strategy, grouped_feed_dict
)
grouped_fetches = grouped_session_args.get("fetches")
if grouped_fetches:
all_session_args["fetches"] = flatten_per_replica_values(
distribution_strategy, grouped_fetches
)
# TODO(priyag): Return only non empty/None values
return all_inputs, all_outputs, all_updates, all_session_args
def unwrap_output_dict(strategy, grouped_outputs, mode):
"""Unwrap the list of outputs contained in the PerReplica parameters."""
if mode == ModeKeys.PREDICT:
return flatten_per_replica_values(strategy, grouped_outputs)
    # In the case of fit/eval, grouped_outputs is a dict, whereas in predict,
    # the output has the same structure as the model output. They need to be
    # treated differently.
total_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, grouped_outputs["total_loss"][0], axis=None
)
output_losses = flatten_per_replica_values(
strategy, grouped_outputs["output_losses"]
)
metrics = flatten_per_replica_values(strategy, grouped_outputs["metrics"])
batch_size = strategy.reduce(
tf.distribute.ReduceOp.SUM, grouped_outputs["batch_size"], axis=None
)
if (
backend.is_tpu_strategy(strategy)
and tf.compat.v1.executing_eagerly_outside_functions()
):
# Choose 1 value per replica in the TPU case since all replicas produce
# the same output.
        # We only do this in eager mode for now, since this function is used
        # in both graph and eager mode; in the graph case we currently don't
        # use experimental_run, so this will need to be removed when we
        # converge the graph code path as well.
output_losses = output_losses[:: strategy.num_replicas_in_sync]
metrics = metrics[:: strategy.num_replicas_in_sync]
return {
"total_loss": [total_loss],
"output_losses": output_losses,
"metrics": metrics,
"batch_size": batch_size,
}
def unwrap_outputs(
distribution_strategy, grouped_outputs, with_loss_tensor=False
):
"""Unwrap the list of outputs contained in the PerReplica parameters.
This function calls `flatten_per_replica_values` to parse each of the input
parameters into a list of outputs on the different devices. If we set
`with_loss_tensor` to be True, we also call `reduce` on the list of losses
on the different devices to give us one loss tensor.
Args:
distribution_strategy: DistributionStrategy used to distribute training
and validation.
grouped_outputs: PerReplica outputs returned from the train or test
function that we ran on each device.
with_loss_tensor: Boolean that indicates if we need to add the reduced
loss tensor as one of the outputs.
Returns:
Values of each of the PerReplica outputs.
"""
if not with_loss_tensor:
return flatten_per_replica_values(
distribution_strategy, grouped_outputs
)
if not isinstance(grouped_outputs, list):
grouped_outputs = [grouped_outputs]
# reduce loss tensor before adding it to the list of fetches
loss = distribution_strategy.reduce(
tf.distribute.ReduceOp.SUM, grouped_outputs[0], axis=None
)
all_outputs = flatten_per_replica_values(
distribution_strategy, grouped_outputs[1:]
)
if (
backend.is_tpu_strategy(distribution_strategy)
and tf.compat.v1.executing_eagerly_outside_functions()
):
# Choose 1 value per replica in the TPU case since all replicas produce
# the same output.
        # We only do this in eager mode for now, since this function is used
        # in both graph and eager mode; in the graph case we currently don't
        # use experimental_run, so this will need to be removed when we
        # converge the graph code path as well.
all_outputs = all_outputs[:: distribution_strategy.num_replicas_in_sync]
return [loss] + all_outputs
def flatten_per_replica_values(distribution_strategy, per_replica_values):
"""Unwraps and flattens a nest of PerReplica parameters.
PerReplica values have one value associated with each device. Each entry in
the PerReplica dict has a device `key` and the corresponding value on the
device as the `value`. In this function we take a PerReplica value or a list
of PerReplica values and return all the values in the PerReplica dict.
Args:
distribution_strategy: DistributionStrategy used to distribute training
and validation.
per_replica_values: List of PerReplica object or a single PerReplica
object.
Returns:
List of values of all the PerReplica objects.
"""
# This function takes a PerReplica object or a list of PerReplica objects
# and returns all the values associated with it.
return [
e
for flattened in tf.nest.flatten(per_replica_values)
for e in distribution_strategy.unwrap(flattened)
]
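# Illustrative sketch (assumed setup, not from the original source): with a
# two-replica `MirroredStrategy`, a single PerReplica value holding tensors
# t0 (replica 0) and t1 (replica 1) flattens to [t0, t1]; a nest such as
# {"x": pr_x, "y": pr_y} flattens to [x0, x1, y0, y1], following
# `tf.nest.flatten` ordering of the outer structure first.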
def validate_callbacks(input_callbacks, optimizer):
"""Validate whether given callbacks are supported by DistributionStrategy.
Args:
input_callbacks: List of callbacks passed by the user to fit.
optimizer: Optimizer instance used to train the model.
Raises:
ValueError: If `LearningRateScheduler` or `ReduceLROnPlateau` is one of
the callbacks passed.
ValueError: If `write_grads` is one of the parameters passed as part of
the TensorBoard callback.
"""
if input_callbacks:
for callback in input_callbacks:
if isinstance(
callback,
(callbacks.LearningRateScheduler, callbacks.ReduceLROnPlateau),
):
if not isinstance(optimizer, optimizer_v2.OptimizerV2):
raise ValueError(
"You must specify a TF-Keras Optimizer V2 when using "
"%s callback with DistributionStrategy." % callback
)
# If users want to use the TensorBoard callback they cannot use
# certain features of the callback that involve accessing model
# attributes and running ops.
if isinstance(callback, callbacks.TensorBoard):
if getattr(callback, "write_grads", False):
logging.warning(
UserWarning(
"`write_grads` in the TensorBoard callback is not "
"supported when using DistributionStrategy. "
"Setting `write_grads` to `False`."
)
)
callback.write_grads = False
def validate_distributed_dataset_inputs(
distribution_strategy, x, y, sample_weights=None
):
"""Validate all the components of a DistributedValue Dataset input.
Args:
distribution_strategy: The current DistributionStrategy used to call
`fit`/`evaluate`.
x: Input Dataset DistributedValue object. For example, when we use
`MirroredStrategy` this is a PerReplica object with a tensor for each
device set in the dict. x can also be a tuple or dict. The keys of the
dict should match the names of the input layers of the model.
y: Target Dataset DistributedValue object. For example, when we use
`MirroredStrategy` this is a PerReplica object with a tensor for each
device set in the dict. y can also be a tuple or dict. The keys of the
dict should match the names of the output layers of the model.
sample_weights: Sample weights Dataset DistributedValue object. For
example, when we use `MirroredStrategy` this is a PerReplica object
with a tensor for each device set in the dict.
Returns:
The unwrapped values list of the x and y DistributedValues inputs.
Raises:
      ValueError: If x and y do not have support for being evaluated as
        tensors, if x and y contain elements that are not tensors, or if x
        and y contain elements that have a shape or dtype mismatch.
"""
# If the input and target used to call the model are not dataset tensors,
# we need to raise an error. When using a DistributionStrategy, the input
# and targets to a model should be from a `tf.data.Dataset`.
# If each element of x and y are not tensors, we cannot standardize and
# validate the input and targets.
x_values_list = validate_per_replica_inputs(distribution_strategy, x)
if y is not None:
y_values_list = validate_per_replica_inputs(distribution_strategy, y)
else:
y_values_list = None
if sample_weights is not None:
sample_weights_list = validate_per_replica_inputs(
distribution_strategy, sample_weights
)
else:
sample_weights_list = None
# Return the unwrapped values to avoid calling `unwrap` a second time.
return x_values_list, y_values_list, sample_weights_list
def validate_per_replica_inputs(distribution_strategy, x):
"""Validates PerReplica dataset input list.
Args:
distribution_strategy: The current DistributionStrategy used to call
`fit`, `evaluate` and `predict`.
x: A list of PerReplica objects that represent the input or
target values.
Returns:
List containing the first element of each of the PerReplica objects in
the input list.
Raises:
ValueError: If any of the objects in the `per_replica_list` is not a
tensor.
"""
# Convert the inputs and targets into a list of PerReplica objects.
per_replica_list = tf.nest.flatten(x)
x_values_list = []
for x in per_replica_list:
# At this point x should contain only tensors.
x_values = distribution_strategy.unwrap(x)
for value in x_values:
if not tf.is_tensor(value):
raise ValueError(
"Dataset input to the model should be tensors instead "
"they are of type {}".format(type(value))
)
if not tf.executing_eagerly():
# Validate that the shape and dtype of all the elements in x are the
# same.
validate_all_tensor_shapes(x, x_values)
validate_all_tensor_types(x, x_values)
x_values_list.append(x_values[0])
return x_values_list
def validate_all_tensor_types(x, x_values):
x_dtype = x_values[0].dtype
for i in range(1, len(x_values)):
if x_dtype != x_values[i].dtype:
raise ValueError(
"Input tensor dtypes do not match for distributed tensor"
" inputs {}".format(x)
)
def validate_all_tensor_shapes(x, x_values):
# Validate that the shape of all the elements in x have the same shape
x_shape = x_values[0].shape.as_list()
for i in range(1, len(x_values)):
if x_shape != x_values[i].shape.as_list():
raise ValueError(
"Input tensor shapes do not match for distributed tensor"
" inputs {}".format(x)
)
def _wait_for_variable_initialization(session):
"""Utility to wait for variables to be initialized."""
all_variables = backend._get_variables(backend.get_graph())
candidate_vars = []
for v in all_variables:
if not getattr(v, "_keras_initialized", False):
candidate_vars.append(v)
if not candidate_vars:
return
while True:
is_initialized = session.run(
[tf.compat.v1.is_variable_initialized(v) for v in candidate_vars]
)
uninitialized_vars = []
for flag, v in zip(is_initialized, candidate_vars):
if not flag:
uninitialized_vars.append(v)
v._keras_initialized = True
if not uninitialized_vars:
break
def init_restore_or_wait_for_variables():
"""Initialize or restore variables or wait for variables to be
initialized."""
backend._initialize_variables(backend._get_session())
def validate_inputs(x, y):
"""Validate inputs when using DistributionStrategy.
Args:
x: Model Inputs.
y: Model Targets.
Raises:
      ValueError: if input is not a Dataset or a numpy array (when we use
MirroredStrategy).
"""
if isinstance(x, tf.compat.v1.data.Iterator) or isinstance(
y, tf.compat.v1.data.Iterator
):
raise ValueError(
"`DistributionStrategy` does not support inputs of type "
"Iterator. You must pass a `tf.data.Dataset` object or a "
"numpy array as input."
)
def is_dataset_shape_fully_defined(dataset):
"""Returns whether a dataset contains a final partial batch."""
shapes = tf.nest.flatten(tf.compat.v1.data.get_output_shapes(dataset))
unknown_shapes = [s for s in shapes if not s.is_fully_defined()]
return not unknown_shapes
def process_batch_and_step_size(
strategy, inputs, batch_size, steps_per_epoch, mode, validation_split=0.0
):
"""Process the batch size and step size based on input and dist strategy."""
first_x_value = tf.nest.flatten(inputs)[0]
if isinstance(first_x_value, np.ndarray):
num_samples = first_x_value.shape[0]
if validation_split and 0.0 < validation_split < 1.0:
num_samples = int(num_samples * (1 - validation_split))
# Until support for partial batch is implemented across all
# functions and distribution strategy, we pass `mode` to selectively
# relax the constraint to consume all the training samples.
steps_per_epoch, batch_size = get_input_params(
strategy, num_samples, steps_per_epoch, batch_size, mode=mode
)
return batch_size, steps_per_epoch
def get_input_params(
distribution_strategy, num_samples, steps, batch_size, mode=None
):
"""Calculate the number of batches and steps/steps_per_epoch.
Args:
distribution_strategy: The DistributionStrategy used to compile the model.
num_samples: The number of samples from which we determine the batch size
and steps.
steps: The specified number of steps.
batch_size: The specified batch_size.
mode: ModeKey representing whether input will be used for training,
evaluation, or prediction. This is used to relax the constraints on
consuming all the training samples to keep compatibility till we support
partial batches. If none, then partial batches are not allowed.
Returns:
      steps: The steps or steps_per_epoch argument, depending on whether the
        user is calling `fit`, `evaluate` or `predict`. When partial batches
        are allowed, we don't require the number of samples to be consumed
        completely.
batch_size: The batch size to be used in model iterations.
Raises:
ValueError: If the number of batches or steps evaluates to 0.
"""
# TODO(b/118776054): Use global batch size for Keras/DS support.
# Currently this is only supported in TPUStrategy and CoreMirroredStrategy.
use_per_replica_batch = not dist_utils.global_batch_size_supported(
distribution_strategy
)
# TODO(b/128995245): In eager mode, uneven batch sizes are allowed except
# for `fit()` on TPUStrategy.
# In graph mode, the zero batch case in batch norm is not handled due to
# XLA-GPU regression. Uneven batch sizes are not allowed except
# for `test()` and `predict()` on TPUStrategy.
if tf.executing_eagerly():
allow_partial_batch = (
mode != ModeKeys.TRAIN
or not backend.is_tpu_strategy(distribution_strategy)
)
else:
allow_partial_batch = mode == ModeKeys.TRAIN or (
(mode == ModeKeys.PREDICT or mode == ModeKeys.TEST)
and backend.is_tpu_strategy(distribution_strategy)
)
if steps is None:
if batch_size is None:
# If neither the batch size or number of steps are set. We choose
# the global batch size as the minimum of number of samples and 32.
# 32 is chosen to provide backward compatibility.
global_batch_size = min(num_samples, 32)
else:
# If the user provided the batch size we need to handle the case
# between different strategies that use the global/per-replica batch
# size
global_batch_size = batch_size
if use_per_replica_batch:
global_batch_size *= distribution_strategy.num_replicas_in_sync
if allow_partial_batch:
steps = np.ceil(num_samples / global_batch_size).astype(int)
else:
if num_samples % global_batch_size:
raise ValueError(
"The number of samples %s is not divisible by "
"batch size %s." % (num_samples, global_batch_size)
)
steps = num_samples // global_batch_size
else:
if batch_size is None:
# We calculate the batch size based on the number of steps specified
if num_samples % steps:
raise ValueError(
"The number of samples %s is not divisible by "
"steps %s. Please change the number of steps to a "
"value that can consume all the samples"
% (num_samples, steps)
)
global_batch_size = num_samples // steps
else:
# If the user provided the batch size we need to handle the case
# between different strategies that use the global/per-replica batch
# size
global_batch_size = batch_size
if use_per_replica_batch:
global_batch_size *= distribution_strategy.num_replicas_in_sync
min_num_samples = global_batch_size * steps
if allow_partial_batch:
min_num_samples = (
global_batch_size * (steps - 1) + 1 if steps > 1 else 0
)
if num_samples < min_num_samples:
raise ValueError(
"Number of samples %s is less than samples required "
"for specified batch_size %s and steps %s"
% (num_samples, global_batch_size, steps)
)
# We need to return the per replica or global batch size based on the
# strategy
if use_per_replica_batch:
if global_batch_size % distribution_strategy.num_replicas_in_sync:
raise ValueError(
"The batch size (%s) could not be sharded evenly across the "
"sync replicas (%s) in the distribution strategy."
% (
global_batch_size,
distribution_strategy.num_replicas_in_sync,
)
)
batch_size = (
global_batch_size // distribution_strategy.num_replicas_in_sync
)
else:
batch_size = global_batch_size
return steps, batch_size
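# Worked example (illustrative numbers, not from the original source): with a
# strategy that uses per-replica batch sizes and 2 replicas in sync,
# num_samples=1024, batch_size=64 and steps=None give a global batch of
# 64 * 2 = 128 and steps = 1024 // 128 = 8, with the per-replica batch_size
# returned unchanged as 64. Conversely, batch_size=None and steps=8 give a
# global batch of 1024 // 8 = 128, so the returned per-replica batch_size is
# 128 // 2 = 64.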
def get_batch_dimension(iterator):
shapes = tf.nest.flatten(tf.compat.v1.data.get_output_shapes(iterator))
# Take the batch size from the first element, as it should be the same for
# all.
dims = shapes[0].dims
return dims[0] if dims else None
def get_iterator(dataset, distribution_strategy):
with distribution_strategy.scope():
iterator = distribution_strategy.make_dataset_iterator(dataset)
initialize_iterator(iterator, distribution_strategy)
return iterator
def initialize_iterator(iterator, distribution_strategy):
with distribution_strategy.scope():
init_op = tf.group(iterator.initializer)
if not tf.executing_eagerly():
backend.get_session((init_op,)).run(init_op)
def _get_input_from_iterator(iterator, model):
"""Get elements from the iterator and verify the input shape and type."""
next_element = iterator.get_next()
    # `len(nest.flatten(x))` does not count empty elements such as {}:
    # len(nest.flatten([[0, 1, 2], {}])) is 3, not 4. The `next_element` is
    # flattened in `_prepare_feed_values` to work around that; empty elements
    # are filtered out as part of the flattening.
if len(tf.nest.flatten(next_element)) == len(model.inputs):
x = next_element
y = None
sample_weights = None
elif len(tf.nest.flatten(next_element)) == (
len(model.inputs) + len(model.outputs)
):
x, y = next_element
sample_weights = None
else:
x, y, sample_weights = next_element
# Validate that all the elements in x and y are of the same type and shape.
validate_distributed_dataset_inputs(
model._distribution_strategy, x, y, sample_weights
)
return x, y, sample_weights
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
"""Prepare feed values to the model execution function.
Args:
model: Model to prepare feed values for.
inputs: List or dict of model inputs.
targets: Optional list of model targets.
sample_weights: Optional list of sample weight arrays.
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
Returns:
Feed values for the model in the given mode.
"""
strategy = model._distribution_strategy
inputs, targets, sample_weights = _get_input_from_iterator(inputs, model)
if backend.is_tpu_strategy(strategy):
if sample_weights is not None:
raise ValueError("TPUStrategy does not support sample weights.")
# When the inputs are dict, then we want to flatten it in the same order as
# the input layers, such that the data are fed into the input layers in the
# correct order.
if isinstance(inputs, dict):
inputs = [inputs[key] for key in model._feed_input_names]
if is_distributing_by_cloning(model):
inputs = flatten_per_replica_values(strategy, inputs)
targets = flatten_per_replica_values(strategy, targets)
# Expand 1-dimensional inputs.
        # TODO(b/124535720): Remove once this standardize-data logic is shared
# with main flow.
inputs, targets = tf.nest.map_structure(
training_utils_v1.standardize_single_array, (inputs, targets)
)
else:
inputs = training_utils_v1.ModelInputs(inputs).as_list()
if mode == ModeKeys.PREDICT:
sample_weights = []
targets = []
elif sample_weights is not None and is_distributing_by_cloning(model):
if tf.executing_eagerly() and not model._compile_distribution:
raise NotImplementedError(
"`sample_weight` is not supported when using "
"tf.distribute.Strategy in eager mode and "
"cloning=True."
)
sample_weights = flatten_per_replica_values(strategy, sample_weights)
ins = [inputs, targets, sample_weights]
return tuple(ins)
def is_distributing_by_cloning(model):
"""Decide whether this model is going to be distributed via cloning.
We are going to distribute the model by cloning in graph mode.
Args:
model: TF-Keras model to distribute.
Returns:
True if the `model` is going to be distributed using cloning and False
otherwise.
"""
if (
backend.is_tpu_strategy(model._distribution_strategy)
and tf.executing_eagerly
): # b/137580852
return False
elif tf.compat.v1.executing_eagerly_outside_functions():
return bool(model._compile_distribution)
return True
def _custom_compile_for_predict(model):
"""Custom compile for TPU predict mode."""
if not model.built:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the
# first time the model gets called on training data.
return
model._is_compiled = True
model.total_loss = None
model.train_function = None
model.test_function = None
model.predict_function = None
def _build_network_on_replica(model, mode, inputs=None, targets=None):
"""Build an updated model on replicas.
We create a new TF-Keras model while sharing the variables from the old
graph. Building a new sub-graph is required since the original keras model
creates placeholders for the input and the output that are not accessible
till we call iterator.get_next() inside the step_fn for
`fit`/`evaluate`/`predict`.
The sharing of weights and layers between the old and the new model
guarantee that we're using Strategy variables and any updates on either
model are reflected correctly in callbacks and loop iterations.
We need to make sure we share the optimizers between the old and the new
model as well so that optimizer state is not lost if the user is running fit
multiple times.
Args:
model: Model to be replicated across Replicas
mode: Which of fit/eval/predict is building the distributed network
inputs: Input variables to be passed to the model
targets: Target tensor to be passed to model.compile
Returns:
A new model with shared layers with the old model.
"""
# Need to do imports here since we run into a circular dependency error.
from tf_keras import models
from tf_keras.engine import sequential
    # We rely on the internal methods to avoid exposing `share_weights` in the
    # public API.
if isinstance(model, sequential.Sequential):
updated_model = models._clone_sequential_model(
model, input_tensors=inputs, layer_fn=models.share_weights
)
else:
updated_model = models._clone_functional_model(
model, input_tensors=inputs, layer_fn=models.share_weights
)
# Callable losses added directly to a functional Model need to be added
# here.
updated_model._callable_losses = model._callable_losses
    # Recast all low-precision outputs back to float32 since we only cast the
    # inputs to bfloat16, not the targets. This is done so that we can preserve
# precision when calculating the loss value.
def _upcast_low_precision_outputs(output):
if output.dtype == tf.bfloat16:
return tf.cast(output, tf.float32)
else:
return output
updated_model.outputs = [
_upcast_low_precision_outputs(o) for o in updated_model.outputs
]
if isinstance(targets, tuple):
targets = tf.nest.flatten(targets)
if mode == ModeKeys.PREDICT and inputs is not None: # TPU predict case
_custom_compile_for_predict(updated_model)
else:
updated_model.compile(
model.optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model._compile_metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(
model._compile_weighted_metrics
),
target_tensors=targets,
)
return updated_model
def _build_distributed_network(
model, strategy, mode, inputs=None, targets=None
):
"""Create a cloned model on each replica."""
with backend.get_graph().as_default(), strategy.scope():
distributed_model = strategy.extended.call_for_each_replica(
_build_network_on_replica, args=(model, mode, inputs, targets)
)
set_distributed_model(model, mode, distributed_model)
def _clone_and_build_model(model, mode, inputs=None, targets=None):
"""Clone and build the given keras_model."""
# We need to set the import here since we run into a circular dependency
# error.
from tf_keras import models
cloned_model = models.clone_model(model, input_tensors=inputs)
# Compile and build model.
if isinstance(model.optimizer, optimizers.TFOptimizer):
optimizer = model.optimizer
else:
optimizer_config = model.optimizer.get_config()
optimizer = model.optimizer.__class__.from_config(optimizer_config)
    # Recast all low-precision outputs back to float32 since we only cast
    # the inputs to bfloat16, not the targets. This is done so that we can
# preserve precision when calculating the loss value.
def _upcast_low_precision_outputs(output):
if output.dtype == tf.bfloat16:
return tf.cast(output, tf.float32)
else:
return output
cloned_model.outputs = [
_upcast_low_precision_outputs(o) for o in cloned_model.outputs
]
if isinstance(targets, tuple):
targets = tf.nest.flatten(targets)
if mode == ModeKeys.PREDICT and inputs is not None: # TPU predict case
_custom_compile_for_predict(cloned_model)
else:
cloned_model.compile(
optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model._compile_metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(
model._compile_weighted_metrics
),
target_tensors=targets,
)
return cloned_model
def clone_model_on_replicas(model, strategy, mode, inputs=None, targets=None):
"""Create a cloned model on each replica."""
with backend.get_graph().as_default(), strategy.scope():
distributed_model = strategy.extended.call_for_each_replica(
_clone_and_build_model, args=(model, mode, inputs, targets)
)
set_distributed_model(model, mode, distributed_model)
if mode == ModeKeys.TRAIN:
model._make_callback_model(distributed_model)
def _make_execution_function(model, mode):
"""Makes or reuses function to run one step of distributed model
execution."""
if is_distributing_by_cloning(model):
return _make_execution_function_with_cloning(model, mode)
distributed_function = get_distributed_function(model, mode)
if distributed_function:
return distributed_function
distribution_function = _make_execution_function_without_cloning(
model, mode
)
set_distributed_function(model, mode, distribution_function)
return distribution_function
def _make_execution_function_without_cloning(model, mode):
"""Creates a function to run one step of distributed model execution."""
strategy = model._distribution_strategy
with strategy.scope():
per_replica_function = _make_replica_execution_function(model, mode)
def distributed_function(input_fn):
"""A single step of the distributed execution across replicas."""
x, y, sample_weights = input_fn()
# Call `Model.{train,test,predict}_on_batch` on every replica
# passing PerReplicas as arguments. On every replica inside this
# call, each PerReplica object will return the value for that
# replica. The outputs are PerReplicas too.
outputs = strategy.run(
per_replica_function, args=(x, y, sample_weights)
)
# Out of PerReplica outputs reduce or pick values to return.
all_outputs = unwrap_outputs(
strategy, outputs, with_loss_tensor=(mode != ModeKeys.PREDICT)
)
return all_outputs
if not model.run_eagerly:
distributed_function = tf.function(distributed_function)
def execution_function(input_fn):
# `numpy` translates Tensors to values in Eager mode.
return [out.numpy() for out in distributed_function(input_fn)]
else:
execution_function = distributed_function
return execution_function
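# Editor's note: the function below is an illustrative, hedged sketch (not part
# of the original module) of the same "run per replica, then reduce" pattern
# that `_make_execution_function_without_cloning` builds above, expressed with
# the public `tf.distribute` API. The replica function and the SUM reduction
# are assumptions made for illustration only.
def _example_distributed_step_pattern(strategy, replica_fn, dataset):
    """Minimal sketch: run `replica_fn` on every replica and reduce the loss."""
    @tf.function
    def distributed_step(batch):
        per_replica_losses = strategy.run(replica_fn, args=(batch,))
        # Sum the per-replica losses into a single scalar, mirroring the
        # `with_loss_tensor=True` unwrapping performed above.
        return strategy.reduce(
            tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
        )
    iterator = iter(strategy.experimental_distribute_dataset(dataset))
    return distributed_step(next(iterator))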
def _make_replica_execution_function(model, mode):
"""A single step of the distributed execution on a replica."""
if mode == ModeKeys.TRAIN:
func = model.train_on_batch
elif mode == ModeKeys.TEST:
func = model.test_on_batch
else:
def predict_on_batch(x, y=None, sample_weights=None):
del y, sample_weights
return model.predict_on_batch(x)
func = predict_on_batch
if mode != ModeKeys.PREDICT:
# `reset_metrics` is set to False to maintain stateful metrics across
# batch-level calls.
func = functools.partial(func, reset_metrics=False)
return func
def _make_replicated_models_with_cloning(model, mode):
"""Build models on each replica."""
strategy = model._distribution_strategy
# If distributed_model is not built, create one for `mode`.
if model._compile_distribution:
clone_model_on_replicas(model, strategy, mode)
else:
_build_distributed_network(model, strategy, mode)
def _make_execution_function_with_cloning(model, mode):
"""Clones or re-uses models to run one step of distributed model
execution."""
distributed_model = get_distributed_model(model, mode)
# TODO(b/134069401): Create a cache for the distributed model and exec
# function that incorporates additional attributes to be part of the cache
# key than just the mode.
    # If the distributed model for a particular `mode` is already built, use
    # the `_distributed_function` cached on that distributed model.
    # If you have updated the sample_weight_mode on the model, then you will
    # need to recompile metrics and recreate the execution function. This is
    # indicated by the `_recompile_exec_function` property.
    if (
        distributed_model
        and hasattr(distributed_model, "_distributed_function")
        and not (
            hasattr(distributed_model, "_recompile_exec_function")
            and distributed_model._recompile_exec_function
        )
    ):
        return distributed_model._distributed_function
if not distributed_model:
_make_replicated_models_with_cloning(model, mode)
distributed_model = get_distributed_model(model, mode)
assert distributed_model
# Also create an execution function on that distributed model.
if tf.executing_eagerly():
distributed_function = _make_eager_execution_function(model, mode)
else:
distributed_function = _make_graph_execution_function(model, mode)
# We cache the distributed execution function on the model since creating
# distributed models and execution functions are expensive.
distributed_model._distributed_function = distributed_function
distributed_model._recompile_exec_function = False
return distributed_function
def _make_graph_execution_function(model, mode):
"""Makes function to run one step of distributed model in graph mode."""
def _per_replica_function(model):
f = model._make_execution_function(mode)
return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)
strategy = model._distribution_strategy
with strategy.scope():
# Create train ops on each of the devices when we call
# `_per_replica_fit_function`.
(
grouped_inputs,
grouped_outputs,
grouped_updates,
grouped_session_args,
) = strategy.extended.call_for_each_replica(
_per_replica_function, args=(get_distributed_model(model, mode),)
)
# Initialize the variables in the replicated model. This is necessary
# for multi-worker training because on some workers, initialization is
# not needed. This method does initialization or waiting for
# initialization according to the context object of distribute
# coordinator.
init_restore_or_wait_for_variables()
# Unwrap all the per device values returned from
# `call_for_each_replica`. Unwrapping per device values gives you a
# list of values that can be used to construct a new train function that
# is composed of update ops on all the devices over which the model is
# distributed.
(
all_inputs,
all_outputs,
all_updates,
all_session_args,
) = unwrap_values(
strategy,
grouped_inputs,
grouped_outputs,
grouped_updates,
grouped_session_args,
with_loss_tensor=(mode != ModeKeys.PREDICT),
)
return backend.function(
all_inputs,
all_outputs,
updates=all_updates,
name=f"distributed_{mode}_function",
**all_session_args,
)
def _make_eager_execution_function(model, mode):
"""Makes function to run one step of distributed model eager execution."""
def _per_replica_function(model):
f = model._make_execution_function(mode)
return (f.inputs, f.outputs)
# NOTE(priyag): Try creating a new FuncGraph within DS scope instead of
# using the global one.
strategy = model._distribution_strategy
global_graph = backend.get_graph()
with global_graph.as_default(), strategy.scope():
# First we gather the relevant portions of the model across all
# replicas. `backend._scratch_graph(global_graph)` signals to Keras
# that it should not lift to a separate graph when creating the
# per-replica functions.
with backend._scratch_graph(global_graph):
# Create train ops on each of the devices when we call
# `_per_replica_fit_function`.
grouped = strategy.extended.call_for_each_replica(
_per_replica_function,
args=(get_distributed_model(model, mode),),
)
grouped_inputs, grouped_outputs = grouped
# Unwrap all the per device values returned from
# `call_for_each_replica`. Unwrapping per device values gives you a
# list of values that can be used to construct a new train function
# that is composed of inputs/outputs on all the devices over which
# the model is distributed.
(all_inputs, all_outputs, _, _) = unwrap_values(
strategy,
grouped_inputs,
grouped_outputs,
with_loss_tensor=(mode != ModeKeys.PREDICT),
)
# Finally, a joint TF-Keras function is created; this one will be
# created in a separate FuncGraph.
return backend.function(
all_inputs,
all_outputs,
name=f"eager_distributed_{mode}_function",
)
def _copy_weights_to_distributed_model(original_model, mode):
"""Copies weights from original model to distributed models."""
strategy = original_model._distribution_strategy
distributed_model = get_distributed_model(original_model, mode)
if strategy:
# Copy the weights from the original model to each of the replicated
# models.
orig_model_weights = original_model.get_weights()
first_model = strategy.unwrap(distributed_model)[0]
set_weights(strategy, first_model, orig_model_weights)
def _copy_weights_to_original_model(model, mode):
"""Copies weights from first distributed model back to original model."""
if model._distribution_strategy and mode == ModeKeys.TRAIN:
distributed_model = get_distributed_model(model, mode)
updated_weights = model._distribution_strategy.unwrap(
distributed_model
)[0].get_weights()
model.set_weights(updated_weights)
def _per_replica_aggregate_batch(strategy, batch_outs, model, mode):
"""Aggregates the per-replica batch-level outputs from a distributed
step."""
if strategy is not None and mode == ModeKeys.PREDICT:
total_batch_outs = []
for i in range(len(model.outputs)):
num_replicas = strategy.num_replicas_in_sync
nested_outs = batch_outs[
i * num_replicas : i * num_replicas + num_replicas
]
total_batch_outs.append(
concat_along_batch_dimension(tf.nest.flatten(nested_outs))
)
return total_batch_outs
return batch_outs
def _reset_metrics(model):
if model._distribution_strategy:
for mode in [ModeKeys.TRAIN, ModeKeys.TEST, ModeKeys.PREDICT]:
distributed_model = get_distributed_model(model, mode)
if distributed_model:
first_model = model._distribution_strategy.unwrap(
distributed_model
)[0]
first_model.reset_metrics()
def get_distributed_model(model, mode):
key = _generate_cache_key(mode)
return model._distributed_model_cache.get(key, None)
def set_distributed_model(model, mode, distributed_model):
key = _generate_cache_key(mode)
model._distributed_model_cache[key] = distributed_model
def get_distributed_function(model, mode):
key = _generate_cache_key(mode)
return model._distributed_function_cache.get(key, None)
def set_distributed_function(model, mode, distributed_function):
key = _generate_cache_key(mode)
model._distributed_function_cache[key] = distributed_function
def _generate_cache_key(mode):
key = hash(mode)
return key
@tf_contextlib.contextmanager
def distributed_scope(strategy, learning_phase):
with strategy.scope(), backend.learning_phase_scope(learning_phase):
yield
def is_current_worker_chief():
return dc.get_current_worker_context().is_chief
def filter_distributed_callbacks(callbacks_list, model):
"""Filter Callbacks based on the worker context when running multi-worker.
Args:
callbacks_list: A list of `Callback` instances.
model: TF-Keras model instance.
Returns:
The list of `Callback` instances that should be run on this worker.
"""
if not model._in_multi_worker_mode():
raise ValueError(
"filter_distributed_callbacks() should only be called when "
"TF-Keras is in multi worker mode."
)
callbacks_list = callbacks_list or []
if not [
c for c in callbacks_list if isinstance(c, callbacks.ModelCheckpoint)
]:
# TODO(rchao): Consider providing a ModelCheckpoint here if the user
# fails to (possibly with tempfile directory).
logging.warning(
"ModelCheckpoint callback is not provided. "
"Workers will need to restart training if any fails."
)
if callbacks_list is None or is_current_worker_chief():
return callbacks_list
# Some Callbacks should only run on the chief worker.
return [
callback
for callback in callbacks_list
if not callback._chief_worker_only
]
def _update_sample_weight_modes(model, mode, sample_weights):
"""Update sample_weight_mode of the distributed model."""
if is_distributing_by_cloning(model):
distributed_model = get_distributed_model(model, mode)
if not distributed_model:
_make_replicated_models_with_cloning(model, mode)
distributed_model = get_distributed_model(model, mode)
distributed_model._recompile_exec_function = any(
[e.sample_weights_mismatch() for e in model._training_endpoints]
)
if sample_weights:
distributed_models = flatten_per_replica_values(
model._distribution_strategy, distributed_model
)
# sample_weights is a tuple of 1 list where the number of elements
# in the list is equal to the number of replicas in sync.
sample_weights = sample_weights[0]
if sample_weights and None not in sample_weights:
for m, sw in zip(distributed_models, sample_weights):
m._update_sample_weight_modes(sample_weights=[sw])
def concat_along_batch_dimension(outputs):
"""Concats prediction outputs along the batch dimension."""
if isinstance(outputs[0], tf.SparseTensor):
return tf.sparse.concat(axis=0, sp_inputs=outputs)
if isinstance(outputs[0], tf.RaggedTensor):
return tf.concat(outputs, axis=0)
return np.concatenate(outputs)
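# Editor's note: a hedged usage sketch (not part of the original module) showing
# how `concat_along_batch_dimension` merges per-replica prediction outputs. The
# two fabricated replica outputs below are assumptions for illustration.
def _example_concat_predictions():
    """Minimal sketch: concatenate dense per-replica outputs along axis 0."""
    replica_0 = np.ones((2, 4), dtype="float32")
    replica_1 = np.zeros((3, 4), dtype="float32")
    merged = concat_along_batch_dimension([replica_0, replica_1])
    return merged.shape  # (5, 4)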
| tf-keras/tf_keras/distribute/distributed_training_utils_v1.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/distributed_training_utils_v1.py",
"repo_id": "tf-keras",
"token_count": 19084
} | 210 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of ModelCheckpoint callback."""
import os
import sys
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import callbacks
from tf_keras.distribute import multi_worker_testing_utils
class ModelCheckpointTest(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=["eager"],
file_format=["h5", "tf"],
save_weights_only=[True, False],
)
)
def testCheckpointExists(self, file_format, save_weights_only):
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(64, 2)
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
saving_dir = self.get_temp_dir()
saving_filepath = os.path.join(saving_dir, "checkpoint." + file_format)
callbacks_list = [
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=save_weights_only
)
]
self.assertFalse(tf.io.gfile.exists(saving_filepath))
model.fit(
x=train_ds, epochs=2, steps_per_epoch=2, callbacks=callbacks_list
)
tf_saved_model_exists = tf.io.gfile.exists(saving_filepath)
tf_weights_only_checkpoint_exists = tf.io.gfile.exists(
saving_filepath + ".index"
)
self.assertTrue(
tf_saved_model_exists or tf_weights_only_checkpoint_exists
)
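# Editor's note: the helper below is an illustrative, hedged sketch (not part of
# the original test) of the ModelCheckpoint usage exercised above, written
# against the same testing utilities. The file name, epoch count, and
# steps_per_epoch are assumptions for illustration.
def _example_fit_with_checkpoint(saving_dir):
    """Minimal sketch: fit a small model while saving weights each epoch."""
    train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(64, 2)
    model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
    checkpoint_cb = callbacks.ModelCheckpoint(
        filepath=os.path.join(saving_dir, "checkpoint.tf"),
        save_weights_only=True,
    )
    model.fit(
        x=train_ds, epochs=1, steps_per_epoch=2, callbacks=[checkpoint_cb]
    )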
if __name__ == "__main__":
with tf.compat.v1.test.mock.patch.object(sys, "exit", os._exit):
tf.test.main()
| tf-keras/tf_keras/distribute/model_checkpoint_test.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/model_checkpoint_test.py",
"repo_id": "tf-keras",
"token_count": 894
} | 211 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras utilities for DTensor unit test."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
# isort: off
from tensorflow.dtensor.python import api as dtensor_api
from tensorflow.python.eager import context
_DEFAULT_GPU_MEMORY_LIMIT = 200 # MB
class DTensorBaseTest(tf.test.TestCase, parameterized.TestCase):
"""Provides comparison helper for dtensor vs local results."""
@classmethod
def setUpClass(cls):
super(DTensorBaseTest, cls).setUpClass()
def tearDown(self):
super().tearDown()
# Make sure all async ops finish.
context.async_wait()
# TODO(hthu): Remove the reset once we fixed the CopyToMesh with
# DefaultMesh placement issue.
reset_dtensor()
@staticmethod
def configTestMesh(device_type_mesh_map):
"""Configs corresponding mesh given test context.
If runs on a CPU mesh, set virtual device on CPU.
If runs on a GPU mesh, sets virtual device on GPU with proper memory
limits.
if runs on a TPU mesh, initializes TPU system.
Args:
device_type_mesh_map: A dictionary containing device_type -> mesh
mapping.
Returns:
A properly configured mesh for use in test.
"""
reset_context()
def get_mesh(device_type):
mesh = device_type_mesh_map.get(device_type, None)
if mesh is None:
dt = device_type
raise ValueError(f"Requires a {dt} mesh to run test on {dt}.")
return mesh
mesh = None
if tf.config.list_physical_devices("GPU"):
mesh = get_mesh("GPU")
reset_logical_devices("GPU", np.prod(mesh.shape()))
else:
mesh = get_mesh("CPU")
reset_logical_devices("CPU", np.prod(mesh.shape()))
context.ensure_initialized()
return mesh
def create_device_array(shape, device_type):
device_count = np.prod(shape)
return np.asarray(
[
tf.DeviceSpec(
job="localhost/replica:0/task:0",
device_type=device_type,
device_index=i,
)
for i in range(device_count)
]
).reshape(shape)
def create_device_list(shape, device_type):
devices = create_device_array(shape, device_type)
return np.ravel(devices).tolist()
def create_device_ids_array(shape):
device_count = np.prod(shape)
return np.arange(device_count).reshape(shape)
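# Editor's note: a hedged sketch (not part of the original utilities) showing
# how the helpers above are typically combined to describe a 1-D mesh of
# logical CPU devices. The mesh shape and device type are assumptions made for
# illustration only.
def _example_device_arrays():
    """Minimal sketch: build device specs and matching device ids for a mesh."""
    mesh_shape = (2,)
    device_specs = create_device_list(mesh_shape, "CPU")  # list of DeviceSpec
    device_ids = create_device_ids_array(mesh_shape)  # array([0, 1])
    return device_specs, device_ids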
def reset_context():
context._reset_context()
def reset_logical_devices(device_type, count):
"""Resets logical devices for CPU/GPU.
Logical devices can only be instantiated once on a particular context. For
now, context re-use is triggering some function duplication errors, so we
reset the context on each call.
Args:
device_type: The device_type to reset.
count: numbers of virtual device to reset to.
"""
if device_type.upper() not in ["CPU", "GPU"]:
raise ValueError(
"resetting logical device for non-supported device type: "
f"{device_type}"
)
reset_context()
cpus = tf.config.list_physical_devices("CPU")
if device_type.upper() == "GPU":
gpus = tf.config.list_physical_devices(device_type)
tf.config.set_logical_device_configuration(
gpus[0],
[
tf.config.LogicalDeviceConfiguration(
memory_limit=_DEFAULT_GPU_MEMORY_LIMIT
),
]
* count,
)
# Always config CPU mesh as the host mesh for DTensor
tf.config.set_logical_device_configuration(
cpus[0],
[
tf.config.LogicalDeviceConfiguration(),
]
* count,
)
def reset_dtensor():
dtensor_api._reset()
| tf-keras/tf_keras/dtensor/test_util.py/0 | {
"file_path": "tf-keras/tf_keras/dtensor/test_util.py",
"repo_id": "tf-keras",
"token_count": 1853
} | 212 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adapter module that convert different input data objects into tf.dataset."""
import abc
import contextlib
import functools
import itertools
import math
import random
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.distribute import distributed_training_utils
from tf_keras.engine import training_utils
from tf_keras.utils import data_utils
from tf_keras.utils import dataset_creator
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.distribute.input_lib import (
DistributedDataset,
)
from tensorflow.python.eager import context
from tensorflow.python.framework import type_spec
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.data.ops import (
from_sparse_tensor_slices_op,
)
from tensorflow.python.data.ops import from_generator_op
from tensorflow.python.data.ops import range_op
from tensorflow.python.data.ops import from_tensors_op
from tensorflow.python.data.ops import from_tensor_slices_op
try:
import pandas as pd
except ImportError:
pd = None
class DataAdapter(object, metaclass=abc.ABCMeta):
"""Base class for input data adapter.
    In TF 2.0, tf.data is the preferred API for users to feed in data. In
    order to simplify the training code path, all input data objects will be
    converted to `tf.data.Dataset` if possible.
    Note that since this class mainly targets TF 2.0, it makes a number of
    assumptions under the hood, e.g. eager context by default, distribution
    strategy, etc. In the meantime, some legacy feature support might be
    dropped, e.g. the v1 Iterator from the dataset API.
The sample usage of this class is like:
```
x = tf.data.Dataset.range(100)
adapter_cls = [NumpyArrayDataAdapter, ..., DatasetAdapter]
applicable_adapters = [cls for cls in adapter_cls if cls.can_handle(x)]
if len(applicable_adapters) != 1:
raise ValueError("Expect only one adapter class to handle the input")
dataset = applicable_adapters[0](x).get_dataset()
for data in dataset:
# training
```
"""
@staticmethod
def can_handle(x, y=None):
"""Whether the current DataAdapter could handle the input x and y.
        Structure-wise, x and y can be a single object, a list of objects if
        there are multiple inputs/outputs, or a dictionary of objects when the
        inputs/outputs are named.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
Returns:
boolean
"""
raise NotImplementedError
@abc.abstractmethod
def __init__(self, x, y=None, **kwargs):
"""Create a DataAdapter based on data inputs.
        The caller must make sure to call `can_handle()` first before invoking
        this method. Providing an unsupported data type will result in
        unexpected behavior.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
**kwargs: Other keyword arguments for DataAdapter during the
construction of the tf.dataset.Dataset. For example:
- Numpy data might have `sample_weights` which will be used for
weighting the loss function during training.
- Numpy data might need to have `batch_size` parameter when
constructing the dataset and iterator.
- Certain input might need to be distribution strategy aware. When
`distribution_strategy` is passed, the created dataset need to
respect the strategy.
DataAdapter might choose to ignore any keyword argument if it
doesn't use it, or raise exception if any required argument is not
provided.
"""
if not self.can_handle(x, y):
raise ValueError(f"{self.__class__} Cannot handle input {x}, {y}")
@abc.abstractmethod
def get_dataset(self):
"""Get a dataset instance for the current DataAdapter.
Note that the dataset returned does not repeat for epoch, so caller
might need to create new iterator for the same dataset at the beginning
of the epoch. This behavior might change in the future.
Returns:
A `tf.data.Dataset`. Caller might use the dataset in different
context, e.g. iter(dataset) in eager to get the value directly, or in
graph mode, provide the iterator tensor to TF-Keras model function.
"""
raise NotImplementedError
@abc.abstractmethod
def get_size(self):
"""Return the size (number of batches) for the dataset created.
        For certain types of data input, the number of batches is known, e.g.
        for Numpy data, the size is the same as (number_of_elements /
        batch_size). Whereas for a dataset or python generator, the size is
        unknown since it may or may not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is unknown.
The caller could use this to control the loop of training, show
progress bar, or handle unexpected StopIteration error.
"""
raise NotImplementedError
@abc.abstractmethod
def batch_size(self):
"""Return the batch size of the dataset created.
        For certain types of data input, the batch size is known, and even
        required, e.g. for numpy arrays. Whereas for a dataset, the batch size
        is unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
def representative_batch_size(self):
"""Return a representative size for batches in the dataset.
This is not guaranteed to be the batch size for all batches in the
dataset. It just needs to be a rough approximation for batch sizes in
the dataset.
Returns:
int, a representative size for batches found in the dataset,
or None if it is unknown.
"""
return self.batch_size()
@abc.abstractmethod
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@abc.abstractmethod
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
@abc.abstractmethod
def should_recreate_iterator(self):
"""Returns whether a new iterator should be created every epoch."""
raise NotImplementedError
def get_samples(self):
"""Returns number of samples in the data, or `None`."""
if not self.get_size() or not self.batch_size():
return None
total_sample = self.get_size() * self.batch_size()
if self.has_partial_batch():
total_sample -= self.batch_size() - self.partial_batch_size()
return total_sample
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
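# Editor's note: the helper below is a hedged, runnable version (not part of
# the original module) of the selection pattern sketched in the class docstring
# above. It relies on `select_data_adapter` defined later in this module; the
# batch size is an assumption and not every adapter uses it.
def _example_adapter_usage(x, y=None):
    """Minimal sketch: pick the matching adapter and grab one batch."""
    adapter_cls = select_data_adapter(x, y)
    adapter = adapter_cls(x, y, batch_size=32)
    dataset = adapter.get_dataset()
    return next(iter(dataset))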
class TensorLikeDataAdapter(DataAdapter):
"""Adapter that handles Tensor-like objects, e.g. EagerTensor and NumPy."""
@staticmethod
def can_handle(x, y=None):
# TODO(kaftan): Check performance implications of using a flatten
# here for other types of inputs.
flat_inputs = tf.nest.flatten(x)
if y is not None:
flat_inputs += tf.nest.flatten(y)
tensor_types = _get_tensor_types()
def _is_tensor(v):
if isinstance(v, tensor_types):
return True
return False
return all(_is_tensor(v) for v in flat_inputs)
def __init__(
self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
epochs=1,
steps=None,
shuffle=False,
**kwargs,
):
super().__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes
)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True
)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
num_samples = set(
int(i.shape[0]) for i in tf.nest.flatten(inputs)
).pop()
_check_data_cardinality(inputs)
# If batch_size is not passed but steps is, calculate from the input
# data. Defaults to `32` for backwards compatibility.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
num_full_batches = int(num_samples // batch_size)
self._partial_batch_size = num_samples % batch_size
if isinstance(shuffle, str):
shuffle = shuffle.lower()
self._shuffle = shuffle
# Vectorized version of shuffle.
# This is a performance improvement over using `from_tensor_slices`.
# The indices of the data are shuffled and batched, and these indices
# are then zipped with the data and used to extract a batch of the data
# at each step. The performance improvements here come from:
# 1. vectorized batch using gather
# 2. parallelized map
# 3. pipelined permutation generation
# 4. optimized permutation batching
# 5. disabled static optimizations
indices_dataset = tf.data.Dataset.range(1)
if shuffle != "batch":
indices_dataset = indices_dataset.repeat(epochs)
def permutation(_):
# It turns out to be more performant to make a new set of indices
# rather than reusing the same range Tensor. (presumably because of
# buffer forwarding.)
indices = tf.range(num_samples, dtype=tf.int64)
if shuffle and shuffle != "batch":
indices = tf.random.shuffle(indices)
return indices
# We prefetch a single element. Computing large permutations can take
# quite a while so we don't want to wait for prefetching over an epoch
# boundary to trigger the next permutation. On the other hand, too many
# simultaneous shuffles can contend on a hardware level and degrade all
# performance.
indices_dataset = indices_dataset.map(permutation).prefetch(1)
def slice_batch_indices(indices):
"""Convert a Tensor of indices into a dataset of batched indices.
This step can be accomplished in several ways. The most natural is
to slice the Tensor in a Dataset map. (With a condition on the upper
index to handle the partial batch.) However it turns out that
coercing the Tensor into a shape which is divisible by the batch
size (and handling the last partial batch separately) allows for a
much more favorable memory access pattern and improved performance.
Args:
indices: Tensor which determines the data order for an entire
epoch.
Returns:
A Dataset of batched indices.
"""
num_in_full_batch = num_full_batches * batch_size
first_k_indices = tf.slice(indices, [0], [num_in_full_batch])
first_k_indices = tf.reshape(
first_k_indices, [num_full_batches, batch_size]
)
flat_dataset = tf.data.Dataset.from_tensor_slices(first_k_indices)
if self._partial_batch_size:
index_remainder = tf.data.Dataset.from_tensors(
tf.slice(
indices, [num_in_full_batch], [self._partial_batch_size]
)
)
flat_dataset = flat_dataset.concatenate(index_remainder)
if shuffle == "batch":
# 1024 is a magic constant that has not been properly evaluated
flat_dataset = flat_dataset.shuffle(1024).repeat(epochs)
return flat_dataset
indices_dataset = indices_dataset.flat_map(slice_batch_indices)
dataset = self.slice_inputs(indices_dataset, inputs)
if shuffle == "batch":
def shuffle_batch(*batch):
return tf.nest.map_structure(tf.random.shuffle, batch)
dataset = dataset.map(shuffle_batch)
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.DATA
)
dataset = dataset.with_options(options)
self._dataset = dataset.prefetch(tf.data.AUTOTUNE)
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
dataset = tf.data.Dataset.zip(
(indices_dataset, tf.data.Dataset.from_tensors(inputs).repeat())
)
def grab_batch(i, data):
return tf.nest.map_structure(
lambda d: tf.gather(d, i, axis=0), data
)
dataset = dataset.map(grab_batch, num_parallel_calls=tf.data.AUTOTUNE)
# Default optimizations are disabled to avoid the overhead of
# (unnecessary) input pipeline graph serialization and deserialization
options = tf.data.Options()
options.experimental_optimization.apply_default_optimizations = False
if self._shuffle:
# See b/141490660 for more details.
options.experimental_external_state_policy = (
tf.data.experimental.ExternalStatePolicy.IGNORE
)
dataset = dataset.with_options(options)
return dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._partial_batch_size > 0
def partial_batch_size(self):
return self._partial_batch_size or None
def should_recreate_iterator(self):
# An infinite dataset is always created here.
return False
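# Editor's note: a hedged, self-contained sketch (not part of the original
# module) of the vectorized shuffle-and-gather batching trick used by
# `TensorLikeDataAdapter` above: shuffle indices once, batch the indices, then
# gather rows of the data for each batch of indices. Argument names and the
# default batch size are assumptions for illustration.
def _example_gather_based_batching(features, labels, batch_size=32):
    """Minimal sketch: batch via shuffled index gathers instead of slicing."""
    num_samples = int(tf.nest.flatten(features)[0].shape[0])
    indices = tf.random.shuffle(tf.range(num_samples, dtype=tf.int64))
    index_batches = tf.data.Dataset.from_tensor_slices(indices).batch(
        batch_size
    )
    data = tf.data.Dataset.from_tensors((features, labels)).repeat()
    def grab_batch(batch_indices, batch_data):
        # Vectorized batch extraction: gather the selected rows in one op.
        return tf.nest.map_structure(
            lambda d: tf.gather(d, batch_indices, axis=0), batch_data
        )
    return tf.data.Dataset.zip((index_batches, data)).map(grab_batch)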
class GenericArrayLikeDataAdapter(TensorLikeDataAdapter):
"""Adapter that handles array-like data without forcing it into memory.
This adapter handles array-like datasets that may be too big to fully
fit into memory.
Specifically, this adapter handles any Python class which implements:
`__get_item__`, `__len__`, `shape`, and `dtype` with the same meanings
as Numpy, but it ignores any case where all the inputs are Tensors or Numpy
arrays (because that case is handled by the base TensorLikeDataAdapter).
It ignores scipy sparse matrices and Composite Tensors because those are
handled by the CompositeTensorDataAdapter.
It also does not handle lists/tuples of scalars, because those are handled
by the ListsOfScalarsDataAdapter.
"""
@staticmethod
def can_handle(x, y=None):
flat_inputs = tf.nest.flatten(x)
if y is not None:
flat_inputs += tf.nest.flatten(y)
def _is_array_like(v):
"""Return True if v is a Tensor, array, or is array-like."""
return (
hasattr(v, "__getitem__")
and hasattr(v, "shape")
and hasattr(v, "dtype")
and hasattr(v, "__len__")
)
if not TensorLikeDataAdapter.can_handle(
x, y
) and not CompositeTensorDataAdapter.can_handle(x, y):
return all(_is_array_like(v) for v in flat_inputs)
else:
return False
def __init__(self, *args, **kwargs):
logging.warning(
"Keras is training/fitting/evaluating on array-like data. TF-Keras "
"may not be optimized for this format, so if your input data "
"format is supported by TensorFlow I/O "
"(https://github.com/tensorflow/io) we recommend using that to "
"load a Dataset instead."
)
super().__init__(*args, **kwargs)
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
flat_inputs = tf.nest.flatten(inputs)
def dynamic_shape_like(t):
shape = list(t.shape)
shape[0] = None
return tuple(shape)
flat_dtypes = [inp.dtype for inp in flat_inputs]
contiguous = True
if self._shuffle and self._shuffle != "batch":
contiguous = False
def grab_batch(indices):
"""Grab a batch of data from the inputs."""
# This uses a py_function to avoid converting the array-like
# into a Tensor before slicing it, because converting the array-like
            # to a Tensor may force it into memory.
def py_method(ind):
def slice_array(data):
return training_utils.slice_arrays(
data, ind.numpy(), contiguous=contiguous
)
return [slice_array(inp) for inp in flat_inputs]
flat_out = tf.py_function(py_method, [indices], flat_dtypes)
for v, original_inp in zip(flat_out, flat_inputs):
v.set_shape(dynamic_shape_like(original_inp))
return tf.nest.pack_sequence_as(inputs, flat_out)
dataset = indices_dataset.map(
grab_batch, num_parallel_calls=tf.data.AUTOTUNE
)
return dataset
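# Editor's note: a hedged sketch (not part of the original module) of the kind
# of "array-like" object `GenericArrayLikeDataAdapter` is designed to accept:
# anything exposing `__getitem__`, `__len__`, `shape`, and `dtype` with NumPy
# semantics. Backing the object with an in-memory zeros array is an assumption
# made purely for illustration; a real implementation would read from disk.
class _ExampleArrayLike:
    """Minimal array-like wrapper that serves rows on demand."""
    def __init__(self, rows, columns):
        self._data = np.zeros((rows, columns), dtype="float32")
        self.shape = self._data.shape
        self.dtype = self._data.dtype
    def __len__(self):
        return self.shape[0]
    def __getitem__(self, indices):
        # Accepts slices or index arrays, as `training_utils.slice_arrays`
        # may request either.
        return self._data[indices]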
class DatasetCreatorAdapter(DataAdapter):
"""Adapter that handles dataset functions."""
def __init__(self, x, y, steps=None, distribution_strategy=None, **kwargs):
super().__init__(x, **kwargs)
if not isinstance(x, dataset_creator.DatasetCreator):
raise TypeError(
"The input of a `DatasetCreatorAdapter` should be a "
"`DatasetCreator` but it received type {}.".format(type(x))
)
if steps is None:
if not kwargs.get("pss_evaluation_shards"):
raise ValueError(
"When using a "
"`tf.keras.utils.experimental.DatasetCreator`, "
"`steps_per_epoch`, `validation_steps`, `steps`, or "
"`pss_evaluation_shards` argument must be provided in "
"`Model.fit`, `Model.evaluate`, or `Model.predict`."
)
self.dataset_creator = x
self.steps = steps
self.strategy = distribution_strategy
@staticmethod
def can_handle(x, y=None):
if isinstance(x, dataset_creator.DatasetCreator):
assert y is None
return True
def should_recreate_iterator(self):
# We expect users to shuffle the dataset in their `dataset_fn` supplied
# to `DatasetCreator`. Since that is a buffered shuffle, we intend to
# not reset the dataset so the batches that are not shuffled can still
# be pulled.
return False
def get_size(self):
return None # To be inferred by `DataHandler`.
def get_dataset(self):
return self.strategy.distribute_datasets_from_function(
self.dataset_creator, options=self.dataset_creator.input_options
)
def batch_size(self):
raise NotImplementedError()
def has_partial_batch(self):
raise NotImplementedError()
def partial_batch_size(self):
raise NotImplementedError()
class CompositeTensorDataAdapter(DataAdapter):
"""Adapter that handles composite tensor."""
@staticmethod
def can_handle(x, y=None):
flat_inputs = tf.nest.flatten(x)
if y is not None:
flat_inputs += tf.nest.flatten(y)
def _is_composite(v):
# Dataset/iterator/DistributedDataset inherits from CompositeTensor
# but should be handled by DatasetAdapter and GeneratorAdapter.
if (
tf_utils.is_extension_type(v)
and not isinstance(v, (tf.data.Dataset, tf.data.Iterator))
and not _is_distributed_dataset(v)
):
return True
# Support Scipy sparse tensors if scipy is installed
return _is_scipy_sparse(v)
def _is_tensor_or_composite(v):
if isinstance(v, (tf.Tensor, np.ndarray)):
return True
return _is_composite(v)
return any(_is_composite(v) for v in flat_inputs) and all(
_is_tensor_or_composite(v) for v in flat_inputs
)
def __init__(
self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
steps=None,
shuffle=False,
**kwargs,
):
super().__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes
)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True
)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
dataset = tf.data.Dataset.from_tensor_slices(inputs)
num_samples = int(tf.nest.flatten(x)[0].shape[0])
if shuffle:
dataset = dataset.shuffle(num_samples)
# If batch_size is not passed but steps is, calculate from the input
# data. Defaults to `32` for backwards compatibility.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
dataset = dataset.batch(batch_size)
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
self._has_partial_batch = self._size != (num_samples // batch_size)
self._partial_batch_size = None
if self._has_partial_batch:
self._partial_batch_size = (
num_samples - (self._size - 1) * self._batch_size
)
self._dataset = dataset.prefetch(tf.data.AUTOTUNE)
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._has_partial_batch
def partial_batch_size(self):
return self._partial_batch_size
def should_recreate_iterator(self):
return True
class ListsOfScalarsDataAdapter(DataAdapter):
"""Adapter that handles lists of scalars and lists of lists of scalars."""
@staticmethod
def can_handle(x, y=None):
handles_x = ListsOfScalarsDataAdapter._is_list_of_scalars(x)
handles_y = True
if y is not None:
handles_y = ListsOfScalarsDataAdapter._is_list_of_scalars(y)
return handles_x and handles_y
@staticmethod
def _is_list_of_scalars(inp):
if isinstance(inp, (float, int, str, bytes, bytearray)):
return True
if isinstance(inp, (list, tuple)) and inp:
return ListsOfScalarsDataAdapter._is_list_of_scalars(inp[0])
return False
def __init__(
self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
shuffle=False,
**kwargs,
):
super().__init__(x, y, **kwargs)
x = np.asarray(x)
if y is not None:
y = np.asarray(y)
if sample_weights is not None:
sample_weights = np.asarray(sample_weights)
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes
)
self._internal_adapter = TensorLikeDataAdapter(
x,
y=y,
sample_weights=sample_weights,
sample_weight_modes=sample_weight_modes,
batch_size=batch_size,
shuffle=shuffle,
**kwargs,
)
def get_dataset(self):
return self._internal_adapter.get_dataset()
def get_size(self):
return self._internal_adapter.get_size()
def batch_size(self):
return self._internal_adapter.batch_size()
def has_partial_batch(self):
return self._internal_adapter.has_partial_batch()
def partial_batch_size(self):
return self._internal_adapter.partial_batch_size()
def should_recreate_iterator(self):
return True
class DatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
@staticmethod
def can_handle(x, y=None):
return isinstance(
x, (tf.compat.v1.data.Dataset, tf.data.Dataset)
) or _is_distributed_dataset(x)
def __init__(self, x, y=None, sample_weights=None, steps=None, **kwargs):
super().__init__(x, y, **kwargs)
        # Note that the dataset instance is immutable, so it's fine to reuse
        # the user-provided dataset.
self._dataset = x
# The user-provided steps.
self._user_steps = steps
self._validate_args(
y, sample_weights, steps, kwargs.get("pss_evaluation_shards")
)
def get_dataset(self):
return self._dataset
def get_size(self):
return # Inferred in `DataHandler`.
def batch_size(self):
return None
def has_partial_batch(self):
return False
def partial_batch_size(self):
return None
def should_recreate_iterator(self):
# Since DistributedDatasets have no cardinality, the user must provide
# all steps that need to be run, calling `.repeat()` as needed.
if _is_distributed_dataset(self._dataset):
return False
# If user doesn't supply `steps`, or if they supply `steps` that
# exactly equals the size of the `Dataset`, create a new iterator
# each epoch.
return (
self._user_steps is None
or tf.data.experimental.cardinality(self._dataset).numpy()
== self._user_steps
)
def _validate_args(self, y, sample_weights, steps, pss_evaluation_shards):
"""Validates `__init__` arguments."""
# Arguments that shouldn't be passed.
if not is_none_or_empty(y):
raise ValueError(
"`y` argument is not supported when using dataset as input."
)
if not is_none_or_empty(sample_weights):
raise ValueError(
"`sample_weight` argument is not supported when using "
"dataset as input."
)
if steps is None:
if _is_distributed_dataset(self._dataset):
if not pss_evaluation_shards:
raise ValueError(
"When providing a distributed dataset, you must "
"specify the number of steps to run."
)
else:
size = tf.data.experimental.cardinality(self._dataset).numpy()
if size == tf.data.experimental.INFINITE_CARDINALITY:
if pss_evaluation_shards:
raise ValueError(
"When performing exact evaluation, the dataset "
"must be finite. Make sure not to call `repeat()` "
"on your dataset."
)
else:
raise ValueError(
"When providing an infinite dataset, you must "
"specify the number of steps to run (if you did "
"not intend to create an infinite dataset, make "
"sure to not call `repeat()` on the dataset)."
)
class GeneratorDataAdapter(DataAdapter):
"""Adapter that handles python generators and iterators."""
@staticmethod
def can_handle(x, y=None):
return (
(hasattr(x, "__next__") or hasattr(x, "next"))
and hasattr(x, "__iter__")
and not isinstance(x, data_utils.Sequence)
)
def __init__(
self,
x,
y=None,
sample_weights=None,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs,
):
# Generators should never shuffle as exhausting the generator in order
# to shuffle the batches is inefficient.
kwargs.pop("shuffle", None)
if not is_none_or_empty(y):
raise ValueError(
"`y` argument is not supported when using "
"python generator as input."
)
if not is_none_or_empty(sample_weights):
raise ValueError(
"`sample_weight` argument is not supported when using "
"python generator as input."
)
super().__init__(x, y, **kwargs)
# Since we have to know the dtype of the python generator when we build
# the dataset, we have to look at a batch to infer the structure.
peek, x = self._peek_and_restore(x)
peek = self._standardize_batch(peek)
peek = _process_tensorlike(peek)
# Need to build the Model on concrete input shapes.
if model is not None and not model.built:
concrete_x, _, _ = unpack_x_y_sample_weight(peek)
try:
model.distribute_strategy.run(
lambda x: model(x, training=False), args=(concrete_x,)
)
except NotImplementedError:
# The above call may fail if the model is a container-like class
# that does not implement its own forward pass (e.g. a GAN or
# VAE where the forward pass is handled by subcomponents). Such
# a model does not need to be built.
pass
self._first_batch_size = int(tf.nest.flatten(peek)[0].shape[0])
def _get_tensor_spec(t):
# TODO(b/226395276): Remove _with_tensor_ranks_only usage.
return type_spec.type_spec_from_value(t)._with_tensor_ranks_only()
output_signature = tf.nest.map_structure(_get_tensor_spec, peek)
# Note that dataset API takes a callable that creates a generator
# object, rather than generator itself, which is why we define a
# function here.
generator_fn = self._handle_multiprocessing(
x, workers, use_multiprocessing, max_queue_size
)
def wrapped_generator():
for data in generator_fn():
yield self._standardize_batch(data)
dataset = tf.data.Dataset.from_generator(
wrapped_generator, output_signature=output_signature
)
if workers == 1 and not use_multiprocessing:
dataset = dataset.prefetch(1)
self._dataset = dataset.prefetch(tf.data.AUTOTUNE)
def _standardize_batch(self, data):
"""Standardizes a batch output by a generator."""
# Removes `None`s.
x, y, sample_weight = unpack_x_y_sample_weight(data)
data = pack_x_y_sample_weight(x, y, sample_weight)
data = tf.__internal__.nest.list_to_tuple(data)
def _convert_dtype(t):
if isinstance(t, np.ndarray) and issubclass(
t.dtype.type, np.floating
):
return np.array(t, dtype=backend.floatx())
return t
data = tf.nest.map_structure(_convert_dtype, data)
return data
@staticmethod
def _peek_and_restore(x):
peek = next(x)
return peek, itertools.chain([peek], x)
def _handle_multiprocessing(
self, x, workers, use_multiprocessing, max_queue_size
):
"""Create a callable, possibly including an Enqueuer."""
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
enqueuer = data_utils.GeneratorEnqueuer(
x, use_multiprocessing=use_multiprocessing
)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return enqueuer.get()
else:
generator_fn = lambda: x
return generator_fn
def get_dataset(self):
return self._dataset
def get_size(self):
return None
def batch_size(self):
return None
def representative_batch_size(self):
return self._first_batch_size
def has_partial_batch(self):
return False
def partial_batch_size(self):
return
def should_recreate_iterator(self):
return False
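# Editor's note: a hedged, standalone sketch (not part of the original module)
# of the `from_generator` + `output_signature` pattern the adapter above uses:
# peek one batch to infer the element spec, then wrap the generator factory in
# a dataset. It assumes `generator_fn` is a zero-argument callable returning a
# fresh generator of NumPy-array batches; shapes and dtypes are inferred.
def _example_generator_to_dataset(generator_fn):
    """Minimal sketch: turn a batch-yielding generator into a tf.data.Dataset."""
    peek = next(generator_fn())
    output_signature = tf.nest.map_structure(
        lambda t: tf.TensorSpec(
            shape=(None,) + tuple(t.shape[1:]), dtype=t.dtype
        ),
        peek,
    )
    dataset = tf.data.Dataset.from_generator(
        generator_fn, output_signature=output_signature
    )
    return dataset.prefetch(tf.data.AUTOTUNE)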
class KerasSequenceAdapter(GeneratorDataAdapter):
"""Adapter that handles `keras.utils.Sequence`."""
@staticmethod
def can_handle(x, y=None):
return isinstance(x, data_utils.Sequence)
def __init__(
self,
x,
y=None,
sample_weights=None,
shuffle=False,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs,
):
if not is_none_or_empty(y):
raise ValueError(
"`y` argument is not supported when using "
"`keras.utils.Sequence` as input."
)
if not is_none_or_empty(sample_weights):
raise ValueError(
"`sample_weight` argument is not supported when using "
"`keras.utils.Sequence` as input."
)
self._shuffle_sequence = shuffle
self._keras_sequence = x
self._enqueuer = None
super().__init__(
x,
            shuffle=False,  # Shuffle is handled in `_handle_multiprocessing`.
workers=workers,
use_multiprocessing=use_multiprocessing,
max_queue_size=max_queue_size,
model=model,
**kwargs,
)
@staticmethod
def _peek_and_restore(x):
return x[0], x
def _handle_multiprocessing(
self, x, workers, use_multiprocessing, max_queue_size
):
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
self._enqueuer = data_utils.OrderedEnqueuer(
x,
use_multiprocessing=use_multiprocessing,
shuffle=self._shuffle_sequence,
)
self._enqueuer.start(
workers=workers, max_queue_size=max_queue_size
)
return self._enqueuer.get()
else:
def generator_fn():
order = range(len(x))
if self._shuffle_sequence:
# Match the shuffle convention in OrderedEnqueuer.
order = list(order)
random.shuffle(order)
for i in order:
yield x[i]
return generator_fn
def get_size(self):
return len(self._keras_sequence)
def should_recreate_iterator(self):
return True
def on_epoch_end(self):
if self._enqueuer:
self._enqueuer.stop()
self._keras_sequence.on_epoch_end()
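# Editor's note: a hedged sketch (not part of the original module) of the
# minimal `keras.utils.Sequence` contract that `KerasSequenceAdapter` consumes:
# `__len__` returns the number of batches and `__getitem__` returns one batch.
# The synthetic zero-valued data is an assumption for illustration.
class _ExampleSequence(data_utils.Sequence):
    """Minimal Sequence yielding zero-valued (features, labels) batches."""
    def __init__(self, num_batches=4, batch_size=8):
        self._num_batches = num_batches
        self._batch_size = batch_size
    def __len__(self):
        return self._num_batches
    def __getitem__(self, index):
        x = np.zeros((self._batch_size, 3), dtype="float32")
        y = np.zeros((self._batch_size,), dtype="float32")
        return x, y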
ALL_ADAPTER_CLS = [
ListsOfScalarsDataAdapter,
TensorLikeDataAdapter,
GenericArrayLikeDataAdapter,
DatasetAdapter,
GeneratorDataAdapter,
KerasSequenceAdapter,
CompositeTensorDataAdapter,
DatasetCreatorAdapter,
]
UNSHARDABLE_DATASET_TYPES = [
from_generator_op._GeneratorDataset,
range_op._RangeDataset,
from_sparse_tensor_slices_op._SparseTensorSliceDataset,
from_tensors_op._TensorDataset,
from_tensor_slices_op._TensorSliceDataset,
]
def select_data_adapter(x, y):
"""Selects a data adapter that can handle a given x and y."""
adapter_cls = [cls for cls in ALL_ADAPTER_CLS if cls.can_handle(x, y)]
if not adapter_cls:
# TODO(scottzhu): This should be a less implementation-specific error.
raise ValueError(
"Failed to find data adapter that can handle input: {}, {}".format(
_type_name(x), _type_name(y)
)
)
elif len(adapter_cls) > 1:
raise RuntimeError(
"Data adapters should be mutually exclusive for "
"handling inputs. Found multiple adapters {} to handle "
"input: {}, {}".format(adapter_cls, _type_name(x), _type_name(y))
)
return adapter_cls[0]
def _type_name(x):
"""Generates a description of the type of an object."""
if isinstance(x, dict):
key_types = set(_type_name(key) for key in x.keys())
val_types = set(_type_name(key) for key in x.values())
return f"({type(x)} containing {key_types} keys and {val_types} values)"
if isinstance(x, (list, tuple)):
types = set(_type_name(val) for val in x)
return f"({type(x)} containing values of types {types})"
return str(type(x))
def _process_tensorlike(inputs):
"""Process tensor-like inputs.
This function:
(1) Converts `Numpy` arrays to `Tensor`s.
(2) Converts `Scipy` sparse matrices to `SparseTensor`s.
(3) Converts `pandas.Series` to `Tensor`s
(4) Converts `list`s to `tuple`s (for `tf.data` support).
Args:
inputs: Structure of `Tensor`s, `NumPy` arrays, or tensor-like.
Returns:
Structure of `Tensor`s or tensor-like.
"""
def _convert_single_tensor(x):
if _is_pandas_series(x):
x = np.expand_dims(x.to_numpy(), axis=-1)
if isinstance(x, np.ndarray):
dtype = None
if issubclass(x.dtype.type, np.floating):
dtype = backend.floatx()
return tf.convert_to_tensor(x, dtype=dtype)
elif _is_scipy_sparse(x):
return _scipy_sparse_to_sparse_tensor(x)
return x
inputs = tf.nest.map_structure(_convert_single_tensor, inputs)
return tf.__internal__.nest.list_to_tuple(inputs)
def is_none_or_empty(inputs):
    # Utility method to check if the input is None or an empty list.
# the python "not" check will raise an error like below if the input is a
# numpy array
# "The truth value of an array with more than one element is ambiguous.
# Use a.any() or a.all()"
return inputs is None or not tf.nest.flatten(inputs)
def broadcast_sample_weight_modes(target_structure, sample_weight_modes):
"""Match sample_weight_modes structure with output structure."""
if target_structure is None or not tf.nest.flatten(target_structure):
return sample_weight_modes
if isinstance(sample_weight_modes, str):
if isinstance(target_structure, dict):
return {key: sample_weight_modes for key in target_structure.keys()}
return [sample_weight_modes for _ in target_structure]
if sample_weight_modes:
try:
tf.nest.assert_same_structure(
training_utils.list_to_tuple(target_structure),
training_utils.list_to_tuple(sample_weight_modes),
)
except (ValueError, TypeError):
target_str = str(
tf.nest.map_structure(lambda _: "...", target_structure)
)
mode_str = str(
tf.nest.map_structure(lambda _: "...", sample_weight_modes)
)
# Attempt to coerce sample_weight_modes to the target structure.
# This implicitly depends on the fact that Model flattens outputs
# for its internal representation.
try:
sample_weight_modes = tf.nest.pack_sequence_as(
target_structure, tf.nest.flatten(sample_weight_modes)
)
logging.warning(
"sample_weight modes were coerced from\n "
"{}\n to \n {}".format(target_str, mode_str)
)
except (ValueError, TypeError):
raise ValueError(
"Unable to match target structure and sample_weight_modes "
"structure:\n {}\n to \n {}".format(
target_str, mode_str
)
)
return sample_weight_modes
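# Editor's note: a hedged usage sketch (not part of the original module) for
# `broadcast_sample_weight_modes`: a single string mode is broadcast to match
# the structure of the targets. The output names below are assumptions made
# for illustration.
def _example_broadcast_modes():
    """Minimal sketch: broadcast one mode string across two named outputs."""
    targets = {"regression_head": None, "classification_head": None}
    modes = broadcast_sample_weight_modes(targets, "temporal")
    # -> {"regression_head": "temporal", "classification_head": "temporal"}
    return modes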
class DataHandler:
"""Handles iterating over epoch-level `tf.data.Iterator` objects."""
def __init__(
self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
initial_epoch=0,
epochs=1,
shuffle=False,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
model=None,
steps_per_execution=None,
distribute=True,
pss_evaluation_shards=0,
):
"""Initializes a `DataHandler`.
Arguments:
x: See `Model.fit`.
y: See `Model.fit`.
sample_weight: See `Model.fit`.
batch_size: See `Model.fit`.
steps_per_epoch: See `Model.fit`.
initial_epoch: See `Model.fit`.
epochs: See `Model.fit`.
shuffle: See `Model.fit`.
class_weight: See `Model.fit`.
max_queue_size: See `Model.fit`.
workers: See `Model.fit`.
use_multiprocessing: See `Model.fit`.
model: The `Model` instance. Needed in order to correctly `build` the
`Model` using generator-like inputs (see `GeneratorDataAdapter`).
steps_per_execution: See `Model.compile`.
distribute: Whether to distribute the `tf.dataset`.
`PreprocessingLayer.adapt` does not support distributed datasets,
`Model` should always set this to `True`.
pss_evaluation_shards: See `Model.fit`.
"""
if batch_size is not None:
_check_positive("batch_size", batch_size)
if steps_per_epoch not in (None, -1) and steps_per_epoch <= 0:
raise ValueError(
"steps_per_epoch must be positive, None or -1. Received "
f"{steps_per_epoch}. See `Model.fit`."
)
self._initial_epoch = _check_non_negative(
"initial_epoch", initial_epoch
)
_check_positive("max_queue_size", max_queue_size)
_check_positive("workers", workers)
if steps_per_execution is not None:
_check_positive("steps_per_execution", steps_per_execution)
self._initial_step = 0
self._epochs = _check_positive("epochs", epochs)
self._insufficient_data = False
self._model = model
self._steps_per_epoch = steps_per_epoch
# `steps_per_execution_value` is the cached initial value.
# `steps_per_execution` is mutable and may be changed by the DataAdapter
# to handle partial executions.
if steps_per_execution is None:
self._steps_per_execution = tf.Variable(1)
else:
self._steps_per_execution = steps_per_execution
adapter_cls = select_data_adapter(x, y)
self._adapter = adapter_cls(
x,
y,
batch_size=batch_size,
steps=steps_per_epoch,
epochs=epochs - initial_epoch,
sample_weights=sample_weight,
shuffle=shuffle,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
distribution_strategy=tf.distribute.get_strategy(),
model=model,
pss_evaluation_shards=pss_evaluation_shards,
)
strategy = tf.distribute.get_strategy()
self._current_step = 0
self._step_increment = self._steps_per_execution.numpy().item() - 1
self._insufficient_data = False
self._configure_dataset_and_inferred_steps(
strategy, x, steps_per_epoch, class_weight, distribute
)
if self._inferred_steps == 0:
raise ValueError("Expected input data to be non-empty.")
def _configure_dataset_and_inferred_steps(
self, strategy, x, steps_per_epoch, class_weight, distribute
):
"""Configure the `_dataset` and `_inferred_steps` attributes."""
del x
dataset = self._adapter.get_dataset()
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
self._inferred_steps = self._infer_steps(steps_per_epoch, dataset)
# `PreprocessingLayer.adapt` does not currently support distributed
# datasets, so we pass `distribute=False` there.
if distribute and not _is_distributed_dataset(dataset):
dataset = strategy.experimental_distribute_dataset(dataset)
self._dataset = dataset
self._validate_data_handler()
def enumerate_epochs(self):
"""Yields `(epoch, tf.data.Iterator)`."""
with self._truncate_execution_to_epoch():
data_iterator = iter(self._dataset)
for epoch in range(self._initial_epoch, self._epochs):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
if self._adapter.should_recreate_iterator():
data_iterator = iter(self._dataset)
if not isinstance(self._dataset, DistributedDataset):
steps = self._infer_steps(
self._steps_per_epoch, self._dataset
)
if steps is not None:
self._inferred_steps = steps
yield epoch, data_iterator
self._adapter.on_epoch_end()
@contextlib.contextmanager
def _truncate_execution_to_epoch(self):
"""Truncates steps per execution to at most one epoch."""
should_truncate = (
self._inferred_steps is not None
and self._steps_per_execution.numpy().item() > self._inferred_steps
)
original_value = self._steps_per_execution.numpy().item()
try:
if should_truncate:
self._steps_per_execution.assign(self._inferred_steps)
yield
finally:
if should_truncate:
self._steps_per_execution.assign(original_value)
def sync(self):
context.async_wait()
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
with distributed_training_utils.maybe_preemption_handler_scope(
self._model
):
try:
yield
self.sync()
except (StopIteration, tf.errors.OutOfRangeError):
if self._inferred_steps is None:
self._inferred_steps = self._current_step
else:
self._insufficient_data = True
total_epochs = self._epochs - self._initial_epoch
logging.warning(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate "
"at least `steps_per_epoch * epochs` batches (in this "
"case, {} batches). You may need to use the repeat() "
"function when building your dataset.".format(
total_epochs * self._inferred_steps
)
)
def steps(self):
"""Yields steps for the current epoch."""
self._current_step = self._initial_step
self._initial_step = 0
# `self._inferred_steps` can be changed by `catch_stop_iteration`.
while (
self._inferred_steps is None
or self._current_step < self._inferred_steps
):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
original_spe = self._steps_per_execution.numpy().item()
can_run_full_execution = (
original_spe == 1
or self._inferred_steps is None
or self._inferred_steps - self._current_step >= original_spe
)
if can_run_full_execution:
self._step_increment = original_spe - 1
yield self._current_step
self._current_step += original_spe
else:
# Last partial execution.
steps_remaining = self._inferred_steps - self._current_step
self._steps_per_execution.assign(steps_remaining)
self._step_increment = steps_remaining - 1
yield self._current_step
self._current_step += steps_remaining
self._steps_per_execution.assign(original_spe)
@property
def step_increment(self):
"""The number to increment the step for `on_batch_end` methods."""
return self._step_increment
@property
def inferred_steps(self):
"""The inferred steps per epoch of the created `Dataset`.
This will be `None` in the case where:
(1) A `Dataset` of unknown cardinality was passed to the `DataHandler`,
(2) `steps_per_epoch` was not provided, and
(3) The first epoch of iteration has not yet completed.
Returns:
The inferred steps per epoch of the created `Dataset`.
"""
return self._inferred_steps
@property
def should_sync(self):
# Catch OutOfRangeError for Datasets of unknown size.
# This blocks until the batch has finished executing.
# TODO(b/150292341): Allow multiple async steps here.
return self._inferred_steps is None
def _log_indefinite_training_warning(self):
logging.warning(
"The training loop will run indefinitely since you have "
"set `steps_per_epoch=-1`. Please use batch-level "
"callbacks to save checkpoints or log training progress, "
"etc"
)
def _infer_steps(self, steps, dataset):
"""Infers steps_per_epoch needed to loop through a dataset."""
if steps == -1:
self._log_indefinite_training_warning()
return None
if steps is not None:
return steps
adapter_steps = self._adapter.get_size()
if adapter_steps is not None:
return adapter_steps
# tf.distribute's `PerWorkerDataset` does not inherit from
# `tf.data.Dataset` and in those cases we give up on inferring steps.
if not isinstance(dataset, tf.data.Dataset):
return None
size = tf.data.experimental.cardinality(dataset)
if size == tf.data.experimental.INFINITE_CARDINALITY and steps is None:
raise ValueError(
"When passing an infinitely repeating dataset, please specify "
"a `steps_per_epoch` value so that epoch level "
"callbacks continue to work. The value can be arbitrary, or a "
"number that you think correctly defines the size of an epoch. "
"Epoch-level callbacks will then be called at this interval."
)
if size >= 0:
return size.numpy().item()
return None
@property
def _samples(self):
return self._adapter.get_samples()
def _validate_data_handler(self):
# TODO(b/152094471): Support this with DistIter.get_next_as_optional.
if (
self._steps_per_execution.numpy().item() > 1
and self._inferred_steps is None
):
raise ValueError(
"Could not infer the size of the data. With "
"`steps_per_execution > 1`, you must specify the number of "
"steps to run."
)
class _ClusterCoordinatorDataHandler(DataHandler):
"""A `DataHandler` that is compatible with `ClusterCoordinator`."""
def __init__(self, x, y=None, **kwargs):
if not _is_distributed_dataset(x) and not isinstance(
x, (dataset_creator.DatasetCreator, tf.data.Dataset)
):
x = self._convert_to_dataset_creator(x, y, **kwargs)
super().__init__(x=x, **kwargs)
def _convert_to_dataset_creator(self, x, y, **kwargs):
"""Converts non-tf.data.Dataset to `DatasetCreator` instances."""
def _dataset_fn(input_context):
del input_context
data_adapter_cls = select_data_adapter(x, y)
return data_adapter_cls(x=x, y=y, **kwargs).get_dataset()
# This check is needed because types like `tf.data.Dataset` don't work
# with PSS yet. So only apply this logic to the types we can support.
if isinstance(x, _get_tensor_types()) and isinstance(
y, _get_tensor_types()
):
return dataset_creator.DatasetCreator(_dataset_fn)
else:
raise NotImplementedError(
"Only `tf.keras.utils.experimental.DatasetCreator`, "
"`tf.Tensor`, numpy arrays and pandas dataframes are "
"supported types at this time."
)
def _configure_dataset_and_inferred_steps(
self, strategy, x, steps_per_epoch, class_weight, distribute
):
if isinstance(x, dataset_creator.DatasetCreator):
def per_worker_dataset_fn():
return strategy.distribute_datasets_from_function(
x, options=x.input_options
)
coordinator = self._model._cluster_coordinator
self._dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn
)
else:
assert distribute
if not _is_distributed_dataset(x):
x = strategy.experimental_distribute_dataset(x)
coordinator = self._model._cluster_coordinator
self._dataset = coordinator.create_per_worker_dataset(x)
if steps_per_epoch == -1:
self._inferred_steps = None
self._log_indefinite_training_warning()
else:
self._inferred_steps = steps_per_epoch
def sync(self):
self._model._cluster_coordinator.join()
class _ClusterCoordinatorExactEvalDataHandler(_ClusterCoordinatorDataHandler):
def __init__(self, x, y=None, **kwargs):
super().__init__(x=x, **kwargs)
self._total_shards = kwargs.get("pss_evaluation_shards")
def _warn_if_not_file_shardable(self, dataset):
# Traverse backwards to find source dataset and check if that is one of
# the unshardable types
# TODO(b/268521864): expand this to inspect dataset function graphs and
# use the auto-sharding logic rather than re-creating it here.
cur_dataset = dataset
while hasattr(cur_dataset, "_input_dataset"):
cur_dataset = cur_dataset._input_dataset
if type(cur_dataset) in UNSHARDABLE_DATASET_TYPES:
logging.warning(
"Found source dataset of type {}. This type is not "
"efficiently shardable, so exact evaluation may be "
"slower than inexact evaluation. Try converting to "
"a TFRecord or other file-based dataset if "
"performance is a concern.".format(type(cur_dataset))
)
def _configure_dataset_and_inferred_steps(
self, strategy, x, steps_per_epoch, class_weight, distribute
):
if isinstance(x, dataset_creator.DatasetCreator):
def per_worker_dataset_fn():
ddf = strategy.distribute_datasets_from_function(
x, options=x.input_options
)
return ddf
coordinator = self._model._cluster_coordinator
self._dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn
)
logging.info("dataset element spec: %r", self._dataset.element_spec)
self._dataset = self._dataset.build()
else:
# TODO(b/268226218): Support DistributedDataset input
if not _is_distributed_dataset(x):
self._warn_if_not_file_shardable(x)
x = strategy.experimental_distribute_dataset(x)
coordinator = self._model._cluster_coordinator
self._dataset = coordinator.create_per_worker_dataset(x)
self._dataset = self._dataset.build()
if steps_per_epoch == -1:
self._inferred_steps = None
self._log_indefinite_training_warning()
else:
self._inferred_steps = steps_per_epoch
def enumerate_epochs(self):
"""Yields `(epoch, dataset)`."""
for epoch in range(self._initial_epoch, self._epochs):
yield epoch, self._dataset
self._adapter.on_epoch_end()
def steps(self):
"""Yields steps for the current epoch."""
for step in range(self._total_shards):
yield step
@keras_export("keras.__internal__.utils.get_data_handler", v1=[])
def get_data_handler(*args, **kwargs):
"""Creates a `DataHandler`, providing standardized access to a `Dataset`.
See `DataHandler` for the list and definition of the arguments. See the
implementation of `Model.fit()`, `evaluate()`, or `predict()` methods
    for complete usage examples. As a rule of thumb, `get_data_handler()`
    accepts the same inputs as the `x` argument of `Model.fit()`.
Example:
```python
def step(iterator):
data = next(iterator)
# result <= Do something with data
return result
tf_step = tf.function(step, reduce_retracing=True)
# Assume x is a tf.data Dataset.
data_handler = data_adapter.get_data_handler(x=x)
# Epoch iteration
for epo_idx, iterator in data_handler.enumerate_epochs():
# Stop on dataset exhaustion.
with data_handler.catch_stop_iteration():
for step in data_handler.steps(): # Step iteration
step_result = step(iterator)
```
Args:
*args: Arguments passed to the `DataHandler` constructor.
**kwargs: Arguments passed to the `DataHandler` constructor.
Returns:
      A `DataHandler` object. If the model's cluster coordinator is set
      (e.g. the model was defined under a parameter-server strategy),
      returns a `_ClusterCoordinatorDataHandler`.
"""
if getattr(kwargs["model"], "_cluster_coordinator", None):
if kwargs.get("pss_evaluation_shards"):
return _ClusterCoordinatorExactEvalDataHandler(*args, **kwargs)
return _ClusterCoordinatorDataHandler(*args, **kwargs)
return DataHandler(*args, **kwargs)
def _make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
class_ids = list(sorted(class_weight.keys()))
expected_class_ids = list(range(len(class_ids)))
if class_ids != expected_class_ids:
error_msg = (
"Expected `class_weight` to be a dict with keys from 0 to one less "
"than the number of classes, found {}"
).format(class_weight)
raise ValueError(error_msg)
class_weight_tensor = tf.convert_to_tensor(
[class_weight[int(c)] for c in class_ids]
)
def _class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = unpack_x_y_sample_weight(data)
if tf.nest.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single "
"output."
)
if y.shape.rank >= 2:
y_classes = tf.__internal__.smart_cond.smart_cond(
backend.shape(y)[-1] > 1,
lambda: backend.argmax(y, axis=-1),
lambda: tf.cast(tf.round(tf.squeeze(y, axis=-1)), tf.int64),
)
else:
# Special casing for rank 1, where we can guarantee sparse encoding.
y_classes = tf.cast(tf.round(y), tf.int64)
cw = tf.gather(class_weight_tensor, y_classes)
if sw is not None:
cw = tf.cast(cw, sw.dtype)
# `class_weight` and `sample_weight` are multiplicative.
# If class_weight has more than 2 dimensions, we need to reshape
# sample_weight to make broadcasting possible for multiplication.
rank_delta = cw.shape.rank - sw.shape.rank
sw = tf.reshape(sw, sw.shape + [1] * rank_delta)
sw = sw * cw
else:
sw = cw
return x, y, sw
return _class_weights_map_fn
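# Illustrative usage sketch (not part of the original module): shows how the
# mapping function returned by `_make_class_weight_map_fn` turns a
# `class_weight` dict into per-sample weights. The helper name
# `_example_class_weight_mapping` is hypothetical and exists only for
# demonstration purposes.
def _example_class_weight_mapping():
    x = np.ones((4, 3), dtype="float32")
    y = np.array([[0.0], [1.0], [2.0], [1.0]], dtype="float32")
    dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
    map_fn = _make_class_weight_map_fn({0: 1.0, 1: 2.0, 2: 3.0})
    weighted = dataset.map(map_fn)
    # Each element is now `(x, y, sample_weight)`, where `sample_weight[i]`
    # is the class weight associated with the label in `y[i]`.
    return weighted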
def train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested structures
of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset to
include in the validation split. The rest of the dataset will be
included in the training split.
Returns:
`(train_arrays, validation_arrays)`
"""
def _can_split(t):
tensor_types = _get_tensor_types()
return isinstance(t, tensor_types) or t is None
flat_arrays = tf.nest.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
if unsplitable:
raise ValueError(
"`validation_split` is only supported for Tensors or NumPy "
"arrays, found following types in the input: {}".format(unsplitable)
)
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1.0 - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
"Training data contains {batch_dim} samples, which is not "
"sufficient to split it into a validation and training set as "
"specified by `validation_split={validation_split}`. Either "
"provide more data, or a different value for the "
"`validation_split` argument.".format(
batch_dim=batch_dim, validation_split=validation_split
)
)
def _split(t, start, end):
if t is None:
return t
return t[start:end]
train_arrays = tf.nest.map_structure(
functools.partial(_split, start=0, end=split_at), arrays
)
val_arrays = tf.nest.map_structure(
functools.partial(_split, start=split_at, end=batch_dim), arrays
)
return train_arrays, val_arrays
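# Illustrative usage sketch (not part of the original module): the trailing
# `validation_split` fraction of every array in the (possibly nested)
# structure becomes the validation subset. The helper name is hypothetical.
def _example_train_validation_split():
    x = np.arange(10).reshape(10, 1)
    y = np.arange(10)
    (x_train, y_train), (x_val, y_val) = train_validation_split(
        (x, y), validation_split=0.2
    )
    # The first 8 samples form the training split; the last 2 samples form
    # the validation split.
    assert len(x_train) == 8 and len(x_val) == 2
    return (x_train, y_train), (x_val, y_val)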
@keras_export("keras.utils.unpack_x_y_sample_weight", v1=[])
def unpack_x_y_sample_weight(data):
"""Unpacks user-provided data tuple.
This is a convenience utility to be used when overriding
`Model.train_step`, `Model.test_step`, or `Model.predict_step`.
This utility makes it easy to support data of the form `(x,)`,
`(x, y)`, or `(x, y, sample_weight)`.
Standalone usage:
>>> features_batch = tf.ones((10, 5))
>>> labels_batch = tf.zeros((10, 5))
>>> data = (features_batch, labels_batch)
>>> # `y` and `sample_weight` will default to `None` if not provided.
>>> x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
>>> sample_weight is None
True
Example in overridden `Model.train_step`:
```python
class MyModel(tf.keras.Model):
def train_step(self, data):
# If `sample_weight` is not provided, all samples will be weighted
# equally.
x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
trainable_variables = self.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
self.compiled_metrics.update_state(y, y_pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
```
Args:
data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`.
Returns:
The unpacked tuple, with `None`s for `y` and `sample_weight` if they are
not provided.
"""
if isinstance(data, list):
data = tuple(data)
if not isinstance(data, tuple):
return (data, None, None)
elif len(data) == 1:
return (data[0], None, None)
elif len(data) == 2:
return (data[0], data[1], None)
elif len(data) == 3:
return (data[0], data[1], data[2])
else:
error_msg = (
"Data is expected to be in format `x`, `(x,)`, `(x, y)`, "
"or `(x, y, sample_weight)`, found: {}"
).format(data)
raise ValueError(error_msg)
@keras_export("keras.utils.pack_x_y_sample_weight", v1=[])
def pack_x_y_sample_weight(x, y=None, sample_weight=None):
"""Packs user-provided data into a tuple.
This is a convenience utility for packing data into the tuple formats
that `Model.fit` uses.
Standalone usage:
>>> x = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x)
>>> isinstance(data, tf.Tensor)
True
>>> y = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x, y)
>>> isinstance(data, tuple)
True
>>> x, y = data
Args:
x: Features to pass to `Model`.
y: Ground-truth targets to pass to `Model`.
sample_weight: Sample weight for each element.
Returns:
Tuple in the format used in `Model.fit`.
"""
if y is None:
# For single x-input, we do no tuple wrapping since in this case
# there is no ambiguity. This also makes NumPy and Dataset
# consistent in that the user does not have to wrap their Dataset
# data in an unnecessary tuple.
        if not isinstance(x, (tuple, list)):
return x
else:
return (x,)
elif sample_weight is None:
return (x, y)
else:
return (x, y, sample_weight)
def single_batch_iterator(
strategy, x, y=None, sample_weight=None, class_weight=None
):
"""Creates a single-batch dataset."""
x, y, sample_weight = _process_tensorlike((x, y, sample_weight))
if y is None:
data = (x,)
elif sample_weight is None:
data = (x, y)
else:
data = (x, y, sample_weight)
_check_data_cardinality(data)
dataset = tf.data.Dataset.from_tensors(data)
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
dataset = strategy.experimental_distribute_dataset(dataset)
return iter(dataset)
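# Illustrative usage sketch (not part of the original module): builds a
# one-batch distributed iterator from in-memory arrays using the default
# (no-op) distribution strategy. The helper name is hypothetical.
def _example_single_batch_iterator():
    strategy = tf.distribute.get_strategy()
    x = np.ones((4, 3), dtype="float32")
    y = np.zeros((4, 1), dtype="float32")
    iterator = single_batch_iterator(strategy, x, y)
    # The iterator yields exactly one `(x, y)` batch containing all samples.
    batch_x, batch_y = next(iterator)
    return batch_x, batch_y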
def _check_data_cardinality(data):
num_samples = set(int(i.shape[0]) for i in tf.nest.flatten(data))
if len(num_samples) > 1:
msg = "Data cardinality is ambiguous:\n"
for label, single_data in zip(["x", "y", "sample_weight"], data):
msg += " {} sizes: {}\n".format(
label,
", ".join(
str(i.shape[0]) for i in tf.nest.flatten(single_data)
),
)
msg += "Make sure all arrays contain the same number of samples."
raise ValueError(msg)
def _check_non_negative(name, value):
if value < 0:
raise ValueError(
f"Expected {name} to be non-negative. Received is {value}."
)
return value
def _check_positive(name, value):
if value <= 0:
raise ValueError(
f"Expected {name} to be positive. Received is {value}."
)
return value
def _get_tensor_types():
if pd is None:
return (tf.Tensor, np.ndarray)
else:
return (tf.Tensor, np.ndarray, pd.Series, pd.DataFrame)
def _is_scipy_sparse(x):
try:
from scipy.sparse import issparse
return issparse(x)
except ImportError:
return False
def _is_pandas_series(x):
if pd is None:
return False
else:
return isinstance(x, pd.Series)
def _scipy_sparse_to_sparse_tensor(t):
"""Converts a SciPy sparse matrix to a SparseTensor."""
sparse_coo = t.tocoo()
row, col = sparse_coo.row, sparse_coo.col
data, shape = sparse_coo.data, sparse_coo.shape
if issubclass(data.dtype.type, np.floating):
data = data.astype(backend.floatx())
indices = np.concatenate(
(np.expand_dims(row, axis=1), np.expand_dims(col, axis=1)), axis=1
)
return tf.SparseTensor(indices, data, shape)
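# Illustrative usage sketch (not part of the original module): converts a
# small SciPy COO matrix into a `tf.SparseTensor`. Requires SciPy to be
# installed; the helper name is hypothetical.
def _example_scipy_sparse_to_sparse_tensor():
    from scipy.sparse import coo_matrix
    dense = np.array([[0.0, 1.0], [2.0, 0.0]])
    sparse_tensor = _scipy_sparse_to_sparse_tensor(coo_matrix(dense))
    # `sparse_tensor.indices` holds the nonzero coordinates ([0, 1] and
    # [1, 0]) and `sparse_tensor.values` holds the corresponding entries.
    return sparse_tensor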
def _is_distributed_dataset(ds):
return isinstance(
ds,
(
tf.distribute.DistributedDataset,
tf.experimental.dtensor.DTensorDataset,
),
)
| tf-keras/tf_keras/engine/data_adapter.py/0 | {
"file_path": "tf-keras/tf_keras/engine/data_adapter.py",
"repo_id": "tf-keras",
"token_count": 32032
} | 213 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility object to handler partial batches for TPUStrategy."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
class PartialBatchPaddingHandler:
"""A container that holds info about partial batches for `predict()`."""
def __init__(self, output_shape):
self.padded_batch_size = 0
self.padding_mask = tf.zeros(0)
self.output_shape = output_shape
def get_real_batch_size(self, dataset_batch):
"""Returns the number of elements in a potentially partial batch."""
if isinstance(dataset_batch, (tuple, list)):
dataset_batch = dataset_batch[0]
assert tf.nest.flatten(dataset_batch)
def _find_any_tensor(batch_features):
tensors = [
x for x in tf.nest.flatten(batch_features) if tf.is_tensor(x)
]
if not tensors:
raise ValueError("Cannot find any Tensor in features dict.")
return tensors[0]
return backend.cast(
backend.shape(_find_any_tensor(dataset_batch))[0], dtype="int64"
)
def update_mask(self, padding_mask, dataset_batch):
"""Calculate and cache the amount of padding required for a batch."""
original_batch_size = self.get_real_batch_size(dataset_batch)
missing_count = self.padded_batch_size - original_batch_size
mask = backend.concatenate(
[tf.ones(original_batch_size), tf.zeros(missing_count)], axis=0
)
return backend.concatenate([padding_mask, mask], axis=0)
def pad_batch(self, *dataset_batch_elements):
"""Pads the batch dimension of a tensor to the complete batch size."""
def _pad(batch):
"""Helper function to pad nested data within each batch elements."""
padded_dict_batch = {}
if isinstance(batch, dict):
for key, value in batch.items():
padded_dict_batch[key] = _pad(value)
return padded_dict_batch
rank = len(batch.shape)
assert rank > 0
missing_count = self.padded_batch_size - self.get_real_batch_size(
batch
)
padding = backend.stack(
[[0, missing_count]] + [[0, 0]] * (rank - 1)
)
return tf.pad(batch, padding, "constant")
if len(dataset_batch_elements) == 1:
return _pad(dataset_batch_elements[0])
batch_elements = []
for batch_element in dataset_batch_elements:
batch_elements.append(_pad(batch_element))
return tuple(batch_elements)
def apply_mask(self, prediction_result):
"""Removes prediction output that corresponds to padded input."""
padding_mask = backend.get_value(self.padding_mask)
assert len(padding_mask.shape) == 1
if len(self.output_shape) == 1:
prediction = np.take(
prediction_result,
np.nonzero(padding_mask[: len(prediction_result)]),
axis=0,
)
if prediction.shape[0] == 1:
prediction = np.squeeze(prediction, axis=0)
return prediction
else:
predictions = []
for i in range(len(self.output_shape)):
prediction = prediction_result[i]
prediction = np.take(
prediction,
np.nonzero(padding_mask[: len(prediction)]),
axis=0,
)
predictions.append(np.squeeze(prediction))
return predictions
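# Illustrative usage sketch (not part of the original module): pads a final
# partial batch up to the padded batch size and records a mask so that the
# padded rows can later be stripped from predictions. The helper name is
# hypothetical.
def _example_partial_batch_padding():
    handler = PartialBatchPaddingHandler(output_shape=[(None, 1)])
    handler.padded_batch_size = 4
    partial_batch = tf.ones((3, 1))  # only 3 real samples in this batch
    handler.padding_mask = handler.update_mask(
        handler.padding_mask, partial_batch
    )
    padded = handler.pad_batch(partial_batch)
    # `padded` now has 4 rows; the mask marks the last row as padding.
    return padded, handler.padding_mask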
| tf-keras/tf_keras/engine/partial_batch_padding_handler.py/0 | {
"file_path": "tf-keras/tf_keras/engine/partial_batch_padding_handler.py",
"repo_id": "tf-keras",
"token_count": 1853
} | 214 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
import collections
import io
import sys
import tempfile
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras import backend
from tf_keras import layers as layers_module
from tf_keras import losses
from tf_keras import metrics as metrics_module
from tf_keras.callbacks import Callback
from tf_keras.engine import input_layer
from tf_keras.engine import sequential
from tf_keras.engine import training as training_module
from tf_keras.engine import training_utils_v1
from tf_keras.layers.preprocessing import string_lookup
from tf_keras.mixed_precision import policy
from tf_keras.optimizers import legacy as optimizer_legacy
from tf_keras.optimizers import rmsprop
from tf_keras.optimizers import sgd as sgd_experimental
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import data_utils
from tf_keras.utils import io_utils
from tf_keras.utils import np_utils
# isort: off
from tensorflow.python.framework import (
test_util as tf_test_utils,
)
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.rmsprop import (
RMSPropOptimizer,
)
try:
import scipy.sparse as scipy_sparse
except ImportError:
scipy_sparse = None
class TrainingTest(test_combinations.TestCase):
@test_combinations.run_all_keras_modes
@test_combinations.run_with_all_model_types
def test_model_instrumentation(self):
layers = [
layers_module.Dense(10, dtype=np.float64),
layers_module.Dense(10, dtype=np.float64),
]
model = test_utils.get_model_from_layers(layers, input_shape=(1,))
self.assertTrue(model._instrumented_keras_api)
self.assertTrue(model._instrumented_keras_model_class)
self.assertFalse(model._instrumented_keras_layer_class)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_fit_training_arg(self):
class ReturnTraining(layers_module.Layer):
def call(self, inputs, training):
if training:
return inputs + tf.constant([100], "float32")
else:
return inputs + tf.constant([0], "float32")
model = sequential.Sequential([ReturnTraining()])
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
hist = model.fit(x=np.array([0.0]), y=np.array([0.0]))
self.assertAllClose(hist.history["loss"][0], 10000)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_fit_on_empty(self):
model = sequential.Sequential([layers_module.Dense(1)])
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
with self.assertRaisesRegex(
ValueError, "Expected input data to be non-empty."
):
model.fit(x=np.array([]), y=np.array([]))
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_compile_fit_with_jit_compile(self):
# Test with jit_compile = True
model = sequential.Sequential([layers_module.Dense(1)])
model.compile("sgd", loss="mse", run_eagerly=False, jit_compile=True)
x, y = np.ones((10, 1)), np.ones((10, 1))
model.fit(x, y, epochs=2)
        # Test compile and fit for an RNN model
model = sequential.Sequential()
model.add(
layers_module.TimeDistributed(
layers_module.Embedding(5, 6, mask_zero=True),
input_shape=(None, None),
)
) # N by t_1 by t_2 by 6
model.add(
layers_module.TimeDistributed(
layers_module.SimpleRNN(7, return_sequences=True)
)
)
model.add(
layers_module.TimeDistributed(
layers_module.SimpleRNN(8, return_sequences=False)
)
)
model.add(layers_module.SimpleRNN(1, return_sequences=False))
model.compile(optimizer="sgd", loss="mse", jit_compile=True)
model_input = np.random.randint(
low=1, high=5, size=(10, 3, 4), dtype="int32"
)
for i in range(4):
model_input[i, i:, i:] = 0
model.fit(
model_input, np.random.random((10, 1)), epochs=1, batch_size=10
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_compile_fit_evaluate_predict_with_mirrored_strategy(self):
# Test with jit_compile = True
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = sequential.Sequential([layers_module.Dense(1)])
model.compile("sgd", loss="mse", run_eagerly=False, jit_compile=True)
x, y = np.ones((10, 1)), np.ones((10, 1))
model.fit(x, y, epochs=2)
model.evaluate(x, y)
model.predict(x)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_distribution_reduction_method_sum_default_train_step(self):
strategy = tf.distribute.MirroredStrategy(
["/cpu:1", "/cpu:2", "/cpu:3", "/cpu:4"]
)
BATCH_SIZE = 10
# A model that always outputs `1`:
with strategy.scope():
inputs = layers_module.Input(shape=(1,), name="my_input")
outputs = layers_module.Dense(
units=1, kernel_initializer="zeros", bias_initializer="ones"
)(inputs)
model = training_module.Model(inputs, outputs)
model.trainable = False
model.compile(optimizer="sgd", loss="mean_absolute_error")
# Data points are always equal to `2`:
x, y = 2 * np.ones((40, 1)), 2 * np.ones((40, 1))
# For every output x_i = 1, every target y_i = 2,
# loss_i = |1-2| = 1; and
# loss_total = sum([1, 1, ..., 1]) / BATCH_SIZE = 1.0
history = model.fit(x, y, epochs=1, batch_size=BATCH_SIZE)
self.assertAllClose(history.history["loss"][-1], 1.0)
eval_output = model.evaluate(x, y, batch_size=BATCH_SIZE)
self.assertAllClose(eval_output, 1.0)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_distribution_reduction_method_sum_custom_train_step(self):
strategy = tf.distribute.MirroredStrategy(
["/cpu:1", "/cpu:2", "/cpu:3", "/cpu:4"]
)
BATCH_SIZE = 10
class MyModel(training_module.Model):
@staticmethod
def reduce_loss(loss_value, global_batch_size):
REDUCTION_AXES = range(1, backend.ndim(loss_value))
loss_value = tf.reduce_mean(loss_value, axis=REDUCTION_AXES)
return tf.nn.compute_average_loss(
loss_value, global_batch_size=global_batch_size
)
def train_step(self, data):
loss_value = tf.ones_like(data[0])
return {
"loss": MyModel.reduce_loss(
loss_value, global_batch_size=BATCH_SIZE
)
}
def test_step(self, data):
loss_value = tf.ones_like(data[0])
return {
"metric": MyModel.reduce_loss(
loss_value, global_batch_size=BATCH_SIZE
)
}
with strategy.scope():
inputs = layers_module.Input(shape=(1,), name="my_input")
outputs = layers_module.Dense(1)(inputs)
model = MyModel(inputs, outputs)
model.compile()
x, y = np.ones((40, 1)), np.ones((40, 1))
history = model.fit(x, y, epochs=2, batch_size=BATCH_SIZE)
self.assertAllClose(history.history["loss"][-1], 1.0)
eval_output = model.evaluate(x, y, batch_size=BATCH_SIZE)
self.assertAllClose(eval_output, 1.0)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_verify_xla_compile_with_jit_compile(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = np.array([[1, 2, 3, 4], [4, 3, 1, 0]])
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
model.compile(
"sgd", loss="mse", run_eagerly=False, jit_compile=True
)
# Added a string op unsupported by XLA compiler to make sure that an
            # error is thrown. This ensures that the graph is indeed being
# compiled using XLA
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, "Graph execution error"
):
model.fit(input_array, expected_output, epochs=1)
model.predict(input_array)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_jit_compile_for_compile_evaluate_predict(self):
# Test with jit_compile = True for model.compile(), model.evaluate(),
# model.predict()
model = sequential.Sequential([layers_module.Dense(1)])
self.assertIsNone(model._jit_compile)
model.compile("sgd", loss="mse", run_eagerly=False, jit_compile=True)
self.assertTrue(model._jit_compile)
x, y = np.ones((10, 1)), np.ones((10, 1))
model.fit(x, y, epochs=2)
model.evaluate(x, y)
model.predict(x)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_jit_compile_true_for_evaluate_predict_but_false_for_compile(self):
# Test with jit_compile = True for model.compile(), model.evaluate(),
# model.predict()
model = sequential.Sequential([layers_module.Dense(1)])
self.assertIsNone(model._jit_compile)
self.assertIsNone(model.jit_compile)
model.compile("sgd", loss="mse")
model.jit_compile = True
self.assertTrue(model._jit_compile)
self.assertTrue(model.jit_compile)
x, y = np.ones((10, 1)), np.ones((10, 1))
model.fit(x, y, epochs=2)
model.evaluate(x, y)
model.predict(x)
self.assertTrue(model._jit_compile)
self.assertTrue(model.jit_compile)
model.compile("sgd", loss="mse", jit_compile=False)
self.assertFalse(model._jit_compile)
self.assertFalse(model.jit_compile)
model.compile("sgd", loss="mse", jit_compile=True)
self.assertTrue(model._jit_compile)
self.assertTrue(model.jit_compile)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_predict_xla_compile_with_jit_compile_setter_false_then_true(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
input_data = keras.Input(shape=(None,), dtype=tf.string)
# Added a string op unsupported by XLA compiler to make sure that an
            # error is thrown. This ensures that the graph is indeed being
# compiled using XLA
layer = string_lookup.StringLookup(vocabulary=vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# Compiled without jit_compile
model.predict(input_array)
model.jit_compile = True
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, "Graph execution error"
):
model.predict(input_array)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_fit_without_loss_at_compile(self):
model = sequential.Sequential([layers_module.Dense(1)])
model.compile("sgd", run_eagerly=test_utils.should_run_eagerly())
x, y = np.ones((10, 1)), np.ones((10, 1))
with self.assertRaisesRegex(ValueError, "No loss found..*"):
model.fit(x, y, epochs=2)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_fit_without_loss_at_compile_but_with_add_loss(self):
class MyModel(sequential.Sequential):
def call(self, x):
self.add_loss(tf.reduce_sum(x))
return x
model = MyModel([layers_module.Dense(1)])
model.compile("sgd", run_eagerly=test_utils.should_run_eagerly())
x, y = np.ones((10, 1)), np.ones((10, 1))
model.fit(x, y, epochs=2)
@test_combinations.run_all_keras_modes
def test_run_eagerly_setting(self):
model = sequential.Sequential([layers_module.Dense(1)])
run_eagerly = test_utils.should_run_eagerly()
model.compile("sgd", "mse", run_eagerly=run_eagerly)
self.assertEqual(model.run_eagerly, run_eagerly)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@parameterized.named_parameters(
("train_on_batch", "train_on_batch"),
("test_on_batch", "test_on_batch"),
("predict_on_batch", "predict_on_batch"),
("fit", "fit"),
("evaluate", "evaluate"),
("predict", "predict"),
)
def test_disallow_methods_inside_tf_function(self, method_name):
model = sequential.Sequential([layers_module.Dense(1)])
run_eagerly = test_utils.should_run_eagerly()
model.compile("sgd", "mse", run_eagerly=run_eagerly)
@tf.function
def my_fn():
getattr(model, method_name)(1)
error_msg = "inside a `tf.function`"
with self.assertRaisesRegex(RuntimeError, error_msg):
my_fn()
@test_combinations.run_all_keras_modes
def test_fit_and_validate_learning_phase(self):
class ReturnTraining(layers_module.Layer):
def call(self, inputs):
return backend.in_train_phase(
lambda: tf.ones_like(inputs), lambda: tf.zeros_like(inputs)
)
model = sequential.Sequential([ReturnTraining(input_shape=(2,))])
model.compile(
"sgd", loss="mae", run_eagerly=test_utils.should_run_eagerly()
)
inputs = np.ones((40, 2), dtype=np.float32)
targets = np.ones((40, 1), dtype=np.float32)
# Test correctness with `steps_per_epoch`.
train_dataset = tf.data.Dataset.from_tensor_slices(
(inputs, targets)
).batch(10)
val_dataset = tf.data.Dataset.from_tensor_slices(
(inputs, targets)
).batch(10)
history = model.fit(
train_dataset, epochs=2, verbose=1, validation_data=val_dataset
)
# The training loss should be 0.0
self.assertAllClose(history.history["loss"][0], 0.0)
# The validation loss should be 1.0.
self.assertAllClose(history.history["val_loss"][0], 1.0)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_warn_on_evaluate(self):
i = layers_module.Input((1,))
x = np.ones((100, 1))
y = np.ones((100, 1))
sample_weight = np.ones((100,))
model = training_module.Model(i, i)
model.compile(loss="mse", metrics=["mse"])
logging.set_verbosity(2)
with self.assertLogs(level=2) as logs:
model.evaluate(x, y, sample_weight=sample_weight)
self.assertTrue(
any(
"`evaluate()` received a value for `sample_weight`" in log
for log in logs.output
)
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_sample_weight_warning_disable(self):
i = layers_module.Input((1,))
x = np.ones((100, 1))
y = np.ones((100, 1))
sample_weight = np.ones((100,))
model = training_module.Model(i, i)
model.compile(loss="mse", metrics=["mse"], weighted_metrics=[])
logging.set_verbosity(2)
with self.assertLogs(level=2) as logs:
model.evaluate(x, y, sample_weight=sample_weight)
self.assertFalse(
any(
"`evaluate()` received a value for `sample_weight`" in log
for log in logs.output
)
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_warn_on_evaluate_with_tf_dataset(self):
i = layers_module.Input((1,))
x = tf.ones((100, 1), tf.float32)
y = tf.ones((100, 1), tf.float32)
sample_weight = tf.ones((100,), dtype=tf.float32)
val_dataset = tf.data.Dataset.from_tensor_slices(
(x, y, sample_weight)
).batch(10)
model = training_module.Model(i, i)
model.compile(loss="mse", metrics=["mse"])
logging.set_verbosity(2)
with self.assertLogs(level=2) as logs:
model.evaluate(val_dataset)
self.assertTrue(
any(
"`evaluate()` received a value for `sample_weight`" in log
for log in logs.output
)
)
@test_combinations.run_all_keras_modes
def test_fit_and_validate_training_arg(self):
class ReturnTraining(layers_module.Layer):
def call(self, inputs, training=None):
return backend.in_train_phase(
lambda: tf.ones_like(inputs),
lambda: tf.zeros_like(inputs),
training=training,
)
model = sequential.Sequential([ReturnTraining(input_shape=(2,))])
model.compile(
"sgd", loss="mae", run_eagerly=test_utils.should_run_eagerly()
)
inputs = np.ones((40, 2), dtype=np.float32)
targets = np.ones((40, 1), dtype=np.float32)
# Test correctness with `steps_per_epoch`.
train_dataset = tf.data.Dataset.from_tensor_slices(
(inputs, targets)
).batch(10)
val_dataset = tf.data.Dataset.from_tensor_slices(
(inputs, targets)
).batch(10)
history = model.fit(
train_dataset, epochs=2, verbose=1, validation_data=val_dataset
)
# The training loss should be 0.0
self.assertAllClose(history.history["loss"][0], 0.0)
# The validation loss should be 1.0.
self.assertAllClose(history.history["val_loss"][0], 1.0)
@test_combinations.run_all_keras_modes
@test_combinations.run_with_all_model_types
def test_target_dtype_matches_output(self):
def loss_fn(labels, preds):
self.assertEqual(labels.dtype, preds.dtype)
return labels - preds
layers = [
layers_module.Dense(10, dtype=np.float64),
layers_module.Dense(10, dtype=np.float64),
]
model = test_utils.get_model_from_layers(layers, input_shape=(1,))
inputs = np.ones(shape=(10, 1), dtype=np.float64)
targets = np.ones(shape=(10, 1), dtype=np.float64)
model.compile(
"sgd", loss=loss_fn, run_eagerly=test_utils.should_run_eagerly()
)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
self.assertEqual(model.predict(inputs).dtype, np.float64)
@test_combinations.run_all_keras_modes
def test_fit_and_validate_nested_training_arg(self):
class NestedReturnTraining(layers_module.Layer):
def call(self, inputs, training=None):
return backend.in_train_phase(
lambda: tf.ones_like(inputs),
lambda: tf.zeros_like(inputs),
training=training,
)
class ReturnTraining(layers_module.Layer):
def __init__(self, input_shape=None, **kwargs):
super().__init__(input_shape=input_shape, **kwargs)
self._nested_layer = None
def build(self, input_shape):
self._nested_layer = NestedReturnTraining()
self.built = True
def call(self, inputs):
return self._nested_layer(inputs)
model = sequential.Sequential([ReturnTraining(input_shape=(2,))])
model.compile(
"sgd", loss="mae", run_eagerly=test_utils.should_run_eagerly()
)
inputs = np.ones((40, 2), dtype=np.float32)
targets = np.ones((40, 1), dtype=np.float32)
# Test correctness with `steps_per_epoch`.
train_dataset = tf.data.Dataset.from_tensor_slices(
(inputs, targets)
).batch(10)
val_dataset = tf.data.Dataset.from_tensor_slices(
(inputs, targets)
).batch(10)
history = model.fit(
train_dataset, epochs=2, verbose=1, validation_data=val_dataset
)
# The training loss should be 0.0
self.assertAllClose(history.history["loss"][0], 0.0)
# The validation loss should be 1.0.
self.assertAllClose(history.history["val_loss"][0], 1.0)
@test_combinations.run_with_all_model_types(exclude_models="sequential")
@test_combinations.run_all_keras_modes
def test_fit_on_arrays(self):
input_a = layers_module.Input(shape=(3,), name="input_a")
input_b = layers_module.Input(shape=(3,), name="input_b")
dense = layers_module.Dense(4, name="dense")
dropout = layers_module.Dropout(0.5, name="dropout")
branch_a = [input_a, dense]
branch_b = [input_b, dense, dropout]
model = test_utils.get_multi_io_model(branch_a, branch_b)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = "mse"
loss_weights = [1.0, 0.5]
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), "mae"],
loss_weights=loss_weights,
run_eagerly=test_utils.should_run_eagerly(),
)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test fit at different verbosity
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0,
)
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=1,
)
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=2,
)
model.train_on_batch(
[input_a_np, input_b_np], [output_d_np, output_e_np]
)
# Test with validation data
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
validation_data=(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
),
epochs=1,
batch_size=5,
verbose=0,
)
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
validation_data=(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
),
epochs=2,
batch_size=5,
verbose=1,
)
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
validation_data=(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
),
epochs=2,
batch_size=5,
verbose=2,
)
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
validation_data=[
[input_a_np, input_b_np],
[output_d_np, output_e_np],
],
epochs=2,
batch_size=5,
verbose=2,
)
# Test with validation split
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=0,
validation_split=0.2,
)
if test_utils.get_model_type() == "functional":
# Test with dictionary inputs
model.fit(
{"input_a": input_a_np, "input_b": input_b_np},
{"dense": output_d_np, "dropout": output_e_np},
epochs=1,
batch_size=5,
verbose=0,
)
model.fit(
{"input_a": input_a_np, "input_b": input_b_np},
{"dense": output_d_np, "dropout": output_e_np},
epochs=1,
batch_size=5,
verbose=1,
)
model.fit(
{"input_a": input_a_np, "input_b": input_b_np},
{"dense": output_d_np, "dropout": output_e_np},
validation_data=(
{"input_a": input_a_np, "input_b": input_b_np},
{"dense": output_d_np, "dropout": output_e_np},
),
epochs=1,
batch_size=5,
verbose=0,
)
model.train_on_batch(
{"input_a": input_a_np, "input_b": input_b_np},
{"dense": output_d_np, "dropout": output_e_np},
)
# Test with lists for loss, metrics
loss = ["mae", "mse"]
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), "mae"],
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0,
)
# Test with dictionaries for loss, metrics, loss weights
if test_utils.get_model_type() == "functional":
loss = {"dense": "mse", "dropout": "mae"}
loss_weights = {"dense": 1.0, "dropout": 0.5}
metrics = {
"dense": "mse",
"dropout": metrics_module.CategoricalAccuracy(),
}
model.compile(
optimizer,
loss,
metrics=metrics,
loss_weights=loss_weights,
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0,
)
# Build single-input model
x = layers_module.Input(shape=(3,), name="input_a")
y = layers_module.Dense(4)(x)
model = training_module.Model(x, y)
model.compile(
optimizer, loss="mse", run_eagerly=test_utils.should_run_eagerly()
)
# This will work
model.fit([input_a_np], output_d_np, epochs=1)
# Test model on a list of floats
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 4))
# Test execution on inputs that are lists of scalars.
# TF2 and TF1 have slightly different semantics:
if tf.executing_eagerly():
            # In TF2, to avoid any ambiguity when there are nested lists,
            # the entire input gets converted to a single numpy array (and
            # this only works in the case of a single-input/output model).
model.fit(
np.ndarray.tolist(input_a_np),
np.ndarray.tolist(input_b_np),
epochs=2,
batch_size=5,
verbose=2,
)
else:
# In TF1 there was logic to try disambiguating between the
# individual inputs when lists are nested. This allowed multi-io
# functional models to support lists of scalars as input, but it
# caused ambiguity issues for subclass models & made it trickier to
# pass multi-dimensional inputs as lists of scalars to single io
# models. This was an excessive amount of complexity for what boiled
# down to a convenience method we were mainly just using for writing
# tests.
model.fit(
[np.ndarray.tolist(input_a_np)],
[np.ndarray.tolist(input_b_np)],
epochs=2,
batch_size=5,
verbose=2,
)
@test_combinations.run_all_keras_modes
def test_evaluate_predict_on_arrays(self):
a = layers_module.Input(shape=(3,), name="input_a")
b = layers_module.Input(shape=(3,), name="input_b")
dense = layers_module.Dense(4, name="dense")
c = dense(a)
d = dense(b)
e = layers_module.Dropout(0.5, name="dropout")(c)
model = training_module.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = "mse"
loss_weights = [1.0, 0.5]
model.compile(
optimizer,
loss,
metrics=["mae", metrics_module.CategoricalAccuracy()],
loss_weights=loss_weights,
sample_weight_mode=None,
run_eagerly=test_utils.should_run_eagerly(),
)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test evaluate at different verbosity
out = model.evaluate(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
batch_size=5,
verbose=0,
)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
batch_size=5,
verbose=1,
)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
batch_size=5,
verbose=2,
)
self.assertEqual(len(out), 7)
out = model.test_on_batch(
[input_a_np, input_b_np], [output_d_np, output_e_np]
)
self.assertEqual(len(out), 7)
# Test evaluate with dictionary inputs
model.evaluate(
{"input_a": input_a_np, "input_b": input_b_np},
{"dense": output_d_np, "dropout": output_e_np},
batch_size=5,
verbose=0,
)
model.evaluate(
{"input_a": input_a_np, "input_b": input_b_np},
{"dense": output_d_np, "dropout": output_e_np},
batch_size=5,
verbose=1,
)
# Test predict
out = model.predict([input_a_np, input_b_np], batch_size=5)
self.assertEqual(len(out), 2)
out = model.predict({"input_a": input_a_np, "input_b": input_b_np})
self.assertEqual(len(out), 2)
out = model.predict_on_batch(
{"input_a": input_a_np, "input_b": input_b_np}
)
self.assertEqual(len(out), 2)
def _make_sequence_input_functions(self, input_type):
# train and test
xy_namedtuple = collections.namedtuple("xy_namedtuple", ["x", "y"])
# predict
x_namedtuple = collections.namedtuple("x_namedtuple", ["x"])
if input_type == "dataset":
dataset = tf.data.Dataset.range(16).map(
lambda _: tf.ones(shape=(1,))
)
xy_dataset = tf.data.Dataset.zip((dataset, dataset)).batch(4)
x_dataset = dataset.batch(4)
def xy_function(use_namedtuple):
return (
xy_dataset.map(xy_namedtuple)
if use_namedtuple
else xy_dataset
)
def x_function(use_namedtuple):
return (
x_dataset.map(x_namedtuple) if use_namedtuple else x_dataset
)
return xy_function, x_function
elif input_type == "generator":
def xy_generator(use_namedtuple):
x, y = np.ones((4, 1)), np.ones((4, 1))
for _ in range(4):
if use_namedtuple:
yield xy_namedtuple(x, y)
else:
yield x, y
def x_generator(use_namedtuple):
x = np.ones((4, 1))
for _ in range(4):
if use_namedtuple:
yield x_namedtuple(x)
else:
yield x
return xy_generator, x_generator
elif input_type == "sequence":
class XYSequence(data_utils.Sequence):
def __init__(self, use_namedtuple):
self._use_namedtuple = use_namedtuple
super().__init__()
def __getitem__(self, idx):
x, y = np.ones((4, 1)), np.ones((4, 1))
if self._use_namedtuple:
return xy_namedtuple(x, y)
return x, y
def __len__(self):
return 4
class XSequence(data_utils.Sequence):
def __init__(self, use_namedtuple):
self._use_namedtuple = use_namedtuple
super().__init__()
def __getitem__(self, idx):
x = np.ones((4, 1))
if self._use_namedtuple:
return x_namedtuple(x)
return x
def __len__(self):
return 4
return XYSequence, XSequence
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@test_combinations.run_with_all_model_types
@parameterized.named_parameters(
("dataset", "dataset"),
("generator", "generator"),
("sequence", "sequence"),
)
def test_sequence_input_types(self, input_type):
"""Ensure that namedtuples and tuples are plumbed identically."""
if not tf.executing_eagerly():
self.skipTest("Improved checking is only present in data_adapter.")
xy_function, x_function = self._make_sequence_input_functions(
input_type
)
fit_kwargs, evaluate_kwargs, predict_kwargs = {}, {}, {}
if input_type == "generator":
fit_kwargs["steps_per_epoch"] = 4
evaluate_kwargs["steps"] = 4
predict_kwargs["steps"] = 4
model = test_utils.get_small_mlp(1, 1, 1)
model.compile(
loss="mse",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(xy_function(use_namedtuple=False), **fit_kwargs)
model.evaluate(xy_function(use_namedtuple=False), **evaluate_kwargs)
model.predict(x_function(use_namedtuple=False), **predict_kwargs)
@test_combinations.run_all_keras_modes
def test_custom_mapping_in_config(self):
class MyModel(training_module.Model):
def call(self, inputs):
return inputs
def get_config(self):
self.a = {}
return {"a": self.a}
model = MyModel()
self.assertIn('{"a": {}}', model.to_json())
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_get_config_default(self):
class MyModel(training_module.Model):
def __init__(self, units):
super().__init__()
self.units = units
def call(self, inputs):
return inputs
# Test default config with named args
model = MyModel(units=10)
config = model.get_config()
self.assertLen(config, 1)
self.assertEqual(config["units"], 10)
model = model.from_config(config)
self.assertDictEqual(model.get_config(), config)
        # Test default config with positional args
model = MyModel(10)
config = model.get_config()
self.assertLen(config, 1)
self.assertEqual(config["units"], 10)
model = model.from_config(config)
self.assertDictEqual(model.get_config(), config)
# Test non-serializable
model = MyModel(units=np.int32(10))
config = model.get_config()
self.assertNotIn("units", config)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_get_config_kwargs(self):
class MyModel(training_module.Model):
def __init__(self, units, **kwargs):
super().__init__()
self.units = units
def call(self, inputs):
return inputs
model = MyModel(10, extra=1)
config = model.get_config()
# config = {'name': 'my_model', 'trainable': True, 'dtype': 'float32',
# 'extra': 1, 'units': 10}
self.assertLen(config, 5)
self.assertEqual(config["units"], 10)
self.assertEqual(config["extra"], 1)
model = model.from_config(config)
self.assertDictEqual(model.get_config(), config)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_get_config_override(self):
class MyModel(training_module.Model):
def __init__(self, units):
super().__init__()
self.units = units
def call(self, inputs):
return inputs
def get_config(self):
config = {"units": int(self.units)}
config.update(super().get_config())
return config
model = MyModel(units=np.int32(10))
config = model.get_config()
self.assertLen(config, 1)
self.assertEqual(config["units"], 10)
model = model.from_config(config)
self.assertDictEqual(model.get_config(), config)
def test_training_on_sparse_data_with_dense_placeholders_v1(self):
with tf.Graph().as_default():
if scipy_sparse is None:
return
test_inputs = [
scipy_sparse.random(6, 3, density=0.25).tocsr()
for _ in range(2)
]
test_outputs = [
scipy_sparse.random(6, i, density=0.25).tocsr()
for i in range(3, 5)
]
in1 = layers_module.Input(shape=(3,))
in2 = layers_module.Input(shape=(3,))
out1 = layers_module.Dropout(0.5, name="dropout")(in1)
out2 = layers_module.Dense(4, name="dense_1")(in2)
model = training_module.Model([in1, in2], [out1, out2])
model.predict(test_inputs, batch_size=2)
optimizer = "rmsprop"
model.compile(
optimizer,
"mse",
metrics=["mae", metrics_module.CategoricalAccuracy()],
)
model.fit(
test_inputs,
test_outputs,
epochs=1,
batch_size=2,
validation_split=0.5,
)
model.evaluate(test_inputs, test_outputs, batch_size=2)
@test_combinations.run_all_keras_modes
def test_compile_with_sparse_placeholders(self):
inputs = layers_module.Input(shape=(10,), sparse=True)
weights = tf.Variable(
np.ones((10, 1)).astype(np.float32), name="weights"
)
weights_mult = lambda x: tf.sparse.sparse_dense_matmul(x, weights)
output_layer = layers_module.Lambda(weights_mult)(inputs)
model = training_module.Model([inputs], output_layer)
model.compile(
loss="binary_crossentropy",
optimizer="adam",
metrics=["accuracy"],
run_eagerly=test_utils.should_run_eagerly(),
)
@test_combinations.run_all_keras_modes
def test_that_trainable_disables_updates(self):
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
a = layers_module.Input(shape=(4,))
layer = layers_module.BatchNormalization(input_shape=(4,))
b = layer(a)
model = training_module.Model(a, b)
model.trainable = False
if not tf.compat.v1.executing_eagerly_outside_functions():
self.assertEmpty(model.updates)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
if not tf.compat.v1.executing_eagerly_outside_functions():
self.assertEmpty(model.updates)
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
if not tf.compat.v1.executing_eagerly_outside_functions():
self.assertAllGreater(len(model.updates), 0)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
layer.trainable = False
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
if not tf.compat.v1.executing_eagerly_outside_functions():
self.assertEmpty(model.updates)
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
def test_weight_deduplication_in_methods(self):
inp = layers_module.Input(shape=(1,))
bn = layers_module.BatchNormalization()
d = layers_module.Dense(1)
m0 = training_module.Model(inp, d(bn(inp)))
m1 = training_module.Model(inp, d(bn(inp)))
x0 = m0(inp)
x1 = m1(inp)
x = layers_module.Add()([x0, x1])
model = training_module.Model(inp, x)
self.assertLen(model.trainable_weights, 4)
self.assertLen(model.non_trainable_weights, 2)
self.assertLen(model.weights, 6)
@test_combinations.run_all_keras_modes
def test_weight_deduplication(self):
class WatchingLayer(layers_module.Layer):
def __init__(self, dense_to_track):
# This will cause the kernel and bias to be double counted,
# effectively doubling the learning rate if weights are not
# deduped.
self._kernel = dense_to_track.kernel
self._bias = dense_to_track.bias
super().__init__()
inp = layers_module.Input(shape=(1,))
dense_layer = layers_module.Dense(1)
dense_output = dense_layer(inp) # This will build the dense kernel
# Deterministically set weights to make the test repeatable.
dense_layer.set_weights([np.ones((1, 1)), np.zeros((1,))])
output = WatchingLayer(dense_layer)(dense_output)
model = training_module.Model(inp, output)
# 0.25 is the edge of the radius of convergence for the double apply
# case. At lr=0.24, the double apply case will very slowly descend
# while the correct case will drop very quickly.
model.compile(
loss="mse",
optimizer=optimizer_legacy.gradient_descent.SGD(0.24),
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones((64 * 2,))
y = 4.5 * x - 3.0
history = model.fit(x, y, batch_size=64, epochs=2, verbose=2)
# If the gradient apply is duplicated then the loss after 2 epochs will
# be ~0.15, compared to the correct answer of O(1e-7).
self.assertLess(history.history["loss"][-1], 1e-6)
@test_combinations.run_all_keras_modes
def test_weight_shared_across_layers(self):
class AddWeightLayer(layers_module.Layer):
def __init__(self, trainable_var, non_trainable_var):
self.trainable_var = trainable_var
self.non_trainable_var = non_trainable_var
super().__init__()
def call(self, inputs):
return inputs + self.trainable_var
class LayerWithWeightSharedLayers(layers_module.Layer):
def __init__(self):
super().__init__()
shared_trainable_var = tf.Variable(1.0)
shared_non_trainable_var = tf.Variable(1.0, trainable=False)
self.layer1 = AddWeightLayer(
shared_trainable_var, shared_non_trainable_var
)
self.layer2 = AddWeightLayer(
shared_trainable_var, shared_non_trainable_var
)
def call(self, inputs):
return self.layer2(self.layer1(inputs))
l = LayerWithWeightSharedLayers()
layers = list(l._flatten_layers(include_self=False, recursive=False))
self.assertEqual(layers, [l.layer1, l.layer2])
self.assertEqual(
l.variables, [l.layer1.trainable_var, l.layer1.non_trainable_var]
)
self.assertEqual(l.trainable_variables, [l.layer1.trainable_var])
self.assertEqual(
l.non_trainable_variables, [l.layer1.non_trainable_var]
)
self.assertLen(l.get_weights(), 2)
@test_combinations.run_all_keras_modes
def test_weight_tracking_for_template(self):
def variable_scoped_function(trainable=True):
return tf.compat.v1.get_variable(
"dummy",
shape=[1],
trainable=trainable,
initializer=tf.compat.v1.zeros_initializer(),
)
def nested_template():
nested1 = tf.compat.v1.make_template(
"nested", variable_scoped_function
)
nested2 = tf.compat.v1.make_template(
"nested", variable_scoped_function
)
v1 = nested1()
v2 = nested2()
# nested1 and nested2 should not share variables
self.assertIsNot(v1, v2)
# Variables created by nested1 should be isolated from variables
# created by nested2.
self.assertEqual(1, len(nested1.variables))
self.assertEqual(1, len(nested2.variables))
self.assertIs(nested1.variables[0], v1)
self.assertIs(nested2.variables[0], v2)
self.assertEqual(1, len(nested1.trainable_variables))
self.assertEqual(1, len(nested2.trainable_variables))
self.assertIs(nested1.trainable_variables[0], v1)
self.assertIs(nested2.trainable_variables[0], v2)
self.assertEqual(len(nested1.non_trainable_variables), 0)
self.assertEqual(len(nested2.non_trainable_variables), 0)
return v1, v2
tmpl1 = tf.compat.v1.make_template("s1", nested_template)
tmpl2 = tf.compat.v1.make_template("s1", nested_template)
v1, v2 = tmpl1()
v5, v6 = tmpl2()
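        # Attach the first template to a Model and verify that its two
        # variables are tracked as trainable.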
model = training_module.Model()
model.template = tmpl1
self.assertEqual(2, len(model.variables))
self.assertIs(model.variables[0], v1)
self.assertIs(model.variables[1], v2)
        self.assertEqual(2, len(model.trainable_variables))
self.assertIs(model.trainable_variables[0], v1)
self.assertIs(model.trainable_variables[1], v2)
self.assertEqual(len(model.non_trainable_variables), 0)
model.templates = [tmpl2]
for v, w in zip(model.variables, [v1, v2, v5, v6]):
self.assertIs(v, w)
for v, w in zip(model.trainable_variables, [v1, v2, v5, v6]):
self.assertIs(v, w)
self.assertEqual(len(model.non_trainable_variables), 0)
# Make sure losses, layers, and updates aren't broken by having a
# Template in the mix, which does not expose any updates or losses.
self.assertEqual([], model.layers)
self.assertEqual([], model.updates)
self.assertEqual([], model.losses)
self.assertEqual([], model.templates.layers)
self.assertEqual([], model.templates.updates)
self.assertEqual([], model.templates.losses)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_logs_passed_to_callbacks(self):
input_dim = 5
num_classes = 1
class TestCallback(Callback):
def __init__(self):
super().__init__()
self.epoch_end_logs = None
self.batch_end_logs = None
self.epoch_end_call_count = 0
self.batch_end_call_count = 0
def on_epoch_end(self, epoch, logs=None):
self.epoch_end_logs = logs
self.epoch_end_call_count += 1
def on_batch_end(self, batch, logs=None):
self.batch_end_logs = logs
self.batch_end_call_count += 1
model = test_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim
)
model.compile(
loss="binary_crossentropy",
metrics=["acc"],
weighted_metrics=["mae"],
optimizer=RMSPropOptimizer(learning_rate=0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
np.random.seed(1337)
(x_train, y_train), (_, _) = test_utils.get_test_data(
train_samples=10,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes,
)
test_callback = TestCallback()
model.fit(
x_train,
y_train,
batch_size=2,
epochs=2,
verbose=0,
callbacks=[test_callback],
validation_data=(x_train, y_train),
)
self.assertEqual(test_callback.batch_end_call_count, 10)
self.assertEqual(test_callback.epoch_end_call_count, 2)
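        # Batch-end logs only contain training metrics; epoch-end logs also
        # include the corresponding val_* metrics from validation_data.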
self.assertSetEqual(
set(test_callback.batch_end_logs.keys()),
set(["acc", "loss", "mae"]),
)
self.assertSetEqual(
set(test_callback.epoch_end_logs.keys()),
set(["acc", "loss", "mae", "val_acc", "val_loss", "val_mae"]),
)
@test_combinations.run_all_keras_modes
def test_mismatched_output_shape_and_target_shape(self):
model = sequential.Sequential(
[
layers_module.Dense(2, input_shape=(3, 4)),
layers_module.Dense(5),
]
)
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss="sparse_categorical_crossentropy",
run_eagerly=test_utils.should_run_eagerly(),
)
# Test with Numpy data
x_train = np.random.random((10, 3, 4)).astype(np.float32)
y_train = np.random.randint(0, 5, size=(10, 3)).astype(np.float32)
model.fit(x_train, y_train, batch_size=5, epochs=1)
# Test with iterator
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.repeat(10)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2)
if tf.executing_eagerly():
# Test with eager execution
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss="sparse_categorical_crossentropy",
run_eagerly=True,
)
model.fit(x_train, y_train, batch_size=5, epochs=1)
# Test with eager execution and iterator
model.fit(dataset, epochs=1, steps_per_epoch=2)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_losses_in_defun(self):
layer = layers_module.Dense(1, kernel_regularizer="l1")
layer(tf.ones([1, 10]))
@tf.function
def get_losses():
return layer.losses
self.assertAllEqual(
self.evaluate(layer.losses), self.evaluate(get_losses())
)
@test_combinations.run_all_keras_modes
def test_logging(self):
mock_stdout = io.StringIO()
model = sequential.Sequential()
model.add(layers_module.Dense(10, activation="relu"))
model.add(layers_module.Dense(1, activation="sigmoid"))
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss="binary_crossentropy",
run_eagerly=test_utils.should_run_eagerly(),
)
io_utils.enable_interactive_logging()
with tf.compat.v1.test.mock.patch.object(sys, "stdout", mock_stdout):
model.fit(
np.ones((10, 10), "float32"),
np.ones((10, 1), "float32"),
epochs=10,
)
self.assertTrue("Epoch 5/10" in mock_stdout.getvalue())
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_training_with_loss_instance(self):
a = layers_module.Input(shape=(3,), name="input_a")
b = layers_module.Input(shape=(3,), name="input_b")
dense = layers_module.Dense(4, name="dense")
c = dense(a)
d = dense(b)
e = layers_module.Dropout(0.5, name="dropout")(c)
model = training_module.Model([a, b], [d, e])
loss_weights = [1.0, 0.5]
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[metrics_module.CategoricalAccuracy(), "mae"],
loss_weights=loss_weights,
)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
epochs=1,
batch_size=5,
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_static_batch_in_input_layer(self):
if tf.executing_eagerly():
self.skipTest("Not inferred in eager.")
class Counter(Callback):
def __init__(self):
self.batches = 0
def on_batch_end(self, batch, logs=None):
self.batches += 1
x, y = np.ones((64, 10), "float32"), np.ones((64, 1), "float32")
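        # With 64 samples, the default batch size of 32 yields 2 batches,
        # while an explicit batch size of 4 yields 16 batches.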
for batch_size, expected_batches in [(None, 2), (4, 16)]:
inputs = input_layer.Input(batch_size=batch_size, shape=(10,))
outputs = layers_module.Dense(1, activation="sigmoid")(inputs)
model = training_module.Model(inputs, outputs)
model.compile(
optimizer_legacy.adam.Adam(0.001), "binary_crossentropy"
)
counter = Counter()
model.fit(x, y, callbacks=[counter])
self.assertEqual(counter.batches, expected_batches)
model = sequential.Sequential(
[layers_module.Dense(1, batch_input_shape=(batch_size, 10))]
)
model.compile(
optimizer_legacy.adam.Adam(0.001), "binary_crossentropy"
)
counter = Counter()
model.fit(x, y, callbacks=[counter])
self.assertEqual(counter.batches, expected_batches)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_static_batch_in_input_layer_consistency_checks(self):
if tf.executing_eagerly():
self.skipTest("Not inferred in eager.")
x, y = np.ones((64, 10), "float32"), np.ones((64, 1), "float32")
inputs = input_layer.Input(batch_size=2, shape=(10,))
outputs = layers_module.Dense(1, activation="sigmoid")(inputs)
model = training_module.Model(inputs, outputs)
model.compile(optimizer_legacy.adam.Adam(0.001), "binary_crossentropy")
with self.assertRaisesRegex(
ValueError, "incompatible with the specified batch size"
):
model.fit(x, y, batch_size=4)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_compatible_batch_size_functional_model(self):
class MyLayer(layers_module.Layer):
def call(self, inputs):
return tf.concat(inputs, axis=0)
input1 = input_layer.Input(batch_size=2, shape=(10,))
input2 = input_layer.Input(batch_size=3, shape=(10,))
outputs = MyLayer()([input1, input2])
with tf.compat.v1.test.mock.patch.object(
logging, "warning"
) as mock_warn:
training_module.Model([input1, input2], outputs)
self.assertEqual(
mock_warn.call_args_list[0][0][0],
"Found incompatible static batch sizes among the inputs. "
"Batch sizes: [2, 3]",
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_calling_subclass_model_on_different_datasets(self):
class SubclassedModel(training_module.Model):
def call(self, inputs):
return inputs * 2
model = SubclassedModel()
dataset_one = tf.data.Dataset.from_tensor_slices([[0], [1]]).batch(2)
dataset_two = tf.data.Dataset.from_tensor_slices(
[[3], [4], [5], [6], [7], [8]]
).batch(2)
self.assertAllEqual([[0], [2]], model.predict(dataset_one, steps=1))
self.assertAllEqual(
[[6], [8], [10], [12]], model.predict(dataset_two, steps=2)
)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_training_on_sparse_categorical_crossentropy_loss_with_softmax(
self,
):
np.random.seed(1337)
train_x = np.ones((100, 4))
train_y = np.random.randint(0, 1, size=(100, 1))
reference_model = test_utils.get_small_sequential_mlp(
16, 2, input_dim=4
)
reference_model.compile(
loss="sparse_categorical_crossentropy",
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=True,
)
fixed_weights = reference_model.get_weights()
reference_model_loss = reference_model.train_on_batch(train_x, train_y)
test_model = test_utils.get_small_sequential_mlp(16, 2, input_dim=4)
test_model.compile(
loss="sparse_categorical_crossentropy",
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=False,
)
test_model.set_weights(fixed_weights)
test_model_loss = test_model.train_on_batch(train_x, train_y)
self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_training_on_categorical_crossentropy_loss_with_softmax(self):
np.random.seed(1337)
train_x = np.ones((100, 4))
train_y = np_utils.to_categorical(
np.random.randint(0, 1, size=(100, 1)), 2
)
reference_model = test_utils.get_small_sequential_mlp(
16, 2, input_dim=4
)
reference_model.compile(
loss="categorical_crossentropy",
optimizer=rmsprop.RMSprop(learning_rate=0.001),
run_eagerly=True,
)
fixed_weights = reference_model.get_weights()
reference_model_loss = reference_model.train_on_batch(train_x, train_y)
test_model = test_utils.get_small_sequential_mlp(16, 2, input_dim=4)
test_model.compile(
loss="categorical_crossentropy",
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=False,
)
test_model.set_weights(fixed_weights)
test_model_loss = test_model.train_on_batch(train_x, train_y)
self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_training_on_binary_crossentropy_loss(self):
train_x = np.ones((100, 4), dtype=np.float32)
train_y = np.ones((100, 1), dtype=np.float32)
reference_model = test_utils.get_small_sequential_mlp(
16, 1, input_dim=4
)
reference_model.compile(
loss="binary_crossentropy",
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=True,
)
fixed_weights = reference_model.get_weights()
reference_model_loss = reference_model.train_on_batch(train_x, train_y)
test_model = test_utils.get_small_sequential_mlp(16, 1, input_dim=4)
test_model.compile(
loss="binary_crossentropy",
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=False,
)
test_model.set_weights(fixed_weights)
test_model_loss = test_model.train_on_batch(train_x, train_y)
self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
("default", 1, 4),
("integer_two", 2, 2),
("integer_four", 4, 1),
("simple_list", [1, 3, 4], 3),
("duplicated_list", [4, 2, 2], 2),
)
def test_validation_freq(self, validation_freq, expected_runs):
x, y = np.ones((10, 10)), np.ones((10, 1))
model = test_utils.get_small_mlp(2, 1, 10)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
class ValCounter(Callback):
def __init__(self):
self.val_runs = 0
def on_test_begin(self, logs=None):
self.val_runs += 1
val_counter = ValCounter()
model.fit(
x,
y,
epochs=4,
validation_data=(x, y),
validation_freq=validation_freq,
callbacks=[val_counter],
)
self.assertEqual(val_counter.val_runs, expected_runs)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_validation_steps_without_data(self):
if tf.executing_eagerly():
self.skipTest("Check removed in new `fit`")
x, y = np.ones((10, 10)), np.ones((10, 1))
model = test_utils.get_small_mlp(2, 1, 10)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
with self.assertRaisesRegex(
ValueError,
"`validation_steps` should not be specified if "
"`validation_data` is None.",
):
model.fit(x, y, epochs=4, validation_data=None, validation_steps=3)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_layer_with_variable_output(self):
class VariableOutputLayer(layers_module.Layer):
def build(self, input_shape):
self.v = self.add_weight(
"output_var", shape=(2, 5), initializer="ones"
)
def call(self, inputs):
return self.v
model = test_utils.get_model_from_layers(
[VariableOutputLayer(), layers_module.Dense(1)], input_shape=(10,)
)
# TODO(omalleyt): Make this work with `run_eagerly=True`.
model.compile("sgd", "mse", run_eagerly=False)
model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2, epochs=5)
self.assertLen(model.trainable_variables, 3)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
@test_utils.enable_v2_dtype_behavior
def test_model_dtype(self):
class AssertTypeLayer(layers_module.Layer):
def call(self, inputs):
                assert inputs.dtype.name == self.dtype, (
                    "Input tensor has type %s which does not match the "
                    "expected type %s" % (inputs.dtype.name, self.dtype)
                )
return inputs + 1.0
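        # Build a model per dtype and verify that inputs are cast to the
        # layer dtype in fit, test_on_batch, and direct calls.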
for dtype in ("float16", "float32", "float64"):
model = test_utils.get_model_from_layers(
[AssertTypeLayer(dtype=dtype)], input_shape=(10,)
)
model.compile(
"sgd", "mse", run_eagerly=test_utils.should_run_eagerly()
)
x = np.ones((10, 10))
y = np.ones((10, 10))
model.fit(x, y)
model.test_on_batch(x, y)
model(x)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
@test_utils.enable_v2_dtype_behavior
def test_model_input_dtype(self):
model = test_utils.get_small_mlp(1, 10, 10)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
x = np.ones((10, 10)).astype(np.float64)
y = np.ones((10, 10)).astype(np.float64)
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
model.fit(dataset)
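        # The model keeps its default float32 compute dtype even though the
        # dataset yields float64 data.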
self.assertEqual(model._compute_dtype, "float32")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_subclassed_model_with_training_arg(self):
class LayerWithTrainingArg(layers_module.Layer):
def call(self, inputs, training=None):
self.training = training
return inputs
class ModelWithTrainingArg(training_module.Model):
def __init__(self):
super().__init__()
self.l1 = LayerWithTrainingArg()
def call(self, inputs, training=None):
self.training = training
inputs = self.l1(inputs, training=training)
return inputs
x = np.zeros((1, 2))
model = ModelWithTrainingArg()
model.compile(
loss="mse",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x, x, epochs=1)
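        # In eager mode, `fit` passes training=True; in graph mode it passes
        # the symbolic learning phase tensor instead.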
if tf.executing_eagerly():
expected_training_arg = True
else:
expected_training_arg = backend.symbolic_learning_phase()
self.assertIs(model.training, expected_training_arg)
self.assertIs(model.l1.training, expected_training_arg)
@test_combinations.run_all_keras_modes
def test_error_when_model_is_not_compiled(self):
inputs = input_layer.Input(shape=(1,))
outputs = layers_module.Dense(1)(inputs)
model = training_module.Model(inputs, outputs)
with self.assertRaisesRegex(RuntimeError, "must compile your model"):
model.fit(np.ones((1, 1)), np.ones((1, 1)))
class MyModel(training_module.Model):
def call(self, x):
self.add_loss(tf.reduce_sum(x))
return x
model = MyModel()
with self.assertRaisesRegex(RuntimeError, "must compile your model"):
model.fit(np.random.random((32, 1)), epochs=2)
@test_combinations.run_all_keras_modes
@test_utils.enable_v2_dtype_behavior
def test_losses_of_different_dtypes(self):
inp = input_layer.Input(shape=(2,))
out_1 = layers_module.Dense(
2, dtype="float32", kernel_regularizer="l2"
)(inp)
out_2 = layers_module.Dense(
2, dtype="float16", kernel_regularizer="l2"
)(inp)
model = training_module.Model(inp, [out_1, out_2])
extra_loss = tf.reduce_sum(tf.cast(out_2, "float64"))
model.add_loss(extra_loss)
model.compile(
"sgd", ["mse", "mse"], run_eagerly=test_utils.should_run_eagerly()
)
x, y = np.ones((10, 2)), np.ones((10, 2))
model.fit(x, [y, y])
@test_combinations.run_all_keras_modes
@test_utils.enable_v2_dtype_behavior
def test_losses_of_different_dtypes_with_subclassed_model(self):
class MyModel(training_module.Model):
def build(self, _):
self.dense = layers_module.Dense(2)
def call(self, inputs):
self.add_loss(tf.cast(tf.nn.l2_loss(inputs), "float64"))
return self.dense(inputs)
model = MyModel(dtype="float32")
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
x, y = np.ones((10, 2)), np.ones((10, 2))
model.fit(x, y)
@test_combinations.run_all_keras_modes
@test_utils.enable_v2_dtype_behavior
def test_regularizer_of_different_dtype(self):
inp = input_layer.Input(shape=(2,))
def regularizer(weight):
return tf.cast(tf.nn.l2_loss(weight), "float64")
out = layers_module.Dense(
2, dtype="float32", kernel_regularizer=regularizer
)(inp)
model = training_module.Model(inp, out)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
x, y = np.ones((10, 2)), np.ones((10, 2))
model.fit(x, y)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_outputs_are_floats(self):
x, y = np.ones((10, 1)), np.ones((10, 1))
model = sequential.Sequential([layers_module.Dense(1)])
model.compile(
"sgd",
"mse",
metrics=["accuracy"],
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(x, y, epochs=2)
self.assertIsInstance(history.history["loss"][0], float)
self.assertIsInstance(history.history["accuracy"][0], float)
loss, accuracy = model.train_on_batch(x, y)
self.assertIsInstance(loss, float)
self.assertIsInstance(accuracy, float)
loss, accuracy = model.evaluate(x, y)
self.assertIsInstance(loss, float)
self.assertIsInstance(accuracy, float)
loss, accuracy = model.test_on_batch(x, y)
self.assertIsInstance(loss, float)
self.assertIsInstance(accuracy, float)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_int_output(self):
x, y = np.ones((10, 1)), np.ones((10, 1))
model = sequential.Sequential([layers_module.Dense(1)])
class MyMetric(metrics_module.Metric):
def update_state(self, y_true, y_pred, sample_weight=None):
del y_true, y_pred, sample_weight
def result(self):
return tf.constant(1, dtype="int64")
model.compile(
"sgd",
"mse",
metrics=[MyMetric()],
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(x, y, epochs=2)
self.assertIsInstance(history.history["my_metric"][0], int)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@test_utils.enable_v2_dtype_behavior
def test_mixed_precision(self):
x, y = np.ones((10, 1)), np.ones((10, 1))
policy.set_global_policy("mixed_float16")
model = sequential.Sequential([layers_module.Dense(1)])
optimizer = sgd_experimental.SGD()
model.compile(
optimizer,
"mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x, y, epochs=2)
policy.set_global_policy("float32")
@test_combinations.run_all_keras_modes
def test_calling_aggregate_gradient(self):
class _Optimizer(optimizer_legacy.gradient_descent.SGD):
"""Mock optimizer to check if _aggregate_gradient is called."""
_HAS_AGGREGATE_GRAD = True
def __init__(self):
self.aggregate_gradients_called = False
super().__init__(name="MyOptimizer")
def _aggregate_gradients(self, grads):
self.aggregate_gradients_called = True
return super()._aggregate_gradients(grads)
mock_optimizer = _Optimizer()
model = sequential.Sequential()
model.add(layers_module.Dense(10, activation="relu"))
model.compile(
mock_optimizer, "mse", run_eagerly=test_utils.should_run_eagerly()
)
x, y = np.ones((10, 10)), np.ones((10, 10))
model.fit(x, y)
self.assertEqual(model.optimizer.aggregate_gradients_called, True)
class _OptimizerOverrideApplyGradients(_Optimizer):
"""Override apply_gradients.
To test the case where the optimizer does not define the
experimental_aggregate_gradients parameter.
"""
_HAS_AGGREGATE_GRAD = False
def apply_gradients(self, grads_and_vars, name=None):
return super().apply_gradients(grads_and_vars, name)
mock_optimizer = _OptimizerOverrideApplyGradients()
model.compile(
mock_optimizer, "mse", run_eagerly=test_utils.should_run_eagerly()
)
x, y = np.ones((10, 10)), np.ones((10, 10))
model.fit(x, y)
self.assertEqual(model.optimizer.aggregate_gradients_called, True)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_gradients_are_none(self):
class DenseWithExtraWeight(layers_module.Dense):
def build(self, input_shape):
# Gradients w.r.t. extra_weights are None
self.extra_weight_1 = self.add_weight(
"extra_weight_1", shape=(), initializer="ones"
)
super().build(input_shape)
self.extra_weight_2 = self.add_weight(
"extra_weight_2", shape=(), initializer="ones"
)
model = sequential.Sequential(
[DenseWithExtraWeight(4, input_shape=(4,))]
)
# Test clipping can handle None gradients
opt = optimizer_legacy.adam.Adam(clipnorm=1.0, clipvalue=1.0)
model.compile(opt, "mse", run_eagerly=test_utils.should_run_eagerly())
inputs = np.random.normal(size=(64, 4))
targets = np.random.normal(size=(64, 4))
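        # Weight order is [extra_weight_1, kernel, bias, extra_weight_2], so
        # index 1 is the Dense kernel.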
old_kernel = model.get_weights()[1]
model.fit(inputs, targets)
new_kernel = model.get_weights()[1]
self.assertNotAllEqual(old_kernel, new_kernel)
@test_combinations.run_all_keras_modes
def test_layer_ordering(self):
class MyLayer(layers_module.Layer):
pass
class MyModel(training_module.Model):
def __init__(self, name):
super().__init__(name=name)
self.weight = tf.Variable(0, name=name)
self.direct_sublayer = MyLayer(name="direct")
self.direct_sublayer.d = {"d": MyLayer(name="direct/dict")}
self.dict_sublayer = {"d": MyLayer(name="dict")}
self.dict_sublayer["d"].direct = MyLayer(name="dict/direct")
model = MyModel("model")
# All sublayers, including self and recursive sublayers.
self.assertEqual(
["model", "direct", "direct/dict", "dict", "dict/direct"],
[l.name for l in model._flatten_layers()],
)
# Only direct sublayers, including those in data structures.
self.assertEqual(["direct", "dict"], [l.name for l in model.layers])
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_trainable_state_setting(self):
class UpdateLayer(layers_module.Layer):
def __init__(self):
super().__init__()
self.v = tf.Variable(0.0, trainable=False)
def call(self, x):
self.add_update(lambda: self.v.assign_add(1.0))
return x * self.v
layer = UpdateLayer()
model_with_updates = sequential.Sequential([layer])
model_with_updates.compile(
"sgd", "mse", run_eagerly=test_utils.should_run_eagerly()
)
layer.trainable = False
model_without_updates = sequential.Sequential([layer])
model_without_updates.compile(
"sgd", "mse", run_eagerly=test_utils.should_run_eagerly()
)
x, y = np.ones((10, 1)), np.ones((10, 1))
self.assertEqual(self.evaluate(layer.v), 0.0)
model_with_updates.fit(x, y, batch_size=10)
# assign_add called.
self.assertEqual(self.evaluate(layer.v), 1.0)
model_without_updates.fit(x, y, batch_size=10)
# assign_add not called.
self.assertEqual(self.evaluate(layer.v), 1.0)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@parameterized.named_parameters(
("numpy_array", "numpy_array"),
("dataset_array", "dataset_array"),
("dataset_dict", "dataset_dict"),
)
def test_single_input_no_tuple_wrapping(self, input_type):
x = np.ones((10, 1))
if input_type == "numpy_array":
batch_size = 3
expected_data_type = tf.Tensor
elif input_type == "dataset_array":
x = tf.data.Dataset.from_tensor_slices(x).batch(3)
batch_size = None
expected_data_type = tf.Tensor
else:
x = {"my_input": x}
x = tf.data.Dataset.from_tensor_slices(x).batch(3)
batch_size = None
expected_data_type = dict
test_case = self
class MyModel(training_module.Model):
def train_step(self, data):
# No tuple wrapping for single x input and no targets.
test_case.assertIsInstance(data, expected_data_type)
return super().train_step(data)
def test_step(self, data):
test_case.assertIsInstance(data, expected_data_type)
return super().test_step(data)
def predict_step(self, data):
test_case.assertIsInstance(data, expected_data_type)
return super().predict_step(data)
inputs = layers_module.Input(shape=(1,), name="my_input")
outputs = layers_module.Dense(1)(inputs)
model = MyModel(inputs, outputs)
model.add_loss(tf.reduce_sum(outputs))
model.compile("sgd")
model.fit(x, batch_size=batch_size)
model.evaluate(x, batch_size=batch_size)
model.predict(x, batch_size=batch_size)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@parameterized.named_parameters(
("custom_metrics", False, True),
("compiled_metrics", True, False),
("both_compiled_and_custom_metrics", True, True),
)
def test_evaluate_with_custom_test_step(
self, use_compiled_metrics, use_custom_metrics
):
class MyModel(training_module.Model):
def test_step(self, data):
x, y = data
pred = self(x)
metrics = {}
if use_compiled_metrics:
self.compiled_metrics.update_state(y, pred)
self.compiled_loss(y, pred)
for metric in self.metrics:
metrics[metric.name] = metric.result()
if use_custom_metrics:
custom_metrics = {
"mean": tf.reduce_mean(pred),
"sum": tf.reduce_sum(pred),
}
metrics.update(custom_metrics)
return metrics
inputs = layers_module.Input((2,))
outputs = layers_module.Dense(3)(inputs)
model = MyModel(inputs, outputs)
if use_compiled_metrics:
model.compile(
"adam",
"mse",
metrics=["mae", "mape"],
run_eagerly=test_utils.should_run_eagerly(),
)
else:
model.compile(
"adam", "mse", run_eagerly=test_utils.should_run_eagerly()
)
x = np.random.random((4, 2))
y = np.random.random((4, 3))
results_list = model.evaluate(x, y)
results_dict = model.evaluate(x, y, return_dict=True)
self.assertLen(results_list, len(results_dict))
if use_compiled_metrics and use_custom_metrics:
self.assertLen(results_list, 5)
self.assertEqual(
results_list,
[
results_dict["loss"],
results_dict["mae"],
results_dict["mape"],
results_dict["mean"],
results_dict["sum"],
],
)
if use_compiled_metrics and not use_custom_metrics:
self.assertLen(results_list, 3)
self.assertEqual(
results_list,
[
results_dict["loss"],
results_dict["mae"],
results_dict["mape"],
],
)
if not use_compiled_metrics and use_custom_metrics:
self.assertLen(results_list, 2)
self.assertEqual(
results_list, [results_dict["mean"], results_dict["sum"]]
)
@test_combinations.run_all_keras_modes
@test_combinations.run_with_all_model_types
def test_model_make_function(self):
layers = [
layers_module.Dense(10, dtype=np.float64),
layers_module.Dense(10, dtype=np.float64),
]
model = test_utils.get_model_from_layers(layers, input_shape=(1,))
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
original_train_function = model.make_train_function()
self.assertIsNotNone(original_train_function)
self.assertEqual(model.make_train_function(), original_train_function)
# Check that we regenerate it without reusing the cached version.
self.assertNotEqual(
model.make_train_function(force=True), original_train_function
)
original_test_function = model.make_test_function()
self.assertIsNotNone(original_test_function)
self.assertEqual(model.make_test_function(), original_test_function)
# Check that we regenerate it without reusing the cached version.
self.assertNotEqual(
model.make_test_function(force=True), original_test_function
)
original_predict_function = model.make_predict_function()
self.assertIsNotNone(original_predict_function)
self.assertEqual(
model.make_predict_function(), original_predict_function
)
# Check that we regenerate it without reusing the cached version.
self.assertNotEqual(
model.make_predict_function(force=True), original_predict_function
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_custom_compute_metrics(self):
class CustomMetric(metrics_module.Mean):
def sq_diff_plus_x(self, x, y_true, y_pred):
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
sq_diff_plus_x = tf.add(
x, tf.math.squared_difference(y_pred, y_true)
)
return backend.mean(sq_diff_plus_x, axis=-1)
def update_state(self, x, y_true, y_pred, sample_weight=None):
matches = self.sq_diff_plus_x(x, y_true, y_pred)
return super().update_state(matches)
class MyModel(sequential.Sequential):
def compute_metrics(self, x, y, y_pred, sample_weight):
metric_results = super().compute_metrics(
x, y, y_pred, sample_weight
)
self.custom_metric.update_state(x, y, y_pred, sample_weight)
metric_results[
"custom_metric_name"
] = self.custom_metric.result()
return metric_results
tensors = tf.random.uniform((10, 10)), tf.random.uniform((10,))
dataset = tf.data.Dataset.from_tensor_slices(tensors).repeat().batch(1)
model = MyModel([layers_module.Dense(10)])
model.custom_metric = CustomMetric("my_metric")
initial_result = model.custom_metric.result()
optimizer = optimizer_legacy.gradient_descent.SGD()
model.compile(optimizer, loss="mse", steps_per_execution=10)
model.fit(dataset, epochs=2, steps_per_epoch=10, verbose=2)
after_fit_result = model.custom_metric.result()
self.assertEqual(self.evaluate(initial_result), 0.0)
self.assertNotEqual(
self.evaluate(initial_result), self.evaluate(after_fit_result)
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_custom_compute_loss(self):
class MyModel(training_module.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.loss_metric = metrics_module.Mean(name="loss")
def compute_loss(self, x, y, y_pred, sample_weight):
loss = tf.reduce_mean(tf.math.squared_difference(y_pred, y))
loss += tf.add_n(self.losses)
self.loss_metric.update_state(loss)
return loss
def reset_metrics(self):
self.loss_metric.reset_states()
@property
def metrics(self):
return [self.loss_metric]
tensors = tf.random.uniform((10, 10)), tf.random.uniform((10,))
dataset = tf.data.Dataset.from_tensor_slices(tensors).repeat().batch(1)
inputs = layers_module.Input(shape=(10,), name="my_input")
outputs = layers_module.Dense(10)(inputs)
model = MyModel(inputs, outputs)
model.add_loss(tf.reduce_sum(outputs))
optimizer = optimizer_legacy.gradient_descent.SGD()
model.compile(optimizer, loss="mse", steps_per_execution=10)
history = model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertLen(history.history["loss"], 2)
self.assertAllClose(
history.history["loss"][1], model.loss_metric.result()
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@parameterized.named_parameters(
("mixed_float16", "mixed_float16"), ("float32", "float32")
)
def test_ema_overwrite(self, test_policy):
if not tf.__internal__.tf2.enabled():
self.skipTest("EMA optimizer is only available in TF2.")
policy.set_global_policy(test_policy)
model = sequential.Sequential()
model.add(input_layer.Input(shape=(4,)))
model.add(layers_module.Dense(1, activation="relu"))
tensors = tf.random.uniform((4, 4)), tf.random.uniform((4,))
dataset = tf.data.Dataset.from_tensor_slices(tensors).repeat().batch(1)
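        # With ema_momentum=1 the moving average never moves away from the
        # initial weights, so overwriting with the EMA restores them.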
optimizer = sgd_experimental.SGD(use_ema=True, ema_momentum=1)
model.compile(optimizer, loss="mse", steps_per_execution=10)
initial_value = tf.Variable(model.trainable_variables[0])
history = model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertLen(history.history["loss"], 2)
self.assertAllClose(initial_value, model.trainable_variables[0])
policy.set_global_policy("float32")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_get_verbosity(self):
class MyStrategy(tf.distribute.Strategy):
def __init__(self):
self._should_use_with_coordinator = True
with self.assertRaisesRegex(ValueError, "`verbose=1` is not allowed"):
training_module._get_verbosity(1, MyStrategy())
io_utils.enable_interactive_logging()
self.assertEqual(
training_module._get_verbosity("auto", MyStrategy()), 2
)
self.assertEqual(
training_module._get_verbosity(
"auto", tf.distribute.MirroredStrategy()
),
1,
)
self.assertEqual(
training_module._get_verbosity(2, tf.distribute.MirroredStrategy()),
2,
)
io_utils.disable_interactive_logging()
self.assertEqual(
training_module._get_verbosity(
"auto", tf.distribute.MirroredStrategy()
),
2,
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_save_spec(self):
class Model(training_module.Model):
def call(
self, arg_input_1, arg_input_2, keyword_input, training=None
):
return 0
# Test subclassed model save specs.
model = Model()
model(
tf.ones([1, 1]),
tf.ones([2, 2]),
keyword_input=tf.ones([3, 3]),
training=False,
)
spec = model.save_spec(dynamic_batch=False)
self.assertEqual(spec[0][0].shape.as_list(), [1, 1])
self.assertEqual(spec[0][1].shape.as_list(), [2, 2])
self.assertEqual(spec[1]["keyword_input"].shape.as_list(), [3, 3])
spec = model.save_spec(dynamic_batch=True)
self.assertEqual(spec[0][0].shape.as_list(), [None, 1])
# Test functional model save specs.
input_1 = layers_module.Input((1,), batch_size=1)
input_2 = layers_module.Input((2,), batch_size=2)
input_3 = layers_module.Input((3,), batch_size=3)
output = model(input_1, input_2, keyword_input=input_3, training=True)
functional = training_module.Model([input_1, input_2, input_3], output)
# Functional models should ignore dynamic_batch if the input layers have
# a known batch size.
spec = functional.save_spec(dynamic_batch=True)
input_specs = spec[0][0]
self.assertEqual(input_specs[0].shape.as_list(), [1, 1])
self.assertEqual(input_specs[1].shape.as_list(), [2, 2])
self.assertEqual(input_specs[2].shape.as_list(), [3, 3])
class TestAutotuneSPE(test_combinations.TestCase):
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_compile_fit_with_jit_compile(self):
# Test with jit_compile = True
model = sequential.Sequential([layers_module.Dense(1)])
model.compile(
"sgd",
loss="mse",
run_eagerly=False,
jit_compile=True,
steps_per_execution="auto",
)
x, y = np.ones((10, 1)), np.ones((10, 1))
model.fit(x, y, epochs=2)
        # Test compile and fit for an RNN model
model = sequential.Sequential()
model.add(
layers_module.TimeDistributed(
layers_module.Embedding(5, 6, mask_zero=True),
input_shape=(None, None),
)
) # N by t_1 by t_2 by 6
model.add(
layers_module.TimeDistributed(
layers_module.SimpleRNN(7, return_sequences=True)
)
)
model.add(
layers_module.TimeDistributed(
layers_module.SimpleRNN(8, return_sequences=False)
)
)
model.add(layers_module.SimpleRNN(1, return_sequences=False))
model.compile(
optimizer="sgd",
loss="mse",
jit_compile=True,
steps_per_execution="auto",
)
model_input = np.random.randint(
low=1, high=5, size=(10, 3, 4), dtype="int32"
)
for i in range(4):
model_input[i, i:, i:] = 0
model.fit(
model_input, np.random.random((10, 1)), epochs=1, batch_size=10
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_compile_fit_evaluate_predict_with_mirrored_strategy(self):
# Test with jit_compile = True
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = sequential.Sequential([layers_module.Dense(1)])
model.compile(
"sgd",
loss="mse",
run_eagerly=False,
jit_compile=True,
steps_per_execution="auto",
)
x, y = np.ones((10, 1)), np.ones((10, 1))
model.fit(x, y, epochs=2)
model.evaluate(x, y)
model.predict(x)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_spe_tune_compile_fit_then_false_predict(self):
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = sequential.Sequential([layers_module.Dense(1)])
model.compile(
"sgd",
loss="mse",
run_eagerly=False,
jit_compile=True,
steps_per_execution="auto",
)
x, y = np.ones((10, 1)), np.ones((10, 1))
model.fit(x, y, epochs=2)
model.evaluate(x, y)
model.autotune_steps_per_execution = False
model.predict(x)
        assert not model.autotune_steps_per_execution
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_spe_tune_set_after_compile(self):
model = sequential.Sequential([layers_module.Dense(1)])
model.compile(
"sgd",
loss="mse",
run_eagerly=False,
jit_compile=True,
steps_per_execution=5,
)
x, y = np.ones((10, 1)), np.ones((10, 1))
model.fit(x, y, epochs=2)
assert model._steps_per_execution_tuner is None
model.autotune_steps_per_execution = True
model.fit(x, y, epochs=2)
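        # Enabling autotuning after compile keeps the compiled value (5) as
        # the starting point and attaches a tuner.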
assert model.steps_per_execution.numpy().item() == 5
assert model._steps_per_execution_tuner
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_spe_tune_set_before_compile(self):
model = sequential.Sequential([layers_module.Dense(1)])
model.steps_per_execution = 5
model.compile(
"sgd",
loss="mse",
run_eagerly=False,
jit_compile=True,
steps_per_execution="auto",
)
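        # The value assigned before compile is preserved as the starting
        # point when compiling with steps_per_execution="auto".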
assert model.steps_per_execution.numpy().item() == 5
assert model._steps_per_execution_tuner
x, y = np.ones((10, 1)), np.ones((10, 1))
model.fit(x, y, epochs=2)
class TestExceptionsAndWarnings(test_combinations.TestCase):
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@test_combinations.run_with_all_model_types
def test_fit_on_no_output(self):
inputs = layers_module.Input((3,))
outputs = layers_module.Dense(2)(inputs)
model = training_module.Model(inputs, outputs)
model.compile("rmsprop", "mse")
x = np.zeros((32, 3))
with self.assertRaisesRegex(ValueError, "Target data is missing..*"):
model.fit(x)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@test_combinations.run_with_all_model_types
def test_fit_on_wrong_output_type(self):
inputs1 = layers_module.Input((3,), name="a")
inputs2 = layers_module.Input((3,), name="b")
x = layers_module.Concatenate()([inputs1, inputs2])
outputs = layers_module.Dense(2, name="c")(x)
model = training_module.Model([inputs1, inputs2], outputs)
model.compile("rmsprop", "mse")
x = np.zeros((32, 3))
y = np.zeros((32, 2))
with self.assertRaisesRegex(ValueError, "Target data is missing..*"):
model.fit({"a": x, "b": x, "c": y})
@test_combinations.run_all_keras_modes
def test_compile_warning_for_loss_missing_output(self):
with self.cached_session():
inp = layers_module.Input(shape=(16,), name="input_a")
out_1 = layers_module.Dense(8, name="dense_1")(inp)
out_2 = layers_module.Dense(
3, activation="softmax", name="dense_2"
)(out_1)
model = training_module.Model(inputs=[inp], outputs=[out_1, out_2])
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(
optimizer,
loss={
"dense_2": "categorical_crossentropy",
},
metrics={
"dense_2": "categorical_accuracy",
"dense_1": metrics_module.CategoricalAccuracy(),
},
run_eagerly=test_utils.should_run_eagerly(),
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_predict_error_with_empty_x(self):
inputs = layers_module.Input(shape=(2,))
outputs = layers_module.Dense(4)(inputs)
model = training_module.Model(inputs=inputs, outputs=outputs)
model.compile(loss="mse")
with self.assertRaisesRegex(
ValueError, "Expected input data to be non-empty."
):
model.predict(np.array([]))
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@parameterized.named_parameters(
("dynamic", 0, False),
("dynamic_multistep", 10, False),
("static", 0, True),
("static_multistep", 10, True),
)
def test_predict_structured(self, spe, static_batch):
inputs = layers_module.Input(shape=(2,))
outputs = layers_module.Dense(2)(inputs)
model = training_module.Model(
inputs=inputs,
outputs={"out": outputs},
)
model.compile(
loss="mse",
steps_per_execution=spe,
run_eagerly=test_utils.should_run_eagerly(),
)
xdata = np.random.uniform(size=(8, 2)).astype(np.float32)
dataset = tf.data.Dataset.from_tensor_slices((xdata, xdata))
dataset = dataset.batch(8, drop_remainder=static_batch)
ret = model.predict(dataset, steps=1)
tf.nest.assert_same_structure(ret, {"out": ""})
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_on_batch_error_inconsistent_batch_size(self):
input_node1 = layers_module.Input(shape=(5,))
input_node2 = layers_module.Input(shape=(5,))
output_node = layers_module.Concatenate()([input_node1, input_node2])
output_node = layers_module.Dense(4)(output_node)
model = training_module.Model([input_node1, input_node2], output_node)
model.compile(loss="mse")
with self.assertRaisesRegex(
ValueError, "Data cardinality is ambiguous"
):
model.train_on_batch(
[np.ones((10, 5)), np.ones((10, 5))], np.ones((11, 4))
)
with self.assertRaisesRegex(
ValueError, "Data cardinality is ambiguous"
):
model.test_on_batch(
[np.ones((10, 5)), np.ones((10, 5))], np.ones((11, 4))
)
with self.assertRaisesRegex(
ValueError, "Data cardinality is ambiguous"
):
model.predict_on_batch([np.ones((10, 5)), np.ones((11, 5))])
class LossWeightingTest(test_combinations.TestCase):
@test_combinations.run_all_keras_modes
def test_class_weights(self):
num_classes = 5
batch_size = 5
epochs = 10
weighted_class = 3
weight = 0.5
train_samples = 1000
test_samples = 1000
input_dim = 5
learning_rate = 0.001
model = test_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim
)
model.compile(
loss="categorical_crossentropy",
metrics=["acc", metrics_module.CategoricalAccuracy()],
weighted_metrics=["mae", metrics_module.CategoricalAccuracy()],
optimizer=RMSPropOptimizer(learning_rate=learning_rate),
run_eagerly=test_utils.should_run_eagerly(),
)
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes,
)
int_y_test = y_test.copy()
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
class_weight = dict([(i, 1.0) for i in range(num_classes)])
class_weight[weighted_class] = weight
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
class_weight=class_weight,
validation_data=(x_train, y_train),
)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 2,
verbose=0,
class_weight=class_weight,
)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 2,
verbose=0,
class_weight=class_weight,
validation_split=0.1,
)
model.train_on_batch(
x_train[:batch_size],
y_train[:batch_size],
class_weight=class_weight,
)
ref_score = model.evaluate(x_test, y_test, verbose=0) # noqa: F841
score = model.evaluate( # noqa: F841
x_test[test_ids, :], y_test[test_ids, :], verbose=0
)
# TODO(b/152990697): Fix the class weights test here.
# self.assertLess(score[0], ref_score[0])
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_segmentation_class_weights(self):
num_channels = 3
num_classes = 5
batch_size = 2
image_width = 8
input_shape = (batch_size, image_width, image_width, num_channels)
output_shape = (batch_size, image_width, image_width, num_classes)
model = sequential.Sequential([layers_module.Conv2D(num_classes, 1)])
model.compile(
loss="categorical_crossentropy",
metrics=["acc", metrics_module.CategoricalAccuracy()],
weighted_metrics=["mae", metrics_module.CategoricalAccuracy()],
optimizer="adam",
run_eagerly=test_utils.should_run_eagerly(),
)
x = tf.random.uniform(input_shape)
y = tf.random.uniform(output_shape, dtype=tf.int32, maxval=num_classes)
# Class weights are just the class value + 1
class_weight = dict([(i, i + 1) for i in range(num_classes)])
# This test simply asserts that the model can be compiled and fit
# can run without error. Verification that the class weights are
# applied correctly is performed in data_adapter_test.
model.fit(x, y, class_weight=class_weight, steps_per_epoch=1)
sample_weight = np.array([x + 1 for x in range(batch_size)])
model.fit(
x,
y,
class_weight=class_weight,
sample_weight=sample_weight,
steps_per_epoch=1,
)
@test_combinations.run_all_keras_modes
def test_temporal_sample_weights(self):
num_classes = 5
batch_size = 5
epochs = 10
weighted_class = 3
weight = 10.0
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
learning_rate = 0.001
with self.cached_session():
model = sequential.Sequential()
model.add(
layers_module.TimeDistributed(
layers_module.Dense(num_classes),
input_shape=(timesteps, input_dim),
)
)
model.add(layers_module.Activation("softmax"))
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes,
)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = weight
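            # Tile the inputs, targets, and sample weights across the time
            # dimension to build temporal data.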
temporal_x_train = np.reshape(
x_train, (len(x_train), 1, x_train.shape[1])
)
temporal_x_train = np.repeat(temporal_x_train, timesteps, axis=1)
temporal_x_test = np.reshape(
x_test, (len(x_test), 1, x_test.shape[1])
)
temporal_x_test = np.repeat(temporal_x_test, timesteps, axis=1)
temporal_y_train = np.reshape(
y_train, (len(y_train), 1, y_train.shape[1])
)
temporal_y_train = np.repeat(temporal_y_train, timesteps, axis=1)
temporal_y_test = np.reshape(
y_test, (len(y_test), 1, y_test.shape[1])
)
temporal_y_test = np.repeat(temporal_y_test, timesteps, axis=1)
temporal_sample_weight = np.reshape(
sample_weight, (len(sample_weight), 1)
)
temporal_sample_weight = np.repeat(
temporal_sample_weight, timesteps, axis=1
)
model.compile(
RMSPropOptimizer(learning_rate=learning_rate),
loss="categorical_crossentropy",
metrics=["acc", metrics_module.CategoricalAccuracy()],
weighted_metrics=["mae", metrics_module.CategoricalAccuracy()],
sample_weight_mode="temporal",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(
temporal_x_train,
temporal_y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=temporal_sample_weight,
)
model.fit(
temporal_x_train,
temporal_y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=temporal_sample_weight,
validation_split=0.1,
)
model.train_on_batch(
temporal_x_train[:batch_size],
temporal_y_train[:batch_size],
sample_weight=temporal_sample_weight[:batch_size],
)
model.test_on_batch(
temporal_x_train[:batch_size],
temporal_y_train[:batch_size],
sample_weight=temporal_sample_weight[:batch_size],
)
ref_score = model.evaluate(
temporal_x_test, temporal_y_test, verbose=0
)
if not tf.executing_eagerly():
score = model.evaluate(
temporal_x_test[test_ids],
temporal_y_test[test_ids],
verbose=0,
)
self.assertLess(score[0], ref_score[0])
@test_combinations.run_all_keras_modes
@test_combinations.run_with_all_model_types(exclude_models="sequential")
def test_fit_with_incorrect_weights(self):
input_a = layers_module.Input(shape=(3,), name="input_a")
input_b = layers_module.Input(shape=(3,), name="input_b")
dense = layers_module.Dense(2, name="output_1")
dropout = layers_module.Dropout(0.5, name="output_2")
branch_a = [input_a, dense]
branch_b = [input_b, dense, dropout]
model = test_utils.get_multi_io_model(branch_a, branch_b)
model.compile(
optimizer="adam",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.random.random((10, 3))
y = np.random.random((10, 2))
with self.assertRaises(ValueError):
model.fit([x, x], [y, y], epochs=1, sample_weight={"unknown": x})
with self.assertRaises(ValueError):
model.fit([x, x], [y, y], epochs=1, class_weight={"unknown": 1})
@test_combinations.run_all_keras_modes
def test_default_sample_weight(self):
"""Verifies that fit works without having to set sample_weight."""
num_classes = 5
input_dim = 5
timesteps = 3
learning_rate = 0.001
with self.cached_session():
model = sequential.Sequential()
model.add(
layers_module.TimeDistributed(
layers_module.Dense(num_classes),
input_shape=(timesteps, input_dim),
)
)
x = np.random.random((10, timesteps, input_dim))
y = np.random.random((10, timesteps, num_classes))
optimizer = RMSPropOptimizer(learning_rate=learning_rate)
# sample_weight_mode is a list and mode value is None
model.compile(
optimizer,
loss="mse",
sample_weight_mode=[None],
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a list and mode value is `temporal`
model.compile(
optimizer,
loss="mse",
sample_weight_mode=["temporal"],
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a dict and mode value is None
model.compile(
optimizer,
loss="mse",
sample_weight_mode={"time_distributed": None},
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a dict and mode value is `temporal`
model.compile(
optimizer,
loss="mse",
sample_weight_mode={"time_distributed": "temporal"},
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x, y, epochs=1, batch_size=10)
            # sample_weight_mode is not a list/dict and mode value is None
model.compile(
optimizer,
loss="mse",
sample_weight_mode=None,
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x, y, epochs=1, batch_size=10)
            # sample_weight_mode is not a list/dict and mode value is
# `temporal`
model.compile(
optimizer,
loss="mse",
sample_weight_mode="temporal",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x, y, epochs=1, batch_size=10)
def test_sample_weight_tensor(self):
"""Tests that sample weight may be defined as a tensor in the graph."""
with tf.compat.v1.get_default_graph().as_default():
# Create a simple pass-through model
inputs = layers_module.Input(shape=1, name="input_layer")
model = training_module.Model(inputs=inputs, outputs=inputs)
model.compile(loss="mean_absolute_error", optimizer="adam")
# Prepare sample weights iterator tensor
sample_weights = tf.constant([[0, 0.4, 1, 1], [2, 0.4, 0.3, 1]])
dataset = tf.data.Dataset.from_tensor_slices(sample_weights)
sample_weights = tf.compat.v1.data.make_one_shot_iterator(
dataset
).get_next()
sample_weights = training_utils_v1.standardize_sample_weights(
sample_weights, model.output_names
)
# Update model loss with sample weight tensor.
model._compile_weights_loss_and_weighted_metrics(sample_weights)
feeds = {
"input_layer:0": [[0], [0], [0], [0]],
"input_layer_target:0": [[1], [1], [1], [1]],
}
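            # Each session run advances the one-shot iterator, so the two
            # evaluations below see different rows of sample weights.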
with self.cached_session() as sess:
self.assertAllClose(
(0.4 + 1 + 1) / 4,
sess.run(model.total_loss, feed_dict=feeds),
)
self.assertAllClose(
(2 + 0.4 + 0.3 + 1) / 4,
sess.run(model.total_loss, feed_dict=feeds),
)
@test_combinations.run_all_keras_modes
class MaskingTest(test_combinations.TestCase):
def _get_model(self, input_shape=None):
layers = [
layers_module.Masking(mask_value=0),
layers_module.TimeDistributed(
layers_module.Dense(1, kernel_initializer="one")
),
]
model = test_utils.get_model_from_layers(layers, input_shape)
model.compile(
loss="mse",
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=test_utils.should_run_eagerly(),
)
return model
@test_combinations.run_with_all_model_types
def test_masking(self):
model = self._get_model(input_shape=(2, 1))
x = np.array([[[1], [1]], [[0], [0]]])
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(loss, 0)
@test_combinations.run_with_all_model_types(exclude_models="functional")
def test_masking_deferred(self):
model = self._get_model()
x = np.array([[[1], [1]], [[0], [0]]])
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(loss, 0)
def test_mask_argument_in_layer(self):
# Test that the mask argument gets correctly passed to a layer in the
# functional API.
class CustomMaskedLayer(layers_module.Layer):
def __init__(self):
super().__init__()
self.supports_masking = True
def call(self, inputs, mask=None):
assert mask is not None
return inputs
def compute_output_shape(self, input_shape):
return input_shape
x = np.random.random((5, 3))
inputs = layers_module.Input((3,))
masked = layers_module.Masking(mask_value=0)(inputs)
outputs = CustomMaskedLayer()(masked)
model = training_module.Model(inputs, outputs)
model.compile(
loss="mse",
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=test_utils.should_run_eagerly(),
)
y = np.random.random((5, 3))
model.train_on_batch(x, y)
@test_combinations.run_all_keras_modes
class TestDynamicTrainability(test_combinations.TestCase):
def test_trainable_warning(self):
x = np.random.random((5, 3))
y = np.random.random((5, 2))
model = sequential.Sequential()
model.add(layers_module.Dense(2, input_dim=3))
model.trainable = False
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
model.trainable = True
model.train_on_batch(x, y)
self.assertRaises(Warning)
def test_trainable_argument(self):
with self.cached_session():
x = np.random.random((5, 3))
y = np.random.random((5, 2))
model = sequential.Sequential()
model.add(layers_module.Dense(2, input_dim=3, trainable=False))
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
self.assertAllClose(out, out_2)
# test with nesting
inputs = layers_module.Input(shape=(3,))
output = model(inputs)
model = training_module.Model(inputs, output)
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
self.assertAllClose(out, out_2)
def test_layer_trainability_switch(self):
# with constructor argument, in Sequential
model = sequential.Sequential()
model.add(layers_module.Dense(2, trainable=False, input_dim=1))
self.assertListEqual(model.trainable_weights, [])
# by setting the `trainable` argument, in Sequential
model = sequential.Sequential()
layer = layers_module.Dense(2, input_dim=1)
model.add(layer)
self.assertListEqual(model.trainable_weights, layer.trainable_weights)
layer.trainable = False
self.assertListEqual(model.trainable_weights, [])
# with constructor argument, in Model
x = layers_module.Input(shape=(1,))
y = layers_module.Dense(2, trainable=False)(x)
model = training_module.Model(x, y)
self.assertListEqual(model.trainable_weights, [])
# by setting the `trainable` argument, in Model
x = layers_module.Input(shape=(1,))
layer = layers_module.Dense(2)
y = layer(x)
model = training_module.Model(x, y)
self.assertListEqual(model.trainable_weights, layer.trainable_weights)
layer.trainable = False
self.assertListEqual(model.trainable_weights, [])
def test_model_trainability_switch(self):
# a non-trainable model has no trainable weights
x = layers_module.Input(shape=(1,))
y = layers_module.Dense(2)(x)
model = training_module.Model(x, y)
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
# same for Sequential
model = sequential.Sequential()
model.add(layers_module.Dense(2, input_dim=1))
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
def test_nested_model_trainability(self):
# a Sequential inside a Model
inner_model = sequential.Sequential()
inner_model.add(layers_module.Dense(2, input_dim=1))
x = layers_module.Input(shape=(1,))
y = inner_model(x)
outer_model = training_module.Model(x, y)
self.assertListEqual(
outer_model.trainable_weights, inner_model.trainable_weights
)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Sequential inside a Sequential
inner_model = sequential.Sequential()
inner_model.add(layers_module.Dense(2, input_dim=1))
outer_model = sequential.Sequential()
outer_model.add(inner_model)
self.assertListEqual(
outer_model.trainable_weights, inner_model.trainable_weights
)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Model inside a Model
x = layers_module.Input(shape=(1,))
y = layers_module.Dense(2)(x)
inner_model = training_module.Model(x, y)
x = layers_module.Input(shape=(1,))
y = inner_model(x)
outer_model = training_module.Model(x, y)
self.assertListEqual(
outer_model.trainable_weights, inner_model.trainable_weights
)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Model inside a Sequential
x = layers_module.Input(shape=(1,))
y = layers_module.Dense(2)(x)
inner_model = training_module.Model(x, y)
outer_model = sequential.Sequential()
outer_model.add(inner_model)
self.assertListEqual(
outer_model.trainable_weights, inner_model.trainable_weights
)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
def test_gan_workflow(self):
shared_layer = layers_module.BatchNormalization()
inputs1 = input_layer.Input(10)
outputs1 = shared_layer(inputs1)
model1 = training_module.Model(inputs1, outputs1)
shared_layer.trainable = False
model1.compile(
"sgd", "mse", run_eagerly=test_utils.should_run_eagerly()
)
inputs2 = input_layer.Input(10)
outputs2 = shared_layer(inputs2)
model2 = training_module.Model(inputs2, outputs2)
shared_layer.trainable = True
model2.compile(
"sgd", "mse", run_eagerly=test_utils.should_run_eagerly()
)
x, y = np.ones((10, 10)), np.ones((10, 10))
out1_0 = model1.predict_on_batch(x)
model1.train_on_batch(x, y)
out1_1 = model1.predict_on_batch(x)
self.assertAllClose(out1_0, out1_1)
out2_0 = model2.predict_on_batch(x)
model2.train_on_batch(x, y)
out2_1 = model2.predict_on_batch(x)
self.assertNotAllClose(out2_0, out2_1)
def test_toggle_value(self):
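        # dense_0 is frozen when the model is compiled; flipping `trainable`
        # back to True afterwards (without recompiling) must not cause its
        # weights to be updated, while dense_1 keeps training as usual.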
input_0 = layers_module.Input(shape=(1,))
dense_0 = layers_module.Dense(
1, kernel_initializer="ones", bias_initializer="ones"
)
dense_1 = layers_module.Dense(
1, kernel_initializer="ones", bias_initializer="ones"
)
result = layers_module.Add()([dense_0(input_0), dense_1(input_0)])
model = training_module.Model(input_0, result)
dense_0.trainable = False
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
x = np.ones((10, 1))
y = 5 * x + 2
model.train_on_batch(x, y)
dense_0.trainable = True
model.train_on_batch(x, y)
kernel, bias = dense_0.get_weights()
self.assertAllEqual([kernel[0, 0], bias[0]], [1.0, 1.0])
kernel, bias = dense_1.get_weights()
self.assertAllClose([kernel[0, 0], bias[0]], [1.1176, 1.1176])
class TestTrainingWithDataTensors(test_combinations.TestCase):
def test_training_and_eval_methods_on_symbolic_tensors_single_io(self):
with tf.Graph().as_default():
x = layers_module.Input(shape=(3,), name="input")
y = layers_module.Dense(4, name="dense")(x)
model = training_module.Model(x, y)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = "mse"
model.compile(
optimizer,
loss,
metrics=["mae", metrics_module.CategoricalAccuracy()],
)
inputs = backend.zeros(shape=(10, 3))
targets = backend.zeros(shape=(10, 4))
model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0)
model.evaluate(inputs, targets, steps=2, verbose=0)
model.predict(inputs, steps=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
model.fit(
inputs,
targets,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_data=(inputs, targets),
validation_steps=2,
)
# Test with dynamic shape
inputs = tf.compat.v1.placeholder_with_default(
np.zeros((2, 3)), shape=tf.TensorShape([None, 3])
)
targets = tf.compat.v1.placeholder_with_default(
np.zeros((2, 4)), shape=tf.TensorShape([None, 4])
)
self.assertEqual(inputs.shape.dims[0].value, None)
model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0)
model.evaluate(inputs, targets, steps=2, verbose=0)
model.predict(inputs, steps=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
model.fit(
inputs,
targets,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_data=(inputs, targets),
validation_steps=2,
)
def test_training_and_eval_methods_on_symbolic_tensors_multi_io(self):
a = layers_module.Input(shape=(3,), name="input_a")
b = layers_module.Input(shape=(3,), name="input_b")
dense = layers_module.Dense(4, name="dense")
c = dense(a)
d = dense(b)
e = layers_module.Dropout(0.5, name="dropout")(c)
model = training_module.Model([a, b], [d, e])
optimizer = "rmsprop"
loss = "mse"
loss_weights = [1.0, 0.5]
model.compile(
optimizer,
loss,
metrics=["mae", metrics_module.CategoricalAccuracy()],
loss_weights=loss_weights,
)
input_a_tf = tf.zeros(shape=(10, 3))
input_b_tf = tf.zeros(shape=(10, 3))
output_d_tf = tf.zeros(shape=(10, 4))
output_e_tf = tf.zeros(shape=(10, 4))
model.fit(
[input_a_tf, input_b_tf],
[output_d_tf, output_e_tf],
epochs=1,
steps_per_epoch=2,
verbose=0,
)
model.train_on_batch(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf]
)
# Test with dictionary inputs
model.fit(
{"input_a": input_a_tf, "input_b": input_b_tf},
{"dense": output_d_tf, "dropout": output_e_tf},
epochs=1,
steps_per_epoch=2,
verbose=0,
)
model.fit(
{"input_a": input_a_tf, "input_b": input_b_tf},
{"dense": output_d_tf, "dropout": output_e_tf},
validation_data=(
{"input_a": input_a_tf, "input_b": input_b_tf},
{"dense": output_d_tf, "dropout": output_e_tf},
),
epochs=1,
steps_per_epoch=2,
validation_steps=2,
verbose=0,
)
model.train_on_batch(
{"input_a": input_a_tf, "input_b": input_b_tf},
{"dense": output_d_tf, "dropout": output_e_tf},
)
# Test with validation data
model.fit(
[input_a_tf, input_b_tf],
[output_d_tf, output_e_tf],
validation_data=(
[input_a_tf, input_b_tf],
[output_d_tf, output_e_tf],
),
epochs=1,
steps_per_epoch=2,
validation_steps=2,
verbose=0,
)
# Test evaluation / prediction methods
model.evaluate(
[input_a_tf, input_b_tf],
[output_d_tf, output_e_tf],
steps=2,
verbose=0,
)
model.predict([input_a_tf, input_b_tf], steps=2)
model.test_on_batch(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf]
)
@tf_test_utils.run_deprecated_v1
def test_model_with_input_feed_tensor(self):
"""We test building a model with a TF variable as input.
We should be able to call fit, evaluate, predict,
by only passing them data for the placeholder inputs
in the model.
"""
with tf.Graph().as_default(), self.cached_session():
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
input_v = tf.Variable(input_a_np, dtype="float32")
self.evaluate(tf.compat.v1.variables_initializer([input_v]))
a = input_layer.Input(tensor=input_v)
b = input_layer.Input(shape=(3,), name="input_b")
a_2 = layers_module.Dense(4, name="dense_1")(a)
dp = layers_module.Dropout(0.5, name="dropout")
b_2 = dp(b)
model = training_module.Model([a, b], [a_2, b_2])
model.summary()
optimizer = "rmsprop"
loss = "mse"
loss_weights = [1.0, 0.5]
model.compile(
optimizer,
loss,
metrics=["mean_squared_error"],
loss_weights=loss_weights,
sample_weight_mode=None,
)
# test train_on_batch
out = model.train_on_batch(input_b_np, [output_a_np, output_b_np])
out = model.train_on_batch(
{"input_b": input_b_np}, [output_a_np, output_b_np]
)
out = model.test_on_batch(
{"input_b": input_b_np}, [output_a_np, output_b_np]
)
out = model.predict_on_batch({"input_b": input_b_np})
# test fit
out = model.fit(
{"input_b": input_b_np},
[output_a_np, output_b_np],
epochs=1,
batch_size=10,
)
out = model.fit(
input_b_np, [output_a_np, output_b_np], epochs=1, batch_size=10
)
# test evaluate
out = model.evaluate(
{"input_b": input_b_np},
[output_a_np, output_b_np],
batch_size=10,
)
out = model.evaluate(
input_b_np, [output_a_np, output_b_np], batch_size=10
)
# test predict
out = model.predict({"input_b": input_b_np}, batch_size=10)
out = model.predict(input_b_np, batch_size=10)
self.assertEqual(len(out), 2)
# Now test a model with a single input
# i.e. we don't pass any data to fit the model.
self.evaluate(tf.compat.v1.variables_initializer([input_v]))
a = input_layer.Input(tensor=input_v)
a_2 = layers_module.Dense(4, name="dense_1")(a)
a_2 = layers_module.Dropout(0.5, name="dropout")(a_2)
model = training_module.Model(a, a_2)
model.summary()
optimizer = "rmsprop"
loss = "mse"
model.compile(optimizer, loss, metrics=["mean_squared_error"])
# test train_on_batch
out = model.train_on_batch(None, output_a_np)
out = model.train_on_batch(None, output_a_np)
out = model.test_on_batch(None, output_a_np)
out = model.predict_on_batch(None)
out = model.train_on_batch([], output_a_np)
out = model.train_on_batch({}, output_a_np)
# test fit
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3)
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3)
# test evaluate
_ = model.evaluate(None, output_a_np, steps=3)
_ = model.evaluate(None, output_a_np, steps=3)
# test predict
out = model.predict(None, steps=3)
out = model.predict(None, steps=3)
self.assertEqual(out.shape, (10 * 3, 4))
# Same, without learning phase
# i.e. we don't pass any data to fit the model.
self.evaluate(tf.compat.v1.variables_initializer([input_v]))
a = input_layer.Input(tensor=input_v)
a_2 = layers_module.Dense(4, name="dense_1")(a)
model = training_module.Model(a, a_2)
model.summary()
optimizer = "rmsprop"
loss = "mse"
model.compile(optimizer, loss, metrics=["mean_squared_error"])
# test train_on_batch
out = model.train_on_batch(None, output_a_np)
out = model.train_on_batch(None, output_a_np)
out = model.test_on_batch(None, output_a_np)
out = model.predict_on_batch(None)
out = model.train_on_batch([], output_a_np)
out = model.train_on_batch({}, output_a_np)
# test fit
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10)
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10)
# test evaluate
_ = model.evaluate(None, output_a_np, steps=10)
_ = model.evaluate(None, output_a_np, steps=10)
# test predict
out = model.predict(None, steps=3)
out = model.predict(None, steps=3)
self.assertEqual(out.shape, (10 * 3, 4))
@test_combinations.run_all_keras_modes
def test_model_with_partial_loss(self):
with self.cached_session():
a = input_layer.Input(shape=(3,), name="input_a")
a_2 = layers_module.Dense(4, name="dense_1")(a)
dp = layers_module.Dropout(0.5, name="dropout")
a_3 = dp(a_2)
model = training_module.Model(a, [a_2, a_3])
optimizer = "rmsprop"
loss = {"dropout": "mse"}
model.compile(optimizer, loss, metrics=["mae"])
input_a_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
# test train_on_batch
_ = model.train_on_batch(input_a_np, output_a_np)
_ = model.test_on_batch(input_a_np, output_a_np)
# fit
_ = model.fit(input_a_np, output_a_np)
# evaluate
_ = model.evaluate(input_a_np, output_a_np)
# Same without dropout.
a = input_layer.Input(shape=(3,), name="input_a")
a_2 = layers_module.Dense(4, name="dense_1")(a)
a_3 = layers_module.Dense(4, name="dense_2")(a_2)
model = training_module.Model(a, [a_2, a_3])
optimizer = "rmsprop"
loss = {"dense_2": "mse"}
model.compile(optimizer, loss, metrics={"dense_1": "mae"})
# test train_on_batch
_ = model.train_on_batch(input_a_np, output_a_np)
_ = model.test_on_batch(input_a_np, output_a_np)
# fit
_ = model.fit(input_a_np, output_a_np)
# evaluate
_ = model.evaluate(input_a_np, output_a_np)
def test_model_with_external_loss(self):
with tf.Graph().as_default(), self.cached_session():
# None loss, only regularization loss.
a = input_layer.Input(shape=(3,), name="input_a")
a_2 = layers_module.Dense(
4,
name="dense_1",
kernel_regularizer="l1",
bias_regularizer="l2",
)(a)
dp = layers_module.Dropout(0.5, name="dropout")
a_3 = dp(a_2)
model = training_module.Model(a, [a_2, a_3])
optimizer = "rmsprop"
loss = None
model.compile(optimizer, loss, metrics=["mae"])
input_a_np = np.random.random((10, 3))
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# No dropout, external loss.
a = input_layer.Input(shape=(3,), name="input_a")
a_2 = layers_module.Dense(4, name="dense_1")(a)
a_3 = layers_module.Dense(4, name="dense_2")(a)
model = training_module.Model(a, [a_2, a_3])
model.add_loss(backend.mean(a_3 + a_2))
optimizer = "rmsprop"
loss = None
model.compile(optimizer, loss, metrics=["mae"])
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# Test model with no external data at all.
input_v = tf.Variable(input_a_np, dtype="float32")
self.evaluate(tf.compat.v1.variables_initializer([input_v]))
a = input_layer.Input(tensor=input_v)
a_2 = layers_module.Dense(4, name="dense_1")(a)
a_2 = layers_module.Dropout(0.5, name="dropout")(a_2)
model = training_module.Model(a, a_2)
model.add_loss(backend.mean(a_2))
model.compile(
optimizer="rmsprop", loss=None, metrics=["mean_squared_error"]
)
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# Test multi-output model with no external data at all.
self.evaluate(tf.compat.v1.variables_initializer([input_v]))
a = input_layer.Input(tensor=input_v)
a_1 = layers_module.Dense(4, name="dense_1")(a)
a_2 = layers_module.Dropout(0.5, name="dropout")(a_1)
model = training_module.Model(a, [a_1, a_2])
model.add_loss(backend.mean(a_2))
model.compile(
optimizer="rmsprop", loss=None, metrics=["mean_squared_error"]
)
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
out = model.predict(None, steps=3)
self.assertEqual(len(out), 2)
self.assertEqual(out[0].shape, (10 * 3, 4))
self.assertEqual(out[1].shape, (10 * 3, 4))
def test_target_tensors(self):
with tf.Graph().as_default(), self.cached_session():
# single-output, as list
model = sequential.Sequential()
model.add(layers_module.Dense(4, input_shape=(4,), name="dense"))
input_val = np.random.random((10, 4))
target_val = np.random.random((10, 4))
target = backend.variable(target_val)
model.compile(
optimizer="rmsprop", loss="mse", target_tensors=[target]
)
model.train_on_batch(input_val, None)
# single-output, as single tensor
model.compile(
optimizer="rmsprop", loss="mse", target_tensors=target
)
model.train_on_batch(input_val, None)
# single-output, as dict
model.compile(
optimizer="rmsprop",
loss="mse",
target_tensors={"dense": target},
)
model.train_on_batch(input_val, None)
# test invalid arguments
with self.assertRaises(TypeError):
model.compile(
optimizer="rmsprop", loss="mse", target_tensors=set()
)
with self.assertRaises(ValueError):
model.compile(
optimizer="rmsprop",
loss="mse",
target_tensors=[target, target],
)
with self.assertRaises(ValueError):
model.compile(
optimizer="rmsprop",
loss="mse",
target_tensors={"dense2": None},
)
with self.assertRaises(ValueError):
model.compile(
optimizer="rmsprop", loss="mse", target_tensors=[target]
)
model.train_on_batch(input_val, target_val)
# multi-output, as list
input_val = np.random.random((10, 4))
target_val_a = np.random.random((10, 4))
target_val_b = np.random.random((10, 4))
target_a = backend.variable(target_val_a)
target_b = backend.variable(target_val_b)
inputs = layers_module.Input(shape=(4,))
output_a = layers_module.Dense(4, name="dense_a")(inputs)
output_b = layers_module.Dense(4, name="dense_b")(inputs)
model = training_module.Model(inputs, [output_a, output_b])
model.compile(
optimizer="rmsprop",
loss="mse",
target_tensors=[target_a, target_b],
)
model.train_on_batch(input_val, None)
# multi-output, as dict
model.compile(
optimizer="rmsprop",
loss="mse",
target_tensors={"dense_a": target_a, "dense_b": target_b},
)
model.train_on_batch(input_val, None)
# test with sample weights
model.compile(
optimizer="rmsprop",
loss="mse",
metrics=["mae", metrics_module.CategoricalAccuracy()],
target_tensors=[target_a, target_b],
)
model.train_on_batch(
input_val,
None,
sample_weight={"dense_a": np.random.random((10,))},
)
def test_model_custom_target_tensors(self):
with tf.Graph().as_default(), self.cached_session():
a = input_layer.Input(shape=(3,), name="input_a")
b = input_layer.Input(shape=(3,), name="input_b")
a_2 = layers_module.Dense(4, name="dense_1")(a)
dp = layers_module.Dropout(0.5, name="dropout")
b_2 = dp(b)
y = backend.placeholder([10, 4], name="y")
y1 = backend.placeholder([10, 3], name="y1")
y2 = backend.placeholder([7, 5], name="y2")
model = training_module.Model([a, b], [a_2, b_2])
optimizer = "rmsprop"
loss = "mse"
loss_weights = [1.0, 0.5]
# test list of target tensors
with self.assertRaises(ValueError):
model.compile(
optimizer,
loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors=[y, y1, y2],
)
model.compile(
optimizer,
loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors=[y, y1],
)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
_ = model.train_on_batch(
[input_a_np, input_b_np],
[output_a_np, output_b_np],
{
"dense_1": np.random.random((10,)),
"dropout": np.random.random((10,)),
},
)
# test dictionary of target_tensors
with self.assertRaises(ValueError):
model.compile(
optimizer,
loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={"does_not_exist": y2},
)
# test dictionary of target_tensors
model.compile(
optimizer,
loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={"dense_1": y, "dropout": y1},
)
_ = model.train_on_batch(
[input_a_np, input_b_np],
[output_a_np, output_b_np],
{
"dense_1": np.random.random((10,)),
"dropout": np.random.random((10,)),
},
)
# test with custom TF placeholder as target
pl_target_a = tf.compat.v1.placeholder("float32", shape=(None, 4))
model.compile(
optimizer="rmsprop",
loss="mse",
target_tensors={"dense_1": pl_target_a},
)
model.train_on_batch(
[input_a_np, input_b_np], [output_a_np, output_b_np]
)
class TestTrainingWithMetrics(test_combinations.TestCase):
"""Training tests related to metrics."""
@test_combinations.run_all_keras_modes
def test_metrics_names(self):
a = layers_module.Input(shape=(3,), name="input_a")
b = layers_module.Input(shape=(3,), name="input_b")
dense = layers_module.Dense(4, name="dense")
c = dense(a)
d = dense(b)
e = layers_module.Dropout(0.5, name="dropout")(c)
model = training_module.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
metrics = ["mse", metrics_module.BinaryAccuracy()]
model.compile(
optimizer,
loss="mae",
metrics=metrics,
run_eagerly=test_utils.should_run_eagerly(),
)
mse_metric = "mse" if tf.executing_eagerly() else "mean_squared_error"
reference_metric_names = [
"loss",
"dense_loss",
"dropout_loss",
"dense_" + mse_metric,
"dense_binary_accuracy",
"dropout_" + mse_metric,
"dropout_binary_accuracy",
]
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
epochs=1,
batch_size=5,
)
self.assertEqual(reference_metric_names, model.metrics_names)
@test_combinations.run_all_keras_modes
def test_metric_state_reset_between_fit_and_evaluate(self):
model = sequential.Sequential()
model.add(layers_module.Dense(3, activation="relu", input_dim=4))
model.add(layers_module.Dense(1, activation="sigmoid"))
acc_obj = metrics_module.BinaryAccuracy()
model.compile(
loss="mae",
metrics=[acc_obj],
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=test_utils.should_run_eagerly(),
)
x_train = np.random.random((100, 4))
y_train = np.random.random((100, 1))
model.fit(x_train, y_train, batch_size=5, epochs=2)
self.assertEqual(self.evaluate(acc_obj.count), 100)
x_test = np.random.random((10, 4))
y_test = np.random.random((10, 1))
model.evaluate(x_test, y_test, batch_size=5)
self.assertEqual(self.evaluate(acc_obj.count), 10)
@test_combinations.run_all_keras_modes
def test_metric_state_reset_between_test_on_batch_and_evaluate(self):
model = sequential.Sequential()
model.add(layers_module.Dense(3, activation="relu", input_dim=4))
model.add(layers_module.Dense(1, activation="sigmoid"))
acc_obj = metrics_module.BinaryAccuracy()
model.compile(
loss="mae",
metrics=[acc_obj],
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=test_utils.should_run_eagerly(),
)
x_test = np.random.random((10, 4))
y_test = np.random.random((10, 1))
loss, acc = model.test_on_batch(x_test[:2], y_test[:2])
loss_eval, acc_eval = model.evaluate(x_test, y_test)
loss_1, acc_1 = model.test_on_batch(x_test[:2], y_test[:2])
loss_eval_1, acc_eval_1 = model.evaluate(x_test, y_test)
self.assertEqual(loss, loss_1)
self.assertEqual(acc, acc_1)
self.assertEqual(loss_eval, loss_eval_1)
self.assertEqual(acc_eval, acc_eval_1)
@test_combinations.run_with_all_model_types(exclude_models=["sequential"])
@test_combinations.run_all_keras_modes
def test_metrics_valid_compile_input_formats(self):
inp_1 = layers_module.Input(shape=(1,), name="input_1")
inp_2 = layers_module.Input(shape=(1,), name="input_2")
x = layers_module.Dense(3, kernel_initializer="ones", trainable=False)
out_1 = layers_module.Dense(
1, kernel_initializer="ones", name="output_1", trainable=False
)
out_2 = layers_module.Dense(
1, kernel_initializer="ones", name="output_2", trainable=False
)
branch_a = [inp_1, x, out_1]
branch_b = [inp_2, x, out_2]
model = test_utils.get_multi_io_model(branch_a, branch_b)
# list of metrics.
model.compile(
optimizer="rmsprop",
loss="mse",
metrics=[metrics_module.MeanSquaredError()],
weighted_metrics=[metrics_module.MeanSquaredError()],
run_eagerly=test_utils.should_run_eagerly(),
)
# list of list of metrics.
model.compile(
optimizer="rmsprop",
loss="mse",
metrics=[
metrics_module.MeanSquaredError(),
[metrics_module.MeanSquaredError(), metrics_module.Accuracy()],
],
weighted_metrics=[
metrics_module.MeanSquaredError(),
[metrics_module.MeanSquaredError(), metrics_module.Accuracy()],
],
run_eagerly=test_utils.should_run_eagerly(),
)
# dict of metrics.
model.compile(
optimizer="rmsprop",
loss="mse",
metrics={
"output_1": metrics_module.MeanSquaredError(),
"output_2": [
metrics_module.MeanSquaredError(),
metrics_module.Accuracy(),
],
},
weighted_metrics={
"output_1": metrics_module.MeanSquaredError(),
"output_2": [
metrics_module.MeanSquaredError(),
metrics_module.Accuracy(),
],
},
run_eagerly=test_utils.should_run_eagerly(),
)
@test_combinations.run_all_keras_modes
def test_metrics_masking(self):
np.random.seed(1337)
model = sequential.Sequential()
model.add(layers_module.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
layers_module.TimeDistributed(
layers_module.Dense(1, kernel_initializer="ones")
)
)
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss="mse",
weighted_metrics=["accuracy"],
run_eagerly=test_utils.should_run_eagerly(),
)
# verify that masking is applied.
x = np.array(
# third row is masked
[[[1], [1]], [[1], [1]], [[0], [0]]]
)
y = np.array([[[1], [1]], [[0], [1]], [[1], [1]]])
scores = model.test_on_batch(x, y)
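        # With a ones kernel the predictions equal the inputs, the all-zero
        # third sample is masked out, and exactly one of the four unmasked
        # targets differs from its prediction: mse = 1/4, accuracy = 3/4.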
self.assertArrayNear(scores, [0.25, 0.75], 0.0001)
# verify that masking is combined with sample weights.
w = np.array([3, 2, 4])
scores = model.test_on_batch(x, y, sample_weight=w)
self.assertArrayNear(scores, [0.5, 0.8], 0.0001)
scores = model.train_on_batch(x, y)
self.assertArrayNear(scores, [0.25, 0.75], 0.0001)
scores = model.train_on_batch(x, y, sample_weight=w)
self.assertArrayNear(scores, [0.5 - 0.001037, 0.8], 0.0001)
@test_combinations.run_all_keras_modes
def test_add_metric_with_tensor_on_model(self):
x = layers_module.Input(shape=(1,))
y = layers_module.Dense(1, kernel_initializer="ones")(x)
model = training_module.Model(x, y)
model.add_metric(tf.reduce_sum(y), name="metric_1", aggregation="mean")
if tf.executing_eagerly():
# This is not a use case in v1 graph mode.
mean_result = metrics_module.Mean()(y)
with self.assertRaisesRegex(
ValueError, "Expected a symbolic Tensor for the metric value"
):
model.add_metric(mean_result, name="metric_2")
else:
with self.assertRaisesRegex(
ValueError, "Using the result of calling a `Metric` object "
):
with backend.get_graph().as_default():
model.add_metric(metrics_module.Mean(name="metric_2")(y))
model.compile(
"sgd", loss="mse", run_eagerly=test_utils.should_run_eagerly()
)
inputs = np.ones(shape=(10, 1))
targets = np.ones(shape=(10, 1))
history = model.fit(
inputs,
targets,
epochs=2,
batch_size=5,
validation_data=(inputs, targets),
)
self.assertEqual(history.history["metric_1"][-1], 5)
self.assertEqual(history.history["val_metric_1"][-1], 5)
eval_results = model.evaluate(inputs, targets, batch_size=5)
self.assertEqual(eval_results[-1], 5)
model.predict(inputs, batch_size=5)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
@test_combinations.run_all_keras_modes
def test_add_metric_in_model_call(self):
class TestModel(training_module.Model):
def __init__(self):
super().__init__(name="test_model")
self.dense1 = layers_module.Dense(2, kernel_initializer="ones")
self.mean = metrics_module.Mean(name="metric_1")
def call(self, x):
self.add_metric(
tf.reduce_sum(x), name="metric_2", aggregation="mean"
)
# Provide same name as in the instance created in __init__
# for eager mode
self.add_metric(self.mean(x), name="metric_1")
return self.dense1(x)
model = TestModel()
model.compile(
loss="mse",
optimizer=RMSPropOptimizer(0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
history = model.fit(
x, y, epochs=2, batch_size=5, validation_data=(x, y)
)
self.assertAlmostEqual(history.history["metric_1"][-1], 1, 0)
self.assertAlmostEqual(history.history["val_metric_1"][-1], 1, 0)
self.assertAlmostEqual(history.history["metric_2"][-1], 5, 0)
self.assertAlmostEqual(history.history["val_metric_2"][-1], 5, 0)
eval_results = model.evaluate(x, y, batch_size=5)
self.assertAlmostEqual(eval_results[1], 1, 0)
self.assertAlmostEqual(eval_results[2], 5, 0)
model.predict(x, batch_size=5)
model.train_on_batch(x, y)
model.test_on_batch(x, y)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_add_metric_in_layer_call(self):
class TestLayer(layers_module.Layer):
def build(self, input_shape):
self.a = self.add_weight(
"a", (1, 1), initializer="ones", trainable=False
)
self.built = True
def call(self, inputs):
self.add_metric(
tf.reduce_sum(inputs), name="metric_1", aggregation="mean"
)
return inputs + 1
layers = [
TestLayer(input_shape=(1,)),
layers_module.Dense(2, kernel_initializer="ones"),
]
model = test_utils.get_model_from_layers(layers, input_shape=(1,))
model.compile(
loss="mse",
optimizer=RMSPropOptimizer(0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
history = model.fit(
x, y, epochs=2, batch_size=5, validation_data=(x, y)
)
self.assertEqual(history.history["metric_1"][-1], 5)
self.assertAlmostEqual(history.history["val_metric_1"][-1], 5, 0)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_model_metrics_list(self):
class LayerWithAddMetric(layers_module.Layer):
def __init__(self):
super().__init__()
self.dense = layers_module.Dense(1, kernel_initializer="ones")
def __call__(self, inputs):
outputs = self.dense(inputs)
self.add_metric(
tf.reduce_sum(outputs), name="metric_1", aggregation="mean"
)
return outputs
class LayerWithNestedAddMetricLayer(layers_module.Layer):
def __init__(self):
super().__init__()
self.layer = LayerWithAddMetric()
def call(self, inputs):
outputs = self.layer(inputs)
self.add_metric(
tf.reduce_sum(outputs), name="metric_2", aggregation="mean"
)
return outputs
x = layers_module.Input(shape=(1,))
y = LayerWithNestedAddMetricLayer()(x)
model = training_module.Model(x, y)
model.add_metric(tf.reduce_sum(y), name="metric_3", aggregation="mean")
if tf.executing_eagerly():
# This is not a use case in v1 graph mode.
mean_result = metrics_module.Mean()(y)
with self.assertRaisesRegex(
ValueError, "Expected a symbolic Tensor for the metric value"
):
model.add_metric(mean_result, name="metric_4")
else:
with self.assertRaisesRegex(
ValueError, "Using the result of calling a `Metric` object "
):
with backend.get_graph().as_default():
model.add_metric(metrics_module.Mean(name="metric_4")(y))
model.compile(
"sgd",
loss="mse",
metrics=[metrics_module.Accuracy("metric_4")],
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(np.ones((10, 1)), np.ones((10, 1)), batch_size=10)
        # Verify that the metrics added via the `compile` and `add_metric`
        # APIs are all included in `model.metrics`.
self.assertEqual(
[m.name for m in model.metrics],
["loss", "metric_4", "metric_2", "metric_1", "metric_3"],
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_model_metrics_list_in_call(self):
class TestModel(training_module.Model):
def __init__(self):
super().__init__(name="test_model")
self.dense1 = layers_module.Dense(2, kernel_initializer="ones")
def call(self, x):
self.add_metric(
tf.reduce_sum(x), name="metric_1", aggregation="mean"
)
return self.dense1(x)
model = TestModel()
model.compile(
loss="mse",
optimizer=RMSPropOptimizer(0.01),
metrics=[metrics_module.Accuracy("acc")],
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
self.assertEqual(
[m.name for m in model.metrics], ["loss", "acc", "metric_1"]
)
@test_combinations.run_all_keras_modes
def test_multiple_add_metric_calls(self):
class TestModel(training_module.Model):
def __init__(self):
super().__init__(name="test_model")
self.dense1 = layers_module.Dense(2, kernel_initializer="ones")
self.mean1 = metrics_module.Mean(name="metric_1")
self.mean2 = metrics_module.Mean(name="metric_2")
def call(self, x):
self.add_metric(self.mean2(x), name="metric_2")
self.add_metric(self.mean1(x), name="metric_1")
self.add_metric(
tf.reduce_sum(x), name="metric_3", aggregation="mean"
)
return self.dense1(x)
model = TestModel()
self.assertListEqual(
[m.name for m in model.metrics], ["metric_1", "metric_2"]
)
model.compile(
loss="mse",
optimizer=RMSPropOptimizer(0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
history = model.fit(
x, y, epochs=2, batch_size=5, validation_data=(x, y)
)
self.assertAlmostEqual(history.history["metric_1"][-1], 1, 0)
self.assertAlmostEqual(history.history["metric_2"][-1], 1, 0)
self.assertAlmostEqual(history.history["metric_3"][-1], 5, 0)
eval_results = model.evaluate(x, y, batch_size=5)
self.assertArrayNear(eval_results[1:4], [1, 1, 5], 0.1)
model.predict(x, batch_size=5)
model.train_on_batch(x, y)
model.test_on_batch(x, y)
@test_combinations.run_all_keras_modes
def test_multiple_add_metric_calls_layer(self):
class TestLayer(layers_module.Layer):
def __init__(self):
super().__init__(name="test_layer")
self.dense1 = layers_module.Dense(2, kernel_initializer="ones")
self.m1 = metrics_module.Mean(name="m_1")
self.m2 = [
metrics_module.Mean(name="m_2"),
metrics_module.Mean(name="m_3"),
]
self.m3 = {
"mean4": metrics_module.Mean(name="m_4"),
"mean5": metrics_module.Mean(name="m_5"),
}
def call(self, x):
self.add_metric(self.m2[0](x))
self.add_metric(self.m2[1](x))
self.add_metric(self.m1(x))
self.add_metric(self.m3["mean4"](x))
self.add_metric(self.m3["mean5"](x))
self.add_metric(
tf.reduce_sum(x), name="m_6", aggregation="mean"
)
return self.dense1(x)
layer = TestLayer()
self.assertListEqual(
[m.name for m in layer.metrics], ["m_1", "m_2", "m_3", "m_4", "m_5"]
)
layer(np.ones((10, 10)))
self.assertListEqual(
[m.name for m in layer.metrics],
["m_1", "m_2", "m_3", "m_4", "m_5", "m_6"],
)
@test_combinations.run_all_keras_modes
def test_duplicate_metric_name_in_add_metric(self):
class TestModel(training_module.Model):
def __init__(self):
super().__init__(name="test_model")
self.dense1 = layers_module.Dense(2, kernel_initializer="ones")
self.mean = metrics_module.Mean(name="metric_1")
self.mean2 = metrics_module.Mean(name="metric_1")
def call(self, x):
self.add_metric(self.mean(x), name="metric_1")
return self.dense1(x)
model = TestModel()
model.compile(
loss="mse",
optimizer=RMSPropOptimizer(0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
with self.assertRaisesRegex(
ValueError,
"Please provide different names for the metrics you have added. "
'We found 2 metrics with the name: "metric_1"',
):
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
@test_combinations.run_all_keras_modes
def test_add_metric_without_name(self):
class TestModel(training_module.Model):
def __init__(self):
super().__init__(name="test_model")
self.dense1 = layers_module.Dense(2, kernel_initializer="ones")
def call(self, x):
self.add_metric(tf.reduce_sum(x), aggregation="mean")
return self.dense1(x)
model = TestModel()
model.compile(
loss="mse",
optimizer=RMSPropOptimizer(0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
with self.assertRaisesRegex(
ValueError, "Please provide a name for your metric like"
):
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
@test_combinations.run_all_keras_modes
def test_add_metric_correctness(self):
inputs = input_layer.Input(shape=(1,))
targets = input_layer.Input(shape=(1,))
class Bias(layers_module.Layer):
def build(self, input_shape):
self.bias = self.add_weight("bias", (1,), initializer="zeros")
self.mae = metrics_module.MeanAbsoluteError(name="mae_1")
def call(self, inputs):
inputs, targets = inputs
outputs = inputs + self.bias
self.add_metric(self.mae(targets, outputs), name="mae_1")
return outputs
outputs = Bias()([inputs, targets])
model = training_module.Model([inputs, targets], outputs)
model.add_metric(
metrics_module.mean_absolute_error(targets, outputs),
name="mae_2",
aggregation="mean",
)
model.compile(
loss="mae",
optimizer=optimizer_legacy.gradient_descent.SGD(0.1),
metrics=[metrics_module.MeanAbsoluteError(name="mae_3")],
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.array([[0.0], [1.0], [2.0]])
y = np.array([[0.5], [2.0], [3.5]])
history = model.fit([x, y], y, batch_size=3, epochs=5)
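        # With a single full batch per epoch, SGD(0.1) moves the bias by
        # exactly 0.1 per epoch (the MAE gradient w.r.t. the bias is -1), so
        # the recorded error drops from 1.0 in steps of 0.1.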
expected_val = [1.0, 0.9, 0.8, 0.7, 0.6]
for key in ["loss", "mae_1", "mae_2", "mae_3"]:
self.assertAllClose(history.history[key], expected_val, 1e-3)
@test_combinations.run_all_keras_modes
def test_add_metric_order(self):
class MyLayer(layers_module.Layer):
def call(self, inputs, training=None, mask=None):
self.add_metric(
tf.ones([32]) * 2.0, name="two", aggregation="mean"
)
return inputs
class MyModel(training_module.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._sampler = MyLayer(name="sampler")
def call(self, inputs, training=None, mask=None):
z = self._sampler(inputs)
self.add_metric(
tf.ones([32]) * 1.0, name="one", aggregation="mean"
)
self.add_metric(
tf.ones([32]) * 3.0, name="three", aggregation="mean"
)
return z
xdata = np.random.uniform(size=[32, 16]).astype(np.float32)
dataset_train = tf.data.Dataset.from_tensor_slices((xdata, xdata))
dataset_train = dataset_train.batch(32, drop_remainder=True)
model = MyModel()
model.compile(
optimizer="sgd",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(dataset_train, epochs=3)
self.assertDictEqual(
history.history,
{
"loss": [0.0, 0.0, 0.0],
"three": [3.0, 3.0, 3.0],
"two": [2.0, 2.0, 2.0],
"one": [1.0, 1.0, 1.0],
},
)
@test_combinations.run_all_keras_modes
def test_add_metric_aggregation_mean(self):
class TestModel(training_module.Model):
def __init__(self):
super().__init__(name="test_model")
self.dense1 = layers_module.Dense(2, kernel_initializer="ones")
def call(self, x):
self.add_metric(
tf.reduce_sum(x), name="metric_1", aggregation="mean"
)
return self.dense1(x)
model = TestModel()
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
model.fit(np.ones(shape=(10, 1)), np.ones(shape=(10, 2)), batch_size=5)
@test_combinations.run_all_keras_modes
def test_add_metric_aggregation_none(self):
class TestModel(training_module.Model):
def __init__(self):
super().__init__(name="test_model")
self.dense1 = layers_module.Dense(2, kernel_initializer="ones")
self.mean = metrics_module.Mean(name="metric_1")
def call(self, x):
self.add_metric(self.mean(x), name="metric_1", aggregation=None)
return self.dense1(x)
model = TestModel()
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
model.fit(np.ones(shape=(10, 1)), np.ones(shape=(10, 2)), batch_size=5)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def DISABLED_test_add_metric_invalid_aggregation(self):
# TODO(psv): Re-enable test once it is fixed.
x = layers_module.Input(shape=(1,))
y = layers_module.Dense(1, kernel_initializer="ones")(x)
model = training_module.Model(x, y)
with self.assertRaisesRegex(
ValueError, "only `mean` sample-wise metric aggregation"
):
model.add_metric(
tf.reduce_sum(y), name="metric_1", aggregation="sum"
)
with self.assertRaisesRegex(
ValueError, "only `mean` sample-wise metric aggregation"
):
model.add_metric(
tf.reduce_sum(y), name="metric_1", aggregation=None
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_calling_evaluate_in_callback_during_fit(self):
# Check fix for a bug that caused `evaluate` to hit a cached dataset
# when run from inside a fit callback.
x = layers_module.Input(shape=(2,))
y = layers_module.Dense(2, kernel_initializer="ones", use_bias=False)(x)
model = training_module.Model(x, y)
ones = np.ones((10, 2), dtype=np.float32)
zeros = np.zeros((10, 2), dtype=np.float32)
train_ds = tf.data.Dataset.from_tensor_slices((ones, ones)).batch(5)
val_ds_1 = tf.data.Dataset.from_tensor_slices((ones, ones)).batch(5)
val_ds_2 = tf.data.Dataset.from_tensor_slices((zeros, zeros)).batch(5)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
class MyCallback(Callback):
def on_epoch_end(self, *args, **kwargs):
eval_result = self.model.evaluate(val_ds_2)
if abs(eval_result) > 1e-7:
raise AssertionError(
"Expected to hit the zeros dataset but got high loss "
"value of %s" % eval_result
)
history = model.fit(
train_ds, validation_data=val_ds_1, callbacks=[MyCallback()]
)
# Evaluate at the end of fit should hit the ones dataset (cached)
self.assertGreater(abs(history.history["val_loss"][-1]), 0.1)
# Standalone call to evaluate should not hit the cached dataset
eval_result = model.evaluate(val_ds_2)
self.assertLess(abs(eval_result), 1e-7)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_model_with_nested_compiled_model(self):
class LayerWithAddMetric(layers_module.Layer):
def __init__(self):
super().__init__()
self.dense = layers_module.Dense(1, kernel_initializer="ones")
def call(self, inputs):
outputs = self.dense(inputs)
self.add_metric(
tf.reduce_sum(outputs), name="mean", aggregation="mean"
)
return outputs
x = layers_module.Input(shape=(1,))
y = LayerWithAddMetric()(x)
inner_model = training_module.Model(x, y)
inner_model.add_metric(
tf.reduce_sum(y), name="mean1", aggregation="mean"
)
inner_model.compile(
"sgd",
loss="mse",
metrics=[metrics_module.Accuracy("acc")],
run_eagerly=test_utils.should_run_eagerly(),
)
inner_model.fit(np.ones((10, 1)), np.ones((10, 1)), batch_size=10)
self.assertEqual(
[m.name for m in inner_model.metrics],
["loss", "acc", "mean", "mean1"],
)
x = layers_module.Input(shape=[1])
y = inner_model(x)
outer_model = training_module.Model(x, y)
outer_model.add_metric(
tf.reduce_sum(y), name="mean2", aggregation="mean"
)
outer_model.compile(
"sgd",
loss="mse",
metrics=[metrics_module.Accuracy("acc2")],
run_eagerly=test_utils.should_run_eagerly(),
)
outer_model.fit(np.ones((10, 1)), np.ones((10, 1)), batch_size=10)
self.assertEqual(
[m.name for m in outer_model.metrics],
["loss", "acc2", "mean", "mean1", "mean2"],
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_model_with_metric_class_that_returns_dict(self):
x = layers_module.Input(shape=(2,))
y = layers_module.Dense(3)(x)
model = training_module.Model(x, y)
class DictMetric(metrics_module.Metric):
def __init__(self):
super().__init__()
self.sample_count = tf.Variable(0)
self.l2_sum = tf.Variable(0.0)
def update_state(self, y_true, y_pred, sample_weight=None):
self.l2_sum.assign_add(
tf.reduce_sum(tf.square(y_true - y_pred))
)
self.sample_count.assign_add(tf.shape(y_true)[0])
def reset_state(self):
self.sample_count.assign(0)
self.l2_sum.assign(0.0)
def result(self):
mse = self.l2_sum / tf.cast(self.sample_count, "float32")
rmse = tf.sqrt(mse)
return {"my_mse": mse, "my_rmse": rmse}
model.compile(
"sgd",
"mse",
metrics=["mae", DictMetric()],
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(np.ones((10, 2)), np.ones((10, 3)))
self.assertEqual(
list(history.history.keys()), ["loss", "mae", "my_mse", "my_rmse"]
)
list_evaluate_res = model.evaluate(np.ones((10, 2)), np.ones((10, 3)))
self.assertEqual(len(list_evaluate_res), 4)
dict_evaluate_res = model.evaluate(
np.ones((10, 2)), np.ones((10, 3)), return_dict=True
)
self.assertEqual(
list(dict_evaluate_res.keys()), ["loss", "mae", "my_mse", "my_rmse"]
)
list_train_on_batch_res = model.train_on_batch(
np.ones((10, 2)), np.ones((10, 3))
)
self.assertEqual(len(list_train_on_batch_res), 4)
dict_train_on_batch_res = model.train_on_batch(
np.ones((10, 2)), np.ones((10, 3)), return_dict=True
)
self.assertEqual(
list(dict_train_on_batch_res.keys()),
["loss", "mae", "my_mse", "my_rmse"],
)
list_test_on_batch_res = model.test_on_batch(
np.ones((10, 2)), np.ones((10, 3))
)
self.assertEqual(len(list_test_on_batch_res), 4)
dict_test_on_batch_res = model.test_on_batch(
np.ones((10, 2)), np.ones((10, 3)), return_dict=True
)
self.assertEqual(
list(dict_test_on_batch_res.keys()),
["loss", "mae", "my_mse", "my_rmse"],
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_add_metric_in_model_call_that_returns_dict(self):
class DictMetric(metrics_module.Metric):
def __init__(self):
super().__init__()
self.sample_count = tf.Variable(0)
self.l2_sum = tf.Variable(0.0)
def update_state(self, y_true, y_pred, sample_weight=None):
self.l2_sum.assign_add(
tf.reduce_sum(tf.square(y_true - y_pred))
)
self.sample_count.assign_add(tf.shape(y_true)[0])
def reset_state(self):
self.sample_count.assign(0)
self.l2_sum.assign(0.0)
def result(self):
mse = self.l2_sum / tf.cast(self.sample_count, "float32")
rmse = tf.sqrt(mse)
return {"my_mse": mse, "my_rmse": rmse}
class TestModel(training_module.Model):
def __init__(self):
super().__init__(name="test_model")
self.dense1 = layers_module.Dense(2, kernel_initializer="ones")
self.dict_metric = DictMetric()
def call(self, x):
self.add_metric(
tf.reduce_sum(x), name="metric_2", aggregation="mean"
)
# Provide same name as in the instance created in __init__
# for eager mode
self.add_metric(self.dict_metric(x, 1 - x), name="metric_1")
return self.dense1(x)
model = TestModel()
model.compile(
loss="mse",
optimizer=RMSPropOptimizer(0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
history = model.fit(
x, y, epochs=2, batch_size=5, validation_data=(x, y)
)
self.assertAlmostEqual(history.history["metric_2"][-1], 5, 0)
self.assertAlmostEqual(history.history["val_metric_2"][-1], 5, 0)
self.assertAlmostEqual(history.history["my_mse"][-1], 1, 0)
self.assertAlmostEqual(history.history["val_my_mse"][-1], 1, 0)
self.assertAlmostEqual(history.history["my_rmse"][-1], 1, 0)
self.assertAlmostEqual(history.history["val_my_rmse"][-1], 1, 0)
eval_results = model.evaluate(x, y, batch_size=5, return_dict=True)
self.assertAlmostEqual(eval_results["metric_2"], 5, 0)
self.assertAlmostEqual(eval_results["my_mse"], 1, 0)
self.assertAlmostEqual(eval_results["my_rmse"], 1, 0)
model.predict(x, batch_size=5)
model.train_on_batch(x, y)
model.test_on_batch(x, y)
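# Helper layers for TestAutoUpdates below: each one bumps a non-trainable
# counter in a different way (direct assign_add, add_update, a nested layer,
# and a training-conditional subgraph) so the tests can verify that the
# update runs exactly once per batch.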
class BareUpdateLayer(layers_module.Layer):
def build(self, input_shape):
self.counter = self.add_weight(
"counter",
dtype="int32",
shape=(),
initializer="zeros",
trainable=False,
)
def call(self, inputs):
tf.compat.v1.assign_add(self.counter, 1)
return tf.cast(self.counter, inputs.dtype) * inputs
class LambdaUpdateLayer(layers_module.Layer):
def build(self, input_shape):
self.counter = self.add_weight(
"counter",
dtype="int32",
shape=(),
initializer="zeros",
trainable=False,
)
def call(self, inputs):
# Make sure update isn't run twice.
self.add_update(lambda: tf.compat.v1.assign_add(self.counter, 1))
return tf.cast(self.counter, inputs.dtype) * inputs
class NestedUpdateLayer(layers_module.Layer):
def build(self, input_shape):
self.layer = BareUpdateLayer()
self.layer.build(input_shape)
@property
def counter(self):
return self.layer.counter
def call(self, inputs):
return self.layer(inputs)
class SubgraphUpdateLayer(layers_module.Layer):
def build(self, input_shape):
self.counter = self.add_weight(
"counter",
dtype="int32",
shape=(),
initializer="zeros",
trainable=False,
)
def call(self, inputs, training=None):
if training is None:
training = backend.learning_phase()
if training:
self.counter.assign(self.counter + 1)
return inputs
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TestAutoUpdates(test_combinations.TestCase):
@test_combinations.run_with_all_model_types
@parameterized.named_parameters(
("bare_update", BareUpdateLayer),
("lambda_update", LambdaUpdateLayer),
("nested_update", NestedUpdateLayer),
)
def test_updates_in_model(self, layer_builder):
layer = layer_builder()
x, y = np.ones((10, 10)), np.ones((10, 1))
model = test_utils.get_model_from_layers(
[layer, layers_module.Dense(1)], input_shape=(10,)
)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@test_combinations.run_with_all_model_types
def test_lambda_updates_trainable_false(self):
x, y = np.ones((10, 10)), np.ones((10, 1))
layer = LambdaUpdateLayer()
model = test_utils.get_model_from_layers(
[layer, layers_module.Dense(1)], input_shape=(10,)
)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
layer.trainable = False
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@test_combinations.run_with_all_model_types
def test_subgraph_updates_in_model(self):
layer = SubgraphUpdateLayer()
x, y = np.ones((10, 10)), np.ones((10, 1))
model = test_utils.get_model_from_layers(
[layer, layers_module.Dense(1)], input_shape=(10,)
)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@parameterized.named_parameters(
("bare_update", BareUpdateLayer),
("lambda_update", LambdaUpdateLayer),
("nested_update", NestedUpdateLayer),
)
def test_updates_standalone_layer(self, layer_builder):
layer = layer_builder()
y = layer(np.ones((10, 10)))
self.evaluate(layer.counter.initializer)
self.evaluate(y)
self.assertEqual(self.evaluate(layer.counter), 1)
def test_trainable_false_standalone_layer(self):
layer = LambdaUpdateLayer()
y = layer(np.ones((10, 10)))
self.evaluate(layer.counter.initializer)
self.evaluate(y)
self.assertEqual(self.evaluate(layer.counter), 1)
layer.trainable = False
y = layer(np.ones((10, 10)))
self.evaluate(y)
self.assertEqual(self.evaluate(layer.counter), 1)
@test_combinations.run_with_all_model_types
def test_batchnorm_trainable_false(self):
bn = layers_module.BatchNormalization()
model = test_utils.get_model_from_layers(
[bn, layers_module.Dense(1)], input_shape=(10,)
)
bn.trainable = False
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 1))
model.fit(x, y, batch_size=2, epochs=1)
self.assertAllEqual(self.evaluate(bn.moving_mean), np.zeros((10,)))
self.assertAllEqual(self.evaluate(bn.moving_variance), np.ones((10,)))
class TestFunctionTracing(test_combinations.TestCase):
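    # These tests count "Creating new FuncGraph" log lines emitted by
    # tf.function to check that `fit` and `evaluate` do not retrace more
    # often than expected across epochs and repeated calls.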
def _seq_model_and_data(self):
model = sequential.Sequential(
[layers_module.Dense(4, activation="relu")]
)
model.compile(loss="mse", optimizer="rmsprop")
x = np.random.random((10, 6))
y = np.random.random((10, 4))
return model, x, y
@test_combinations.run_all_keras_modes(
always_skip_v1=True, always_skip_eager=True
)
def test_no_tracing_between_epoch(self):
if _is_oss():
self.skipTest("b/198729465")
model, x, y = self._seq_model_and_data()
logging.set_verbosity(1)
with self.assertLogs(level=1) as logs:
model.fit(x, y, epochs=10, batch_size=5, validation_data=(x, y))
new_func_graph = "INFO:absl:Creating new FuncGraph for Python function"
self.assertEqual(sum(new_func_graph in log for log in logs.output), 9)
@test_combinations.run_all_keras_modes(
always_skip_v1=True, always_skip_eager=True
)
def test_evaluate_no_cached_data(self):
if _is_oss():
self.skipTest("b/198729465")
model, x, y = self._seq_model_and_data()
new_func_graph = "INFO:absl:Creating new FuncGraph for Python function"
logging.set_verbosity(1)
with self.assertLogs(level=1) as eval_logs:
for _ in range(6):
model.evaluate(x, y, batch_size=5)
self.assertEqual(
sum(new_func_graph in log for log in eval_logs.output), 20
)
class TestBuildCustomModel(test_combinations.TestCase):
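    # `Model.build` should accept a single shape, a list of shapes, or a dict
    # of shapes, matching whatever input structure `call` expects.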
@test_combinations.run_all_keras_modes
def test_build_list_of_inputs(self):
class MyModel(training_module.Model):
def __init__(self):
super().__init__()
self.l1 = layers_module.Dense(1)
self.l2 = layers_module.Dense(2)
def call(self, x):
a, b = x
return self.l1(a) + self.l2(b)
# List of tuples
model = MyModel()
model.build([(None, 1), (None, 2)])
self.assertEqual(model.l1.kernel.shape.as_list(), [1, 1])
self.assertEqual(model.l2.kernel.shape.as_list(), [2, 2])
# List of lists
model = MyModel()
model.build([[None, 1], [None, 2]])
self.assertEqual(model.l1.kernel.shape.as_list(), [1, 1])
self.assertEqual(model.l2.kernel.shape.as_list(), [2, 2])
@test_combinations.run_all_keras_modes
def test_build_single_inputs(self):
class MyModel(training_module.Model):
def __init__(self):
super().__init__()
self.l1 = layers_module.Dense(1)
def call(self, x):
return self.l1(x)
model = MyModel()
model.build((None, 1))
self.assertEqual(model.l1.kernel.shape.as_list(), [1, 1])
model = MyModel()
model.build([None, 1])
self.assertEqual(model.l1.kernel.shape.as_list(), [1, 1])
@test_combinations.run_all_keras_modes
def test_build_dict_inputs(self):
class MyModel(training_module.Model):
def __init__(self):
super().__init__()
self.l1 = layers_module.Dense(1)
def call(self, inputs):
return self.l1(inputs["x"])
model = MyModel()
model.build({"x": [None, 16]})
self.assertEqual(model.l1.kernel.shape.as_list(), [16, 1])
def test_save_top_level_model_weights_h5(self):
class MyModel(training_module.Model):
def __init__(self):
super().__init__()
self.class_token = self.add_weight(
shape=(1,), name="class_token"
)
self.inner_layer = layers_module.Dense(1)
def call(self, inputs):
return self.inner_layer(inputs) * self.class_token
h5_file = tempfile.mktemp(".h5")
m1 = MyModel()
m1.build((1, 1))
m1.save_weights(h5_file)
m2 = MyModel()
m2.build((1, 1))
m2.load_weights(h5_file)
self.assertAllEqual(m1.get_weights(), m2.get_weights())
m2.load_weights(h5_file, by_name=True)
self.assertAllEqual(m1.get_weights(), m2.get_weights())
class ScalarDataModelTest(test_combinations.TestCase):
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_scalar_loss_reduction(self):
class MyModel(training_module.Model):
def __init__(self):
super().__init__()
self.w = self.add_weight(initializer="ones", name="kernel")
self.b = self.add_weight(initializer="zeros", name="bias")
def call(self, inputs):
return inputs * self.w + self.b
model = MyModel()
model.compile(
optimizer_legacy.gradient_descent.SGD(1e-2),
loss="mse",
metrics=["binary_accuracy"],
)
# learn y = x * 2 + 0.5
x = np.array([3, 5, 5, 3, 5], dtype="float32")
y = x * 2 + 0.5
x2d = np.expand_dims(x, axis=-1)
y2d = np.expand_dims(y, axis=-1)
loss, acc = model.evaluate(x, y)
loss2d, acc2d = model.evaluate(x2d, y2d)
self.assertAllClose([loss, acc], [loss2d, acc2d], atol=1e-6)
model.fit(x, y, epochs=20)
preds = model.predict(x)
self.assertEqual(preds.shape, (5,))
self.assertAllClose(preds, y, atol=2e-1)
# Class used for testing.
class SubclassModel(training_module.Model):
def __init__(self, name=None):
super().__init__(name=name)
self.d1 = layers_module.Dense(1000)
self.d2 = layers_module.Dense(1000)
self.dropout = layers_module.Dropout(0.1)
def call(self, inputs, training=None):
x = self.d1(inputs)
x = self.dropout(x, training=training)
return self.d2(x)
class TestVariableObjectPathMapping(test_combinations.TestCase):
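    # `get_weight_paths` should map attribute paths such as "d1.kernel" to
    # the corresponding variables once the weights exist, for subclassed,
    # functional and Sequential models alike.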
def test_subclass_model_get_weight_paths(self):
model = SubclassModel()
        # Make sure the object path mapping produces nothing when the weights
        # have not been initialized yet.
self.assertEmpty(model.get_weight_paths())
model(tf.zeros((10, 10)))
mapping = model.get_weight_paths()
self.assertEqual(
mapping.keys(), {"d1.kernel", "d1.bias", "d2.kernel", "d2.bias"}
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_functional_model_get_weight_paths(self):
inputs = input_layer.Input(shape=(10,))
x = layers_module.Dense(100, name="d1")(inputs)
output = layers_module.Dense(200, name="d2", activation="softmax")(x)
model = training_module.Model(inputs, output)
mapping = model.get_weight_paths()
self.assertEqual(
mapping.keys(), {"d1.kernel", "d1.bias", "d2.kernel", "d2.bias"}
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_sequential_model_get_weight_paths(self):
model = sequential.Sequential(
[
layers_module.Dense(100, name="d1", input_shape=(10,)),
layers_module.Dense(200, name="d2", activation="softmax"),
]
)
mapping = model.get_weight_paths()
self.assertEqual(
mapping.keys(), {"d1.kernel", "d1.bias", "d2.kernel", "d2.bias"}
)
def _is_oss():
"""Returns whether the test is run under OSS."""
return len(sys.argv) >= 1 and "bazel" in sys.argv[0]
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/engine/training_test.py/0 | {
"file_path": "tf-keras/tf_keras/engine/training_test.py",
"repo_id": "tf-keras",
"token_count": 98776
} | 215 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A layer that produces a dense `Tensor` based on given `feature_columns`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tf_keras.feature_column import base_feature_layer as kfc
from tf_keras.feature_column import dense_features
from tf_keras.utils import tf_contextlib
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.DenseFeatures", v1=[])
class DenseFeatures(dense_features.DenseFeatures):
"""A layer that produces a dense `Tensor` based on given `feature_columns`.
    Generally a single example in training data is described with
    FeatureColumns. At the first layer of the model, this column-oriented data
    should be converted to a single `Tensor`.
    This layer can be called multiple times with different features.
    This is the V2 version of this layer, which uses name_scopes instead of
    variable_scopes to create variables. This approach currently lacks support
    for partitioned variables; if you need partitioned variables, use the V1
    version instead.
Example:
```python
price = tf.feature_column.numeric_column('price')
keywords_embedded = tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_hash_bucket("keywords",
10000),
dimensions=16)
columns = [price, keywords_embedded, ...]
feature_layer = tf.keras.layers.DenseFeatures(columns)
features = tf.io.parse_example(
..., features=tf.feature_column.make_parse_example_spec(columns))
dense_tensor = feature_layer(features)
for units in [128, 64, 32]:
dense_tensor = tf.keras.layers.Dense(units, activation='relu')(
dense_tensor)
prediction = tf.keras.layers.Dense(1)(dense_tensor)
```
"""
def __init__(self, feature_columns, trainable=True, name=None, **kwargs):
"""Creates a DenseFeatures object.
Args:
feature_columns: An iterable containing the FeatureColumns to use as
inputs to your model. All items should be instances of classes
derived from `DenseColumn` such as `numeric_column`,
`embedding_column`, `bucketized_column`, `indicator_column`. If you
have categorical features, you can wrap them with an
`embedding_column` or `indicator_column`.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the DenseFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: if an item in `feature_columns` is not a `DenseColumn`.
"""
super().__init__(
feature_columns=feature_columns,
trainable=trainable,
name=name,
**kwargs
)
self._state_manager = _StateManagerImplV2(self, self.trainable)
def build(self, _):
for column in self._feature_columns:
with tf.name_scope(column.name):
column.create_state(self._state_manager)
# We would like to call Layer.build and not _DenseFeaturesHelper.build.
super(kfc._BaseFeaturesLayer, self).build(None)
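# A minimal eager-mode sketch of calling the layer directly on a dict of
# tensors (illustrative only; the feature name and values below are made up):
#
#     price = tf.feature_column.numeric_column("price")
#     layer = DenseFeatures([price])
#     dense_tensor = layer({"price": tf.constant([[1.0], [2.0]])})  # (2, 1)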
class _StateManagerImplV2(tf.__internal__.feature_column.StateManager):
"""Manages the state of DenseFeatures."""
def create_variable(
self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None,
):
if name in self._cols_to_vars_map[feature_column]:
raise ValueError("Variable already exists.")
        # We explicitly track these variables since `name` is not guaranteed to
        # be unique, and we disable the manual tracking that the add_weight
        # call does.
with no_manual_dependency_tracking_scope(self._layer):
var = self._layer.add_weight(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=self._trainable and trainable,
use_resource=use_resource,
)
if isinstance(var, tf.__internal__.tracking.Trackable):
self._layer._track_trackable(var, feature_column.name + "/" + name)
self._cols_to_vars_map[feature_column][name] = var
return var
@tf_contextlib.contextmanager
def no_manual_dependency_tracking_scope(obj):
"""A context that disables manual dependency tracking for the given `obj`.
    Sometimes library methods might track objects on their own and we might want
    to disable that and do the tracking on our own. This context manager can
    then be used to disable the tracking the library method does and to do the
    tracking yourself.
For example:
class TestLayer(tf.keras.Layer):
def build():
with no_manual_dependency_tracking_scope(self):
var = self.add_weight("name1") # Creates a var and doesn't track it
# We track variable with name `name2`
self._track_trackable("name2", var)
Args:
obj: A trackable object.
Yields:
a scope in which the object doesn't track dependencies manually.
"""
previous_value = getattr(obj, "_manual_tracking", True)
obj._manual_tracking = False
try:
yield
finally:
obj._manual_tracking = previous_value
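# --- Hedged usage sketch (illustrative, not part of the original file). ---
# Shows how the DenseFeatures layer above is typically driven by feature
# columns. The feature name "age" and the bucket boundaries are arbitrary
# choices for illustration; `tf` is the tensorflow.compat.v2 module imported
# at the top of this file.
def _dense_features_usage_sketch():
    age = tf.feature_column.numeric_column("age")
    age_buckets = tf.feature_column.bucketized_column(
        age, boundaries=[18.0, 35.0, 60.0]
    )
    feature_layer = DenseFeatures([age, age_buckets])
    features = {"age": tf.constant([[23.0], [41.0], [67.0]])}
    # One numeric value plus a 4-way one-hot bucket encoding -> shape (3, 5).
    return feature_layer(features)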
| tf-keras/tf_keras/feature_column/dense_features_v2.py/0 | {
"file_path": "tf-keras/tf_keras/feature_column/dense_features_v2.py",
"repo_id": "tf-keras",
"token_count": 2315
} | 216 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test to demonstrate basic TF-Keras training with a variety of strategies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import tensorflow.compat.v2 as tf
ds_combinations = tf.__internal__.distribute.combinations
# Note: Strategy combinations are not (yet) public APIs, so they are subject
# to API changes and backward-compatibility is not guaranteed.
# TODO(b/188763034): Proceed to export the strategy combinations as public APIs.
STRATEGIES = [
ds_combinations.default_strategy,
ds_combinations.mirrored_strategy_with_two_cpus,
ds_combinations.mirrored_strategy_with_two_gpus,
ds_combinations.tpu_strategy,
ds_combinations.cloud_tpu_strategy,
ds_combinations.parameter_server_strategy_3worker_2ps_cpu,
ds_combinations.parameter_server_strategy_3worker_2ps_1gpu,
ds_combinations.multi_worker_mirrored_2x1_cpu,
ds_combinations.multi_worker_mirrored_2x2_gpu,
ds_combinations.central_storage_strategy_with_two_gpus,
]
@ds_combinations.generate(
tf.__internal__.test.combinations.combine(strategy=STRATEGIES, mode="eager")
)
class DistributedTrainingTest(tf.test.TestCase):
"""Test to demonstrate basic TF-Keras training with a variety of
strategies.
"""
def testKerasTrainingAPI(self, strategy):
if not tf.__internal__.tf2.enabled() and isinstance(
strategy, tf.distribute.experimental.ParameterServerStrategy
):
self.skipTest(
"Parameter Server strategy with dataset creator need to be run "
"when eager execution is enabled."
)
# A `dataset_fn` is required for `Model.fit` to work across all
# strategies.
def dataset_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(
global_batch_size=64
)
x = tf.random.uniform((10, 10))
y = tf.random.uniform((10,))
dataset = (
tf.data.Dataset.from_tensor_slices((x, y)).shuffle(10).repeat()
)
dataset = dataset.shard(
input_context.num_input_pipelines,
input_context.input_pipeline_id,
)
return dataset.batch(batch_size).prefetch(2)
with strategy.scope():
model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
optimizer = tf.keras.optimizers.SGD()
model.compile(optimizer, loss="mse", steps_per_execution=5)
x = tf.keras.utils.experimental.DatasetCreator(dataset_fn)
logdir = os.path.join(self.get_temp_dir(), "logdir")
model.fit(
x,
epochs=2,
steps_per_epoch=20,
callbacks=[
tf.keras.callbacks.TensorBoard(
logdir,
update_freq=5,
write_steps_per_second=True,
)
],
)
events_got = []
for event_file in glob.glob(logdir + "/train/events.out.*"):
for event in tf.compat.v1.train.summary_iterator(event_file):
if not event.summary:
continue
for value in event.summary.value:
if value.tag != "batch_loss":
continue
events_got += [event.step]
# total steps = epochs * steps_per_epoch
events_expected = [5, 10, 15, 20, 25, 30, 35, 40]
if isinstance(
strategy, tf.distribute.experimental.ParameterServerStrategy
):
# Metrics are not logged with this strategy as they are not
# immediately available on batch end
events_expected = []
if (
strategy.cluster_resolver
and strategy.cluster_resolver.task_type == "worker"
):
# The below assertion is run by both chief and workers when using
# `tf.distribute.MultiWorkerMirroredStrategy`, but only the chief
# will log events.
events_expected = []
self.assertEqual(events_got, events_expected)
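# --- Hedged sketch (illustrative, not part of the original test). ---
# A standalone, strategy-free version of the fit pattern exercised above:
# `Model.fit` consuming a `DatasetCreator` built from a `dataset_fn`. The
# shapes, batch size and step counts are arbitrary illustration choices.
def _standalone_fit_sketch():
    def dataset_fn(input_context):
        del input_context  # only needed when sharding across replicas
        x = tf.random.uniform((32, 10))
        y = tf.random.uniform((32,))
        return tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(8)

    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    model.compile("sgd", loss="mse", steps_per_execution=2)
    model.fit(
        tf.keras.utils.experimental.DatasetCreator(dataset_fn),
        epochs=1,
        steps_per_epoch=4,
        verbose=0,
    )
    return model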
if __name__ == "__main__":
tf.__internal__.distribute.multi_process_runner.test_main()
| tf-keras/tf_keras/integration_test/distributed_training_test.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/distributed_training_test.py",
"repo_id": "tf-keras",
"token_count": 2125
} | 217 |
"""Model where almost everything is implemented from scratch.
- Custom layers
- Custom model subclass
- Custom train_step and test_step
- Custom compile()
- Custom learning rate schedule
- Custom metrics
"""
import tensorflow as tf
from tensorflow import keras
from tf_keras.integration_test.models.input_spec import InputSpec
INPUT_DIM = 32
NUM_CLASSES = 5
def get_data_spec(batch_size):
return (
InputSpec((batch_size, INPUT_DIM)),
InputSpec((batch_size, NUM_CLASSES)),
)
def get_input_preprocessor():
return None
class Linear(keras.layers.Layer):
def __init__(self, units=32, name=None):
super().__init__(name=name)
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
name="w",
)
self.b = self.add_weight(
shape=(self.units,),
initializer="random_normal",
trainable=True,
name="b",
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
class BinaryTruePositives(tf.keras.metrics.Metric):
def __init__(self, name="binary_true_positives", **kwargs):
super().__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name="tp", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
def reset_state(self):
self.true_positives.assign(0)
class CustomModel(keras.Model):
def __init__(self):
super().__init__()
self.loss_tracker = keras.metrics.Mean(name="loss")
self.btp_metric = BinaryTruePositives(name="mae")
self.linear_1 = Linear(32, name="linear_1")
self.linear_2 = Linear(NUM_CLASSES, name="linear_2")
def call(self, inputs, training=False):
x = self.linear_1(inputs)
x = self.linear_2(x)
return x
def train_step(self, data):
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = keras.losses.mean_squared_error(y, y_pred)
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
self.loss_tracker.update_state(loss)
self.btp_metric.update_state(y, y_pred)
return {
"loss": self.loss_tracker.result(),
"btp": self.btp_metric.result(),
}
def test_step(self, data):
x, y = data
y_pred = self(x, training=True)
loss = keras.losses.mean_squared_error(y, y_pred)
self.loss_tracker.update_state(loss)
self.btp_metric.update_state(y, y_pred)
return {
"loss": self.loss_tracker.result(),
"btp": self.btp_metric.result(),
}
@property
def metrics(self):
return [self.loss_tracker, self.btp_metric]
class CustomLRSchedule(keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, initial_learning_rate):
self.initial_learning_rate = initial_learning_rate
def __call__(self, step):
return self.initial_learning_rate / tf.cast(step + 1, "float32")
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
}
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
def get_model(
build=False, compile=False, jit_compile=False, include_preprocessing=True
):
model = CustomModel()
if build:
model(tf.zeros((1, INPUT_DIM)))
if compile:
model.compile(
optimizer=keras.optimizers.Adam(CustomLRSchedule(0.1)),
loss=custom_loss,
jit_compile=jit_compile,
)
return model
def get_custom_objects():
return {
"Linear": Linear,
"CustomLRSchedule": CustomLRSchedule,
"CustomModel": CustomModel,
"BinaryTruePositives": BinaryTruePositives,
"custom_loss": custom_loss,
}
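# --- Hedged usage sketch (illustrative, not part of the original file). ---
# Shows how the pieces above fit together: build and compile the custom model
# via get_model(), then train and evaluate it on random data matching
# get_data_spec(). The batch size and epoch count are arbitrary choices.
def _fit_and_evaluate_sketch():
    model = get_model(build=True, compile=True)
    x = tf.random.uniform((8, INPUT_DIM))
    y = tf.random.uniform((8, NUM_CLASSES))
    model.fit(x, y, batch_size=4, epochs=2, verbose=0)
    # The custom test_step reports the custom loss tracker and metric.
    return model.evaluate(x, y, return_dict=True, verbose=0)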
| tf-keras/tf_keras/integration_test/models/low_level_model.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/models/low_level_model.py",
"repo_id": "tf-keras",
"token_count": 2096
} | 218 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demonstrate TF-Keras preprocessing layers applied in tf.data.Dataset.map."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tf_keras.integration_test import preprocessing_test_utils as utils
ds_combinations = tf.__internal__.distribute.combinations
multi_process_runner = tf.__internal__.distribute.multi_process_runner
test_combinations = tf.__internal__.test.combinations
# Note: Strategy combinations are not (yet) public APIs, so they are subject
# to API changes and backward-compatibility is not guaranteed. Note that we
# skip parameter server strategy here, as parameter server strategy requires
# a DatasetCreator when training on a tf.data.Dataset.
STRATEGIES = [
ds_combinations.default_strategy,
ds_combinations.mirrored_strategy_with_two_cpus,
ds_combinations.mirrored_strategy_with_two_gpus,
ds_combinations.tpu_strategy,
ds_combinations.cloud_tpu_strategy,
ds_combinations.multi_worker_mirrored_2x1_cpu,
ds_combinations.multi_worker_mirrored_2x2_gpu,
ds_combinations.central_storage_strategy_with_two_gpus,
]
@ds_combinations.generate(
test_combinations.combine(strategy=STRATEGIES, mode="eager")
)
class PreprocessingAppliedInDatasetTest(tf.test.TestCase):
"""Demonstrate TF-Keras preprocessing layers applied in
tf.data.Dataset.map.
"""
def testDistributedModelFit(self, strategy):
with strategy.scope():
preprocessing_model = utils.make_preprocessing_model(
self.get_temp_dir()
)
training_model = utils.make_training_model()
training_model.compile(optimizer="sgd", loss="binary_crossentropy")
dataset = utils.make_dataset()
dataset = dataset.batch(utils.BATCH_SIZE)
dataset = dataset.map(lambda x, y: (preprocessing_model(x), y))
training_model.fit(dataset, epochs=2)
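# --- Hedged sketch (illustrative, not part of the original test). ---
# The same map-based pattern with a stock preprocessing layer instead of the
# repo's test utilities, so the Normalization layer, feature shape and batch
# size here are assumptions made purely for illustration.
def _map_preprocessing_sketch():
    raw = tf.random.uniform((64, 4))
    labels = tf.cast(tf.random.uniform((64, 1)) > 0.5, "float32")

    normalizer = tf.keras.layers.Normalization()
    normalizer.adapt(raw)

    dataset = tf.data.Dataset.from_tensor_slices((raw, labels)).batch(8)
    # Preprocessing runs inside the input pipeline, not inside the model.
    dataset = dataset.map(lambda x, y: (normalizer(x), y))

    model = tf.keras.Sequential(
        [tf.keras.layers.Dense(1, activation="sigmoid")]
    )
    model.compile("sgd", loss="binary_crossentropy")
    model.fit(dataset, epochs=1, verbose=0)
    return model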
if __name__ == "__main__":
multi_process_runner.test_main()
| tf-keras/tf_keras/integration_test/preprocessing_applied_in_dataset_test.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/preprocessing_applied_in_dataset_test.py",
"repo_id": "tf-keras",
"token_count": 906
} | 219 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers API."""
# isort: off
import tensorflow.compat.v2 as tf
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.base_preprocessing_layer import PreprocessingLayer
# Generic layers.
from tf_keras.engine.input_layer import Input
from tf_keras.engine.input_layer import InputLayer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.activation.elu import ELU
from tf_keras.layers.activation.leaky_relu import LeakyReLU
from tf_keras.layers.activation.prelu import PReLU
# Activations layers.
from tf_keras.layers.activation.relu import ReLU
from tf_keras.layers.activation.softmax import Softmax
from tf_keras.layers.activation.thresholded_relu import ThresholdedReLU
from tf_keras.layers.attention.additive_attention import AdditiveAttention
from tf_keras.layers.attention.attention import Attention
# Attention layers.
from tf_keras.layers.attention.multi_head_attention import MultiHeadAttention
# Convolution layer aliases.
# Convolution layers.
from tf_keras.layers.convolutional.conv1d import Conv1D
from tf_keras.layers.convolutional.conv1d import Convolution1D
from tf_keras.layers.convolutional.conv1d_transpose import Conv1DTranspose
from tf_keras.layers.convolutional.conv1d_transpose import (
Convolution1DTranspose,
)
from tf_keras.layers.convolutional.conv2d import Conv2D
from tf_keras.layers.convolutional.conv2d import Convolution2D
from tf_keras.layers.convolutional.conv2d_transpose import Conv2DTranspose
from tf_keras.layers.convolutional.conv2d_transpose import (
Convolution2DTranspose,
)
from tf_keras.layers.convolutional.conv3d import Conv3D
from tf_keras.layers.convolutional.conv3d import Convolution3D
from tf_keras.layers.convolutional.conv3d_transpose import Conv3DTranspose
from tf_keras.layers.convolutional.conv3d_transpose import (
Convolution3DTranspose,
)
from tf_keras.layers.convolutional.depthwise_conv1d import DepthwiseConv1D
from tf_keras.layers.convolutional.depthwise_conv2d import DepthwiseConv2D
from tf_keras.layers.convolutional.separable_conv1d import SeparableConv1D
from tf_keras.layers.convolutional.separable_conv1d import (
SeparableConvolution1D,
)
from tf_keras.layers.convolutional.separable_conv2d import SeparableConv2D
from tf_keras.layers.convolutional.separable_conv2d import (
SeparableConvolution2D,
)
# Core layers.
from tf_keras.layers.core.activation import Activation
from tf_keras.layers.core.dense import Dense
from tf_keras.layers.core.einsum_dense import EinsumDense
from tf_keras.layers.core.embedding import Embedding
from tf_keras.layers.core.identity import Identity
from tf_keras.layers.core.lambda_layer import Lambda
from tf_keras.layers.core.masking import Masking
from tf_keras.layers.core.tf_op_layer import ClassMethod
from tf_keras.layers.core.tf_op_layer import InstanceMethod
from tf_keras.layers.core.tf_op_layer import InstanceProperty
from tf_keras.layers.core.tf_op_layer import SlicingOpLambda
from tf_keras.layers.core.tf_op_layer import TFOpLambda
# Locally-connected layers.
from tf_keras.layers.locally_connected.locally_connected1d import (
LocallyConnected1D,
)
from tf_keras.layers.locally_connected.locally_connected2d import (
LocallyConnected2D,
)
# Merging functions.
# Merging layers.
from tf_keras.layers.merging.add import Add
from tf_keras.layers.merging.add import add
from tf_keras.layers.merging.average import Average
from tf_keras.layers.merging.average import average
from tf_keras.layers.merging.concatenate import Concatenate
from tf_keras.layers.merging.concatenate import concatenate
from tf_keras.layers.merging.dot import Dot
from tf_keras.layers.merging.dot import dot
from tf_keras.layers.merging.maximum import Maximum
from tf_keras.layers.merging.maximum import maximum
from tf_keras.layers.merging.minimum import Minimum
from tf_keras.layers.merging.minimum import minimum
from tf_keras.layers.merging.multiply import Multiply
from tf_keras.layers.merging.multiply import multiply
from tf_keras.layers.merging.subtract import Subtract
from tf_keras.layers.merging.subtract import subtract
from tf_keras.layers.normalization.batch_normalization import (
SyncBatchNormalization,
)
# Normalization layers.
from tf_keras.layers.normalization.group_normalization import GroupNormalization
from tf_keras.layers.normalization.layer_normalization import LayerNormalization
from tf_keras.layers.normalization.unit_normalization import UnitNormalization
from tf_keras.layers.normalization.spectral_normalization import (
SpectralNormalization,
) # noqa: E501
# Preprocessing layers.
from tf_keras.layers.preprocessing.category_encoding import CategoryEncoding
from tf_keras.layers.preprocessing.discretization import Discretization
from tf_keras.layers.preprocessing.hashed_crossing import HashedCrossing
from tf_keras.layers.preprocessing.hashing import Hashing
# Image preprocessing layers.
from tf_keras.layers.preprocessing.image_preprocessing import CenterCrop
from tf_keras.layers.preprocessing.image_preprocessing import RandomBrightness
from tf_keras.layers.preprocessing.image_preprocessing import RandomContrast
from tf_keras.layers.preprocessing.image_preprocessing import RandomCrop
from tf_keras.layers.preprocessing.image_preprocessing import RandomFlip
from tf_keras.layers.preprocessing.image_preprocessing import RandomHeight
from tf_keras.layers.preprocessing.image_preprocessing import RandomRotation
from tf_keras.layers.preprocessing.image_preprocessing import RandomTranslation
from tf_keras.layers.preprocessing.image_preprocessing import RandomWidth
from tf_keras.layers.preprocessing.image_preprocessing import RandomZoom
from tf_keras.layers.preprocessing.image_preprocessing import Rescaling
from tf_keras.layers.preprocessing.image_preprocessing import Resizing
from tf_keras.layers.preprocessing.integer_lookup import IntegerLookup
from tf_keras.layers.preprocessing.normalization import Normalization
from tf_keras.layers.preprocessing.string_lookup import StringLookup
from tf_keras.layers.preprocessing.text_vectorization import TextVectorization
from tf_keras.layers.regularization.activity_regularization import (
ActivityRegularization,
)
from tf_keras.layers.regularization.alpha_dropout import AlphaDropout
# Regularization layers.
from tf_keras.layers.regularization.dropout import Dropout
from tf_keras.layers.regularization.gaussian_dropout import GaussianDropout
from tf_keras.layers.regularization.gaussian_noise import GaussianNoise
from tf_keras.layers.regularization.spatial_dropout1d import SpatialDropout1D
from tf_keras.layers.regularization.spatial_dropout2d import SpatialDropout2D
from tf_keras.layers.regularization.spatial_dropout3d import SpatialDropout3D
# Reshaping layers.
from tf_keras.layers.reshaping.cropping1d import Cropping1D
from tf_keras.layers.reshaping.cropping2d import Cropping2D
from tf_keras.layers.reshaping.cropping3d import Cropping3D
from tf_keras.layers.reshaping.flatten import Flatten
from tf_keras.layers.reshaping.permute import Permute
from tf_keras.layers.reshaping.repeat_vector import RepeatVector
from tf_keras.layers.reshaping.reshape import Reshape
from tf_keras.layers.reshaping.up_sampling1d import UpSampling1D
from tf_keras.layers.reshaping.up_sampling2d import UpSampling2D
from tf_keras.layers.reshaping.up_sampling3d import UpSampling3D
from tf_keras.layers.reshaping.zero_padding1d import ZeroPadding1D
from tf_keras.layers.reshaping.zero_padding2d import ZeroPadding2D
from tf_keras.layers.reshaping.zero_padding3d import ZeroPadding3D
if tf.__internal__.tf2.enabled():
from tf_keras.layers.normalization.batch_normalization import (
BatchNormalization,
)
from tf_keras.layers.normalization.batch_normalization_v1 import (
BatchNormalization as BatchNormalizationV1,
)
BatchNormalizationV2 = BatchNormalization
else:
from tf_keras.layers.normalization.batch_normalization import (
BatchNormalization as BatchNormalizationV2,
)
from tf_keras.layers.normalization.batch_normalization_v1 import (
BatchNormalization,
)
BatchNormalizationV1 = BatchNormalization
# Kernelized layers.
from tf_keras.layers.kernelized import RandomFourierFeatures
# Pooling layer aliases.
# Pooling layers.
from tf_keras.layers.pooling.average_pooling1d import AveragePooling1D
from tf_keras.layers.pooling.average_pooling1d import AvgPool1D
from tf_keras.layers.pooling.average_pooling2d import AveragePooling2D
from tf_keras.layers.pooling.average_pooling2d import AvgPool2D
from tf_keras.layers.pooling.average_pooling3d import AveragePooling3D
from tf_keras.layers.pooling.average_pooling3d import AvgPool3D
from tf_keras.layers.pooling.global_average_pooling1d import (
GlobalAveragePooling1D,
)
from tf_keras.layers.pooling.global_average_pooling1d import GlobalAvgPool1D
from tf_keras.layers.pooling.global_average_pooling2d import (
GlobalAveragePooling2D,
)
from tf_keras.layers.pooling.global_average_pooling2d import GlobalAvgPool2D
from tf_keras.layers.pooling.global_average_pooling3d import (
GlobalAveragePooling3D,
)
from tf_keras.layers.pooling.global_average_pooling3d import GlobalAvgPool3D
from tf_keras.layers.pooling.global_max_pooling1d import GlobalMaxPool1D
from tf_keras.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D
from tf_keras.layers.pooling.global_max_pooling2d import GlobalMaxPool2D
from tf_keras.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D
from tf_keras.layers.pooling.global_max_pooling3d import GlobalMaxPool3D
from tf_keras.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D
from tf_keras.layers.pooling.max_pooling1d import MaxPool1D
from tf_keras.layers.pooling.max_pooling1d import MaxPooling1D
from tf_keras.layers.pooling.max_pooling2d import MaxPool2D
from tf_keras.layers.pooling.max_pooling2d import MaxPooling2D
from tf_keras.layers.pooling.max_pooling3d import MaxPool3D
from tf_keras.layers.pooling.max_pooling3d import MaxPooling3D
from tf_keras.layers.rnn.abstract_rnn_cell import AbstractRNNCell
# Recurrent layers.
from tf_keras.layers.rnn.base_rnn import RNN
from tf_keras.layers.rnn.simple_rnn import SimpleRNN
from tf_keras.layers.rnn.simple_rnn import SimpleRNNCell
from tf_keras.layers.rnn.stacked_rnn_cells import StackedRNNCells
if tf.__internal__.tf2.enabled():
from tf_keras.layers.rnn.gru import GRU
from tf_keras.layers.rnn.gru import GRUCell
from tf_keras.layers.rnn.gru_v1 import GRU as GRUV1
from tf_keras.layers.rnn.gru_v1 import GRUCell as GRUCellV1
from tf_keras.layers.rnn.lstm import LSTM
from tf_keras.layers.rnn.lstm import LSTMCell
from tf_keras.layers.rnn.lstm_v1 import LSTM as LSTMV1
from tf_keras.layers.rnn.lstm_v1 import LSTMCell as LSTMCellV1
GRUV2 = GRU
GRUCellV2 = GRUCell
LSTMV2 = LSTM
LSTMCellV2 = LSTMCell
else:
from tf_keras.layers.rnn.gru import GRU as GRUV2
from tf_keras.layers.rnn.gru import GRUCell as GRUCellV2
from tf_keras.layers.rnn.gru_v1 import GRU
from tf_keras.layers.rnn.gru_v1 import GRUCell
from tf_keras.layers.rnn.lstm import LSTM as LSTMV2
from tf_keras.layers.rnn.lstm import LSTMCell as LSTMCellV2
from tf_keras.layers.rnn.lstm_v1 import LSTM
from tf_keras.layers.rnn.lstm_v1 import LSTMCell
GRUV1 = GRU
GRUCellV1 = GRUCell
LSTMV1 = LSTM
LSTMCellV1 = LSTMCell
# Serialization functions.
from tf_keras.layers import serialization
# Wrapper functions.
from tf_keras.layers.rnn.base_wrapper import Wrapper
from tf_keras.layers.rnn.bidirectional import Bidirectional
# RNN Cell wrappers.
from tf_keras.layers.rnn.cell_wrappers import DeviceWrapper
from tf_keras.layers.rnn.cell_wrappers import DropoutWrapper
from tf_keras.layers.rnn.cell_wrappers import ResidualWrapper
# Convolutional-recurrent layers.
from tf_keras.layers.rnn.conv_lstm1d import ConvLSTM1D
from tf_keras.layers.rnn.conv_lstm2d import ConvLSTM2D
from tf_keras.layers.rnn.conv_lstm3d import ConvLSTM3D
from tf_keras.layers.rnn.cudnn_gru import CuDNNGRU
# cuDNN recurrent layers.
from tf_keras.layers.rnn.cudnn_lstm import CuDNNLSTM
from tf_keras.layers.rnn.time_distributed import TimeDistributed
from tf_keras.layers.serialization import deserialize
from tf_keras.layers.serialization import deserialize_from_json
from tf_keras.layers.serialization import get_builtin_layer
from tf_keras.layers.serialization import serialize
class VersionAwareLayers:
"""Utility to be used internally to access layers in a V1/V2-aware fashion.
When using layers within the TF-Keras codebase, under the constraint that
e.g. `layers.BatchNormalization` should be the `BatchNormalization` version
corresponding to the current runtime (TF1 or TF2), do not simply access
`layers.BatchNormalization` since it would ignore e.g. an early
`compat.v2.disable_v2_behavior()` call. Instead, use an instance
of `VersionAwareLayers` (which you can use just like the `layers` module).
"""
def __getattr__(self, name):
serialization.populate_deserializable_objects()
if name in serialization.LOCAL.ALL_OBJECTS:
return serialization.LOCAL.ALL_OBJECTS[name]
return super().__getattr__(name)
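# --- Hedged usage sketch (illustrative, not part of the original file). ---
# How internal TF-Keras code is meant to use VersionAwareLayers: attribute
# access resolves to whichever V1/V2 implementation matches the runtime at
# call time, instead of binding a version at import time.
def _version_aware_layers_sketch():
    layers = VersionAwareLayers()
    # Resolves to the TF2 BatchNormalization when TF2 behavior is enabled,
    # and to the TF1 implementation otherwise.
    return layers.BatchNormalization(axis=-1)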
| tf-keras/tf_keras/layers/__init__.py/0 | {
"file_path": "tf-keras/tf_keras/layers/__init__.py",
"repo_id": "tf-keras",
"token_count": 4901
} | 220 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Dense layer."""
import tensorflow.compat.v2 as tf
from tf_keras import activations
from tf_keras import backend
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.dtensor import utils
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_spec import InputSpec
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Dense")
class Dense(Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`). These are all attributes of
`Dense`.
Note: If the input to the layer has a rank greater than 2, then `Dense`
computes the dot product between the `inputs` and the `kernel` along the
last axis of the `inputs` and axis 0 of the `kernel` (using `tf.tensordot`).
For example, if input has dimensions `(batch_size, d0, d1)`, then we create
a `kernel` with shape `(d1, units)`, and the `kernel` operates along axis 2
of the `input`, on every sub-tensor of shape `(1, 1, d1)` (there are
`batch_size * d0` such sub-tensors). The output in this case will have
shape `(batch_size, d0, units)`.
    Note that layer attributes cannot be modified after the layer has been
    called once (except the `trainable` attribute).
    When the popular kwarg `input_shape` is passed, Keras will create
    an input layer to insert before the current layer. This can be treated
    as equivalent to explicitly defining an `InputLayer`.
Example:
>>> # Create a `Sequential` model and add a Dense layer as the first layer.
>>> model = tf.keras.models.Sequential()
>>> model.add(tf.keras.Input(shape=(16,)))
>>> model.add(tf.keras.layers.Dense(32, activation='relu'))
>>> # Now the model will take as input arrays of shape (None, 16)
>>> # and output arrays of shape (None, 32).
>>> # Note that after the first layer, you don't need to specify
>>> # the size of the input anymore:
>>> model.add(tf.keras.layers.Dense(32))
>>> model.output_shape
(None, 32)
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
N-D tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
N-D tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
@utils.allow_initializer_layout
def __init__(
self,
units,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
self.units = int(units) if not isinstance(units, int) else units
if self.units < 0:
raise ValueError(
"Received an invalid value for `units`, expected "
f"a positive integer. Received: units={units}"
)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
dtype = tf.as_dtype(self.dtype or backend.floatx())
if not (dtype.is_floating or dtype.is_complex):
raise TypeError(
"A Dense layer can only be built with a floating-point "
f"dtype. Received: dtype={dtype}"
)
input_shape = tf.TensorShape(input_shape)
last_dim = tf.compat.dimension_value(input_shape[-1])
if last_dim is None:
raise ValueError(
"The last dimension of the inputs to a Dense layer "
"should be defined. Found None. "
f"Full input shape received: {input_shape}"
)
self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
self.kernel = self.add_weight(
"kernel",
shape=[last_dim, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True,
)
if self.use_bias:
self.bias = self.add_weight(
"bias",
shape=[
self.units,
],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True,
)
else:
self.bias = None
self.built = True
def call(self, inputs):
if inputs.dtype.base_dtype != self._compute_dtype_object.base_dtype:
inputs = tf.cast(inputs, dtype=self._compute_dtype_object)
is_ragged = isinstance(inputs, tf.RaggedTensor)
if is_ragged:
# In case we encounter a RaggedTensor with a fixed last dimension
# (last dimension not ragged), we can flatten the input and restore
# the ragged dimensions at the end.
if tf.compat.dimension_value(inputs.shape[-1]) is None:
raise ValueError(
"Dense layer only supports RaggedTensors when the "
"innermost dimension is non-ragged. Received: "
f"inputs.shape={inputs.shape}."
)
original_inputs = inputs
if inputs.flat_values.shape.rank > 1:
inputs = inputs.flat_values
else:
# Innermost partition is encoded using uniform_row_length.
# (This is unusual, but we can handle it.)
if inputs.shape.rank == 2:
inputs = inputs.to_tensor()
is_ragged = False
else:
for _ in range(original_inputs.ragged_rank - 1):
inputs = inputs.values
inputs = inputs.to_tensor()
original_inputs = tf.RaggedTensor.from_nested_row_splits(
inputs, original_inputs.nested_row_splits[:-1]
)
rank = inputs.shape.rank
if rank == 2 or rank is None:
# We use embedding_lookup_sparse as a more efficient matmul
# operation for large sparse input tensors. The op will result in a
# sparse gradient, as opposed to
# sparse_ops.sparse_tensor_dense_matmul which results in dense
            # gradients. This can lead to significant speedups, see b/171762937.
if isinstance(inputs, tf.SparseTensor):
# We need to fill empty rows, as the op assumes at least one id
# per row.
inputs, _ = tf.sparse.fill_empty_rows(inputs, 0)
# We need to do some munging of our input to use the embedding
# lookup as a matrix multiply. We split our input matrix into
# separate ids and weights tensors. The values of the ids tensor
# should be the column indices of our input matrix and the
                # values of the weights tensor can continue to be the actual
                # matrix weights. The column arrangement of ids and weights
                # will be summed over and does not matter. See the
                # documentation for sparse_ops.sparse_tensor_dense_matmul for
                # a more detailed explanation of the inputs to both ops.
ids = tf.SparseTensor(
indices=inputs.indices,
values=inputs.indices[:, 1],
dense_shape=inputs.dense_shape,
)
weights = inputs
outputs = tf.nn.embedding_lookup_sparse(
self.kernel, ids, weights, combiner="sum"
)
else:
outputs = tf.matmul(a=inputs, b=self.kernel)
# Broadcast kernel to inputs.
else:
outputs = tf.tensordot(inputs, self.kernel, [[rank - 1], [0]])
# Reshape the output back to the original ndim of the input.
if not tf.executing_eagerly():
shape = inputs.shape.as_list()
output_shape = shape[:-1] + [self.kernel.shape[-1]]
outputs.set_shape(output_shape)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
if self.activation is not None:
outputs = self.activation(outputs)
if is_ragged:
outputs = original_inputs.with_flat_values(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if tf.compat.dimension_value(input_shape[-1]) is None:
raise ValueError(
"The last dimension of the input shape of a Dense layer "
"should be defined. Found None. "
f"Received: input_shape={input_shape}"
)
return input_shape[:-1].concatenate(self.units)
def get_config(self):
config = super().get_config()
config.update(
{
"units": self.units,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"bias_initializer": initializers.serialize(
self.bias_initializer
),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"bias_regularizer": regularizers.serialize(
self.bias_regularizer
),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(
self.kernel_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
)
return config
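# --- Hedged usage sketch (illustrative, not part of the original file). ---
# Illustrates two behaviors documented above: the tensordot broadcast for
# inputs of rank > 2, and the get_config()/from_config() round-trip. The
# shapes and unit counts are arbitrary illustration choices.
def _dense_usage_sketch():
    layer = Dense(4, activation="relu")
    # Rank-3 input: the (5, 4) kernel is applied along the last axis via
    # tensordot, so the output has shape (2, 3, 4).
    outputs = layer(tf.ones((2, 3, 5)))
    # Round-trip through the config serialized by get_config() above.
    clone = Dense.from_config(layer.get_config())
    return outputs.shape, clone.units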
| tf-keras/tf_keras/layers/core/dense.py/0 | {
"file_path": "tf-keras/tf_keras/layers/core/dense.py",
"repo_id": "tf-keras",
"token_count": 5705
} | 221 |
"""Test for dynamic_lookup layer."""
import os
import shutil
import tempfile
import numpy as np
import tensorflow as tf
import tf_keras as keras
from tf_keras.layers.experimental import dynamic_lookup
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_utils.run_v2_only
class DynamicLookupTest(test_combinations.TestCase):
def test_dynamic_lookup_layer(self):
vocabulary_size = 5
eviction_policy = "LFU"
vocab = tf.constant(["apple", "banana", "cherry", "grape", "juice"])
# vocab_frequency({apple:0, banana:0, cherry:0, grape:0, juice:0})
        # hash table size is 1.2x the vocab size; in this case 5 x 1.2 = 6
layer = dynamic_lookup.DynamicLookup(
vocabulary_size,
initial_vocabulary=vocab,
eviction_policy=eviction_policy,
)
input_1 = tf.constant(["apple", "banana", "cherry"])
layer(input_1)
# vocab_frequency({apple:1, banana:1, cherry:1, grape:0, juice:0})
input_2 = tf.constant(["apple", "banana", "mango"])
layer(input_2)
        # vocab_frequency({apple:2, banana:2, cherry:1, grape:0, juice:0,
        # mango:1})
input_3 = tf.constant(["fig", "date", "date"])
layer(input_3)
# vocab_frequency({apple:2, banana:2, cherry:1, fig:1, date:1, mango:1})
input_4 = tf.constant(["banana", "jackfruit", "honeydew"])
layer(input_4)
# vocab_frequency({apple:2, banana:3, jackfruit:1, fig:1, date:1,
# honeydew:1})
input_5 = tf.constant(["banana", "apple", "jackfruit"])
# vocab_frequency({apple:3, banana:4, jackfruit:2, fig:1, date:1,
# honeydew:1})
outputs = layer(input_5)
expected_output = tf.constant([1, 0, 5], dtype=tf.int64)
# verify if look up values are accurate
self.assertTrue(tf.reduce_all(tf.equal(outputs, expected_output)))
# Check the shape of the output
self.assertEqual(outputs.shape, input_4.shape)
# Check that the top-k vocab is correctly updated
top_k_vocab = layer.get_top_vocabulary(3)
expected_top_k_vocab = tf.constant(
["banana", "apple", "jackfruit"],
dtype=tf.string,
)
self.assertTrue(
tf.reduce_all(tf.equal(top_k_vocab, expected_top_k_vocab))
)
def test_layer_with_model(self):
train_data = np.array(
[
["a", "j", "c", "d", "e"],
["a", "h", "i", "j", "b"],
["i", "h", "c", "j", "e"],
]
)
train_labels = np.array([0, 1, 2])
vocab = tf.constant(["a", "b", "c", "d", "e"])
vocabulary_size = 5
eviction_policy = "LFU"
# Define the model
model = keras.models.Sequential(
[
dynamic_lookup.DynamicLookup(
vocabulary_size,
initial_vocabulary=vocab,
eviction_policy=eviction_policy,
),
keras.layers.Flatten(),
keras.layers.Dense(3, activation="softmax"),
]
)
# Compile the model
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
result = model.fit(
train_data,
train_labels,
epochs=10,
batch_size=1,
)
# Assert model trains
self.assertEqual(result.history["loss"][0] > 0, True)
def test_model_save_load(self):
train_data = np.array(
[
["a", "j", "c", "d", "e"],
["a", "h", "i", "j", "b"],
["i", "h", "c", "j", "e"],
]
)
train_labels = np.array([0, 1, 2])
vocab = tf.constant(["a", "b", "c", "d", "e"])
vocabulary_size = 5
eviction_policy = "LFU"
# Define the model
model = keras.models.Sequential(
[
dynamic_lookup.DynamicLookup(
vocabulary_size,
initial_vocabulary=vocab,
eviction_policy=eviction_policy,
name="dynamic_lookup",
),
keras.layers.Flatten(),
keras.layers.Dense(3, activation="softmax"),
]
)
# Compile the model
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
model.fit(
train_data,
train_labels,
epochs=10,
batch_size=1,
)
# Save the model to a temporary file
filepath = os.path.join(tempfile.gettempdir(), "tempdir")
model.save(filepath)
reloaded_model = keras.models.load_model(filepath)
self.assertTrue(
tf.reduce_all(
tf.equal(
model.get_layer("dynamic_lookup").vocabulary.numpy(),
reloaded_model.get_layer(
"dynamic_lookup"
).vocabulary.numpy(),
)
)
)
shutil.rmtree(filepath)
def test_dynamic_lookup_layer_learn_vocab_arg(self):
vocabulary_size = 5
eviction_policy = "LFU"
vocab = tf.constant(["apple", "banana", "cherry", "grape", "juice"])
# vocab_frequency({apple:0, banana:0, cherry:0, grape:0, juice:0})
        # hash table size is 1.2x the vocab size; in this case 5 x 1.2 = 6
layer = dynamic_lookup.DynamicLookup(
vocabulary_size,
initial_vocabulary=vocab,
eviction_policy=eviction_policy,
)
input_1 = tf.constant(["apple", "banana", "cherry"])
layer(input_1, learn_vocab=False)
input_2 = tf.constant(["apple", "banana", "mango"])
layer(input_2, learn_vocab=False)
input_3 = tf.constant(["fig", "date", "date"])
layer(input_3, learn_vocab=False)
input_4 = tf.constant(["banana", "jackfruit", "honeydew"])
layer(input_4, learn_vocab=False)
input_5 = tf.constant(["banana", "apple", "jackfruit"])
layer(input_5, learn_vocab=False)
# Check that the top-k vocab is not updated
top_k_vocab = layer.get_top_vocabulary(5)
expected_top_k_vocab = tf.constant(
["apple", "banana", "cherry", "grape", "juice"],
dtype=tf.string,
)
self.assertTrue(
tf.reduce_all(tf.equal(top_k_vocab, expected_top_k_vocab))
)
def test_get_vocabulary(self):
vocabulary_size = 5
eviction_policy = "LFU"
vocab = tf.constant(["apple", "banana", "cherry", "grape", "juice"])
layer = dynamic_lookup.DynamicLookup(
vocabulary_size,
initial_vocabulary=vocab,
eviction_policy=eviction_policy,
)
input_1 = tf.constant(["apple", "banana", "cherry"])
layer(input_1, learn_vocab=False)
vocabulary_output = layer.get_vocabulary()
self.assertTrue(tf.reduce_all(tf.equal(vocabulary_output, vocab)))
def test_default_vocab(self):
# test default initial vocabulary tf.string
vocabulary_size = 5
eviction_policy = "LFU"
layer1 = dynamic_lookup.DynamicLookup(
vocabulary_size,
initial_vocabulary=tf.string,
eviction_policy=eviction_policy,
)
input_1 = tf.constant(["apple", "banana", "cherry"])
layer1(input_1, learn_vocab=False)
vocabulary_output = layer1.get_vocabulary()
self.assertEqual(vocabulary_output.dtype, tf.string)
self.assertEqual(tf.shape(vocabulary_output)[0], vocabulary_size)
# test default initial vocabulary tf.int32
layer2 = dynamic_lookup.DynamicLookup(
vocabulary_size,
initial_vocabulary=tf.int32,
eviction_policy=eviction_policy,
)
input_2 = tf.constant([1, 2, 3], dtype=tf.int32)
layer2(input_2, learn_vocab=False)
vocabulary_output = layer2.get_vocabulary()
self.assertEqual(vocabulary_output.dtype, tf.int32)
self.assertEqual(tf.shape(vocabulary_output)[0], vocabulary_size)
# test default initial vocabulary tf.int64
layer3 = dynamic_lookup.DynamicLookup(
vocabulary_size,
initial_vocabulary=tf.int64,
eviction_policy=eviction_policy,
)
input_3 = tf.constant([1, 2, 3], dtype=tf.int64)
layer3(input_3, learn_vocab=False)
vocabulary_output = layer3.get_vocabulary()
self.assertEqual(vocabulary_output.dtype, tf.int64)
self.assertEqual(tf.shape(vocabulary_output)[0], vocabulary_size)
# test value error when default initial vocabulary is tf.float32
with self.assertRaises(ValueError):
layer3 = dynamic_lookup.DynamicLookup(
vocabulary_size,
initial_vocabulary=tf.float32,
eviction_policy=eviction_policy,
)
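# --- Hedged sketch (illustrative, not part of the original tests). ---
# The composition the tests above hint at: DynamicLookup maps raw strings to
# integer ids, which then feed an Embedding layer. The embedding width and the
# inputs are arbitrary illustration choices; the hash table holds
# 1.2 * vocabulary_size entries, so the embedding table is sized to 6 here.
def _lookup_then_embed_sketch():
    vocab = tf.constant(["apple", "banana", "cherry", "grape", "juice"])
    lookup = dynamic_lookup.DynamicLookup(
        5, initial_vocabulary=vocab, eviction_policy="LFU"
    )
    embed = keras.layers.Embedding(input_dim=6, output_dim=3)
    ids = lookup(tf.constant(["banana", "mango", "apple"]))
    return embed(ids)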
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/experimental/dynamic_lookup_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/experimental/dynamic_lookup_test.py",
"repo_id": "tf-keras",
"token_count": 4679
} | 222 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer that computes the dot product between two inputs."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer_utils
from tf_keras.layers.merging.base_merge import _Merge
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Dot")
class Dot(_Merge):
"""Layer that computes a dot product between samples in two tensors.
E.g. if applied to a list of two tensors `a` and `b` of shape
`(batch_size, n)`, the output will be a tensor of shape `(batch_size, 1)`
where each entry `i` will be the dot product between
`a[i]` and `b[i]`.
>>> x = np.arange(10).reshape(1, 5, 2)
>>> print(x)
[[[0 1]
[2 3]
[4 5]
[6 7]
[8 9]]]
>>> y = np.arange(10, 20).reshape(1, 2, 5)
>>> print(y)
[[[10 11 12 13 14]
[15 16 17 18 19]]]
>>> tf.keras.layers.Dot(axes=(1, 2))([x, y])
<tf.Tensor: shape=(1, 2, 2), dtype=int64, numpy=
array([[[260, 360],
[320, 445]]])>
>>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
>>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
>>> dotted = tf.keras.layers.Dot(axes=1)([x1, x2])
>>> dotted.shape
TensorShape([5, 1])
"""
def __init__(self, axes, normalize=False, **kwargs):
"""Initializes a layer that computes the element-wise dot product.
>>> x = np.arange(10).reshape(1, 5, 2)
>>> print(x)
[[[0 1]
[2 3]
[4 5]
[6 7]
[8 9]]]
>>> y = np.arange(10, 20).reshape(1, 2, 5)
>>> print(y)
[[[10 11 12 13 14]
[15 16 17 18 19]]]
>>> tf.keras.layers.Dot(axes=(1, 2))([x, y])
<tf.Tensor: shape=(1, 2, 2), dtype=int64, numpy=
array([[[260, 360],
[320, 445]]])>
Args:
axes: Integer or tuple of integers,
axis or axes along which to take the dot product. If a tuple, should
be two integers corresponding to the desired axis from the first
input and the desired axis from the second input, respectively. Note
that the size of the two selected axes must match.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
"""
super().__init__(**kwargs)
if not isinstance(axes, int):
if not isinstance(axes, (list, tuple)):
raise TypeError(
"Invalid type for argument `axes`: it should be "
f"a list or an int. Received: axes={axes}"
)
if len(axes) != 2:
raise ValueError(
"Invalid format for argument `axes`: it should contain two "
f"elements. Received: axes={axes}"
)
if not isinstance(axes[0], int) or not isinstance(axes[1], int):
raise ValueError(
"Invalid format for argument `axes`: list elements should "
f"be integers. Received: axes={axes}"
)
self.axes = axes
self.normalize = normalize
self.supports_masking = True
self._reshape_required = False
@tf_utils.shape_type_conversion
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape[0], tuple) or len(input_shape) != 2:
raise ValueError(
"A `Dot` layer should be called on a list of 2 inputs. "
f"Received: input_shape={input_shape}"
)
shape1 = input_shape[0]
shape2 = input_shape[1]
if shape1 is None or shape2 is None:
return
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
if shape1[axes[0]] != shape2[axes[1]]:
raise ValueError(
"Incompatible input shapes: "
f"axis values {shape1[axes[0]]} (at axis {axes[0]}) != "
f"{shape2[axes[1]]} (at axis {axes[1]}). "
f"Full input shapes: {shape1}, {shape2}"
)
def _merge_function(self, inputs):
base_layer_utils.no_ragged_support(inputs, self.name)
if len(inputs) != 2:
raise ValueError(
"A `Dot` layer should be called on exactly 2 inputs. "
f"Received: inputs={inputs}"
)
x1 = inputs[0]
x2 = inputs[1]
if isinstance(self.axes, int):
if self.axes < 0:
axes = [
self.axes % backend.ndim(x1),
self.axes % backend.ndim(x2),
]
else:
axes = [self.axes] * 2
else:
axes = []
for i in range(len(self.axes)):
if self.axes[i] < 0:
axes.append(self.axes[i] % backend.ndim(inputs[i]))
else:
axes.append(self.axes[i])
if self.normalize:
x1 = tf.linalg.l2_normalize(x1, axis=axes[0])
x2 = tf.linalg.l2_normalize(x2, axis=axes[1])
output = backend.batch_dot(x1, x2, axes)
return output
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if not isinstance(input_shape, (tuple, list)) or len(input_shape) != 2:
raise ValueError(
"A `Dot` layer should be called on a list of 2 inputs. "
f"Received: input_shape={input_shape}"
)
shape1 = list(input_shape[0])
shape2 = list(input_shape[1])
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
shape1.pop(axes[0])
shape2.pop(axes[1])
shape2.pop(0)
output_shape = shape1 + shape2
if len(output_shape) == 1:
output_shape += [1]
return tuple(output_shape)
def compute_mask(self, inputs, mask=None):
return None
def get_config(self):
config = {
"axes": self.axes,
"normalize": self.normalize,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export("keras.layers.dot")
def dot(inputs, axes, normalize=False, **kwargs):
"""Functional interface to the `Dot` layer.
Args:
inputs: A list of input tensors (at least 2).
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the dot product of the samples from the inputs.
"""
return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)
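# --- Hedged usage sketch (illustrative, not part of the original file). ---
# With normalize=True the layer returns the cosine proximity along the chosen
# axis, so two vectors pointing in the same direction score ~1.0. The inputs
# below are arbitrary illustration values.
def _cosine_proximity_sketch():
    a = tf.constant([[1.0, 2.0, 3.0]])
    b = tf.constant([[2.0, 4.0, 6.0]])  # same direction as `a`
    return Dot(axes=1, normalize=True)([a, b])  # approximately [[1.0]]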
| tf-keras/tf_keras/layers/merging/dot.py/0 | {
"file_path": "tf-keras/tf_keras/layers/merging/dot.py",
"repo_id": "tf-keras",
"token_count": 3964
} | 223 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.layers.normalization import layer_normalization
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
def _run_layernorm_correctness_test(layer, dtype="float32"):
model = keras.models.Sequential()
model.add(keras.layers.Lambda(lambda x: tf.cast(x, dtype="float16")))
norm = layer(input_shape=(2, 2, 2), dtype=dtype)
model.add(norm)
model.compile(
loss="mse",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2)).astype(
dtype
)
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
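# --- Hedged sketch (illustrative, not part of the original tests). ---
# A few-line version of the correctness check above: with the default beta=0
# and gamma=1, the normalized output has per-sample mean ~0 and variance ~1
# along the normalized axis. Shapes are arbitrary illustration choices.
def _layernorm_quick_check():
    x = np.random.normal(loc=5.0, scale=10.0, size=(8, 16)).astype("float32")
    y = keras.layers.LayerNormalization(axis=-1)(x)
    mean = tf.reduce_mean(y, axis=-1)
    variance = tf.math.reduce_variance(y, axis=-1)
    return mean, variance  # expected to be close to 0 and 1 respectively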
class LayerNormalizationTest(test_combinations.TestCase):
@test_combinations.run_all_keras_modes
def test_basic_layernorm(self):
test_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={
"gamma_regularizer": keras.regularizers.l2(0.01),
"beta_regularizer": keras.regularizers.l2(0.01),
},
input_shape=(3, 4, 2),
)
test_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={
"gamma_initializer": "ones",
"beta_initializer": "ones",
},
input_shape=(3, 4, 2),
)
test_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={"scale": False, "center": False},
input_shape=(3, 3),
)
test_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={"axis": (-3, -2, -1)},
input_shape=(2, 8, 8, 3),
)
test_utils.layer_test(
keras.layers.LayerNormalization, input_shape=(1, 0, 10)
)
@test_combinations.run_all_keras_modes
def test_non_fused_layernorm(self):
test_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={"axis": -2},
input_shape=(3, 4, 2),
)
test_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={"axis": (-3, -2)},
input_shape=(2, 8, 8, 3),
)
test_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={"axis": (-3, -1)},
input_shape=(2, 8, 8, 3),
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_layernorm_weights(self):
layer = keras.layers.LayerNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 0)
layer = keras.layers.LayerNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 2)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_layernorm_regularization(self):
layer = keras.layers.LayerNormalization(
gamma_regularizer="l1", beta_regularizer="l1"
)
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.LayerNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm
)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
@test_combinations.run_all_keras_modes
def test_layernorm_convnet_channel_last(self):
model = keras.models.Sequential()
norm = keras.layers.LayerNormalization(input_shape=(4, 4, 3))
model.add(norm)
model.compile(
loss="mse",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
@test_combinations.run_all_keras_modes
def test_layernorm_ragged_tensor(self):
x = tf.ragged.constant(
[
[[3.0, 1.0, 1.0], [4.0, 1.0, 1.0]],
[[5.0, 9.0, 1.0]],
[[1.0, 2.0, 1.0]],
],
inner_shape=(3,),
)
layer = keras.layers.LayerNormalization()
self.assertEqual(layer(x).shape, (3, None, 3))
@test_combinations.run_all_keras_modes
def test_layernorm_correctness(self):
_run_layernorm_correctness_test(
layer_normalization.LayerNormalization, dtype="float32"
)
@test_combinations.run_all_keras_modes
def test_layernorm_mixed_precision(self):
_run_layernorm_correctness_test(
layer_normalization.LayerNormalization, dtype="float16"
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testIncorrectAxisType(self):
with self.assertRaisesRegex(
TypeError, r"Expected an int or a list/tuple of ints"
):
_ = layer_normalization.LayerNormalization(axis={"axis": -1})
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testInvalidAxis(self):
with self.assertRaisesRegex(
ValueError,
r"Invalid value for `axis` argument. "
r"Expected 0 <= axis < inputs.rank",
):
layer_norm = layer_normalization.LayerNormalization(axis=3)
layer_norm.build(input_shape=(2, 2, 2))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testDuplicateAxis(self):
with self.assertRaisesRegex(ValueError, r"Duplicate axis:"):
layer_norm = layer_normalization.LayerNormalization(axis=[-1, -1])
layer_norm.build(input_shape=(2, 2, 2))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testFusedAttr(self):
layer_norm = layer_normalization.LayerNormalization(axis=[-2, -1])
layer_norm.build(input_shape=(2, 2, 2))
self.assertEqual(layer_norm._fused, True)
class LayerNormalizationNumericsTest(test_combinations.TestCase):
"""Tests LayerNormalization has correct and numerically stable outputs."""
def _expected_layer_norm(
self, x, beta, gamma, batch_input_shape, axis, epsilon
):
"""Returns the layer norm, which is computed using NumPy."""
broadcast_shape = [
batch_input_shape[i] if i in axis else 1
for i in range(len(batch_input_shape))
]
mean = np.mean(x, axis=axis, keepdims=True)
var = np.var(x, axis=axis, keepdims=True)
expected = (x - mean) / np.sqrt(var + epsilon)
expected *= np.reshape(gamma, broadcast_shape)
expected += np.reshape(beta, broadcast_shape)
return expected
def _test_forward_pass(
self,
batch_input_shape,
axis,
fp64_tol=1e-14,
fp32_tol=1e-6,
fp16_tol=1e-2,
):
"""Tests the forward pass of layer layer_normalization.
Args:
batch_input_shape: The input shape that will be used to test,
including the batch dimension.
axis: A list of axes to normalize. Will be passed to the `axis`
                argument of LayerNormalization.
fp64_tol: The relative and absolute tolerance for float64.
fp32_tol: The relative and absolute tolerance for float32.
fp16_tol: The relative and absolute tolerance for float16.
"""
param_shape = [batch_input_shape[i] for i in axis]
param_elems = 1
for dim in param_shape:
param_elems *= dim
beta = np.arange(param_elems, dtype="float64").reshape(param_shape)
gamma = np.arange(1, param_elems + 1, dtype="float64").reshape(
param_shape
)
x = np.random.normal(size=batch_input_shape)
for epsilon in 1e-12, 1e-3:
expected = self._expected_layer_norm(
x, beta, gamma, batch_input_shape, axis, epsilon
)
for dtype in "float64", "float32", "float16":
norm = layer_normalization.LayerNormalization(
axis=axis,
dtype=dtype,
batch_input_shape=batch_input_shape,
epsilon=epsilon,
beta_initializer=keras.initializers.constant(beta),
gamma_initializer=keras.initializers.constant(gamma),
)
y = norm(keras.backend.cast(x, dtype))
actual = keras.backend.eval(y)
if dtype == "float64":
tol = fp64_tol
elif dtype == "float32":
tol = fp32_tol
else:
assert dtype == "float16"
tol = fp16_tol
# We use absolute tolerances in addition to relative tolerances,
# because some of the values are very close to zero.
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_forward(self):
# For numeric stability, we ensure the axis's dimension(s) have at least
# 4 elements.
self._test_forward_pass((4, 3), (0,))
self._test_forward_pass((3, 4), (1,))
self._test_forward_pass((4, 3, 2), (0,))
self._test_forward_pass((2, 4, 2), (1,))
self._test_forward_pass((2, 3, 4), (2,), fp16_tol=5e-2)
self._test_forward_pass((2, 3, 2), (0, 2))
self._test_forward_pass((2, 2, 2, 2), (1, 3))
self._test_forward_pass((2, 2, 2, 2), (2, 3))
self._test_forward_pass((2, 3, 4, 5), (3,))
def _test_backward_pass(
self,
batch_input_shape,
axis,
fp64_tol=1e-5,
fp32_tol=1e-5,
fp16_tol=2e-2,
):
"""Tests the backwards pass of layer layer_normalization.
Args:
batch_input_shape: The input shape that will be used to test,
including the batch dimension.
axis: A list of axes to normalize. Will be passed to the `axis`
                argument of LayerNormalization.
fp64_tol: The relative and absolute tolerance for float64.
fp32_tol: The relative and absolute tolerance for float32.
fp16_tol: The relative and absolute tolerance for float16.
"""
param_shape = [batch_input_shape[i] for i in axis]
param_elems = 1
for dim in param_shape:
param_elems *= dim
beta = np.arange(param_elems, dtype="float64").reshape(param_shape)
gamma = np.arange(1, param_elems + 1, dtype="float64").reshape(
param_shape
)
x = np.random.normal(size=batch_input_shape)
for epsilon in 1e-12, 1e-3:
# Float64 must come first in this list, as we use the float64
# numerical gradients to compare to the float32 and float16 symbolic
# gradients as well. Computing float32/float16 numerical gradients
# is too numerically unstable.
for dtype in "float64", "float32", "float16":
norm = layer_normalization.LayerNormalization(
axis=axis,
dtype=dtype,
batch_input_shape=batch_input_shape,
epsilon=epsilon,
beta_initializer=keras.initializers.constant(beta),
gamma_initializer=keras.initializers.constant(gamma),
)
norm.build(x.shape)
def forward_fn(x, beta, gamma):
# We must monkey-patch the attributes of `norm` with the
# function arguments, so that the gradient checker will
# properly compute their gradients. The gradient checker
# computes gradients with respect to the input arguments of
# `f`.
with tf.compat.v1.test.mock.patch.object(
norm, "beta", beta
):
with tf.compat.v1.test.mock.patch.object(
norm, "gamma", gamma
):
return norm(x)
results = tf.test.compute_gradient(
forward_fn,
[keras.backend.cast(x, dtype), norm.beta, norm.gamma],
)
(
[x_grad_t, beta_grad_t, gamma_grad_t],
[x_grad_n, beta_grad_n, gamma_grad_n],
) = results
if dtype == "float64":
# We use the float64 numeric gradients as the reference, to
# compare against the symbolic gradients for all dtypes.
x_grad_ref = x_grad_n
beta_grad_ref = beta_grad_n
gamma_grad_ref = gamma_grad_n
tol = fp64_tol
elif dtype == "float32":
tol = fp32_tol
else:
assert dtype == "float16"
tol = fp16_tol
# We use absolute tolerances in addition to relative tolerances,
# because some of the values are very close to zero.
self.assertAllClose(x_grad_t, x_grad_ref, rtol=tol, atol=tol)
self.assertAllClose(
beta_grad_t, beta_grad_ref, rtol=tol, atol=tol
)
self.assertAllClose(
gamma_grad_t, gamma_grad_ref, rtol=tol, atol=tol
)
# The gradient_checker_v2 does not work properly with LayerNorm in graph
# mode.
@test_utils.run_v2_only
def test_backward(self):
# For numeric stability, we ensure the axis's dimension(s) have at least
# 4 elements.
self._test_backward_pass((4, 3), (0,))
self._test_backward_pass((2, 4, 2), (1,))
self._test_backward_pass((2, 3, 4), (2,))
self._test_backward_pass(
(2, 3, 2), (0, 2), fp64_tol=5e-4, fp32_tol=5e-4
)
self._test_backward_pass((2, 2, 2, 2), (1, 3))
self._test_backward_pass((2, 2, 2, 2), (2, 3))
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/normalization/layer_normalization_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/normalization/layer_normalization_test.py",
"repo_id": "tf-keras",
"token_count": 7991
} | 224 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Private base class for pooling 3D layers."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.utils import conv_utils
class Pooling3D(Layer):
"""Pooling layer for arbitrary pooling functions, for 3D inputs.
This class only exists for code reuse. It will never be an exposed API.
Args:
      pool_function: The pooling function to apply, e.g. `tf.nn.max_pool3d`.
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or
`channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)`
while `channels_first` corresponds to
inputs with shape `(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
"""
def __init__(
self,
pool_function,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
**kwargs
):
super().__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 3, "pool_size")
self.strides = conv_utils.normalize_tuple(
strides, 3, "strides", allow_zero=True
)
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def call(self, inputs):
pool_shape = (1,) + self.pool_size + (1,)
strides = (1,) + self.strides + (1,)
if self.data_format == "channels_first":
# TF does not support `channels_first` with 3D pooling operations,
# so we must handle this case manually.
# TODO(fchollet): remove this when TF pooling is feature-complete.
inputs = tf.transpose(inputs, (0, 2, 3, 4, 1))
outputs = self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper(),
)
if self.data_format == "channels_first":
outputs = tf.transpose(outputs, (0, 4, 1, 2, 3))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == "channels_first":
len_dim1 = input_shape[2]
len_dim2 = input_shape[3]
len_dim3 = input_shape[4]
else:
len_dim1 = input_shape[1]
len_dim2 = input_shape[2]
len_dim3 = input_shape[3]
len_dim1 = conv_utils.conv_output_length(
len_dim1, self.pool_size[0], self.padding, self.strides[0]
)
len_dim2 = conv_utils.conv_output_length(
len_dim2, self.pool_size[1], self.padding, self.strides[1]
)
len_dim3 = conv_utils.conv_output_length(
len_dim3, self.pool_size[2], self.padding, self.strides[2]
)
if self.data_format == "channels_first":
return tf.TensorShape(
[input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3]
)
else:
return tf.TensorShape(
[input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]]
)
def get_config(self):
config = {
"pool_size": self.pool_size,
"padding": self.padding,
"strides": self.strides,
"data_format": self.data_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
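# Illustrative sketch, not part of the original module: how a concrete
# pooling layer might wrap this private base class. The class name
# `_MaxPooling3DSketch` is hypothetical; the real `MaxPooling3D` layer adds
# export decorators and a public docstring, but the core wiring is roughly:
class _MaxPooling3DSketch(Pooling3D):
    def __init__(
        self,
        pool_size=(2, 2, 2),
        strides=None,
        padding="valid",
        data_format=None,
        **kwargs
    ):
        # `tf.nn.max_pool3d` is passed as the `pool_function`; the base
        # class handles strides, padding and `channels_first` transposes.
        super().__init__(
            tf.nn.max_pool3d,
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            **kwargs
        )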
| tf-keras/tf_keras/layers/pooling/base_pooling3d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/pooling/base_pooling3d.py",
"repo_id": "tf-keras",
"token_count": 2175
} | 225 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for TF-Keras hashing preprocessing layer."""
import itertools
import random
import string
import time
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.layers.preprocessing import hashing
# word_gen creates random 2-character sequences of ASCII letters (both
# lowercase and uppercase). With 52 letters, there are 52**2 = 2,704
# (~2,700) unique strings.
def word_gen():
for _ in itertools.count(1):
yield "".join(random.choice(string.ascii_letters) for i in range(2))
class BenchmarkLayer(tf.test.Benchmark):
"""Benchmark the layer forward pass."""
def run_dataset_implementation(self, batch_size):
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = tf.data.Dataset.from_generator(
word_gen, tf.string, tf.TensorShape([])
)
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = tf.strings.to_hash_bucket(i, num_buckets=2)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
return avg_time
def bm_layer_implementation(self, batch_size):
input_1 = keras.Input(shape=(None,), dtype=tf.string, name="word")
layer = hashing.Hashing(num_bins=2)
_ = layer(input_1)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = tf.data.Dataset.from_generator(
word_gen, tf.string, tf.TensorShape([])
)
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = layer(i)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
name = f"hashing|batch_{batch_size}"
baseline = self.run_dataset_implementation(batch_size)
extras = {
"dataset implementation baseline": baseline,
"delta seconds": (baseline - avg_time),
"delta percent": ((baseline - avg_time) / baseline) * 100,
}
self.report_benchmark(
iters=num_repeats, wall_time=avg_time, extras=extras, name=name
)
def benchmark_vocab_size_by_batch(self):
for batch in [32, 64, 256]:
self.bm_layer_implementation(batch_size=batch)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/benchmarks/hashing_benchmark.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/benchmarks/hashing_benchmark.py",
"repo_id": "tf-keras",
"token_count": 1575
} | 226 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hashing layer."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.engine import input_layer
from tf_keras.engine import training
from tf_keras.layers.preprocessing import hashing
from tf_keras.layers.preprocessing import preprocessing_test_utils
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class HashingTest(test_combinations.TestCase):
@parameterized.named_parameters(
("list", list),
("tuple", tuple),
("numpy", np.array),
("array_like", preprocessing_test_utils.ArrayLike),
)
def test_tensor_like_inputs(self, data_fn):
input_data = data_fn([0, 1, 2, 3, 4])
expected_output = [1, 0, 1, 0, 2]
layer = hashing.Hashing(num_bins=3)
output_data = layer(input_data)
self.assertAllEqual(output_data, expected_output)
def test_hash_single_bin(self):
layer = hashing.Hashing(num_bins=1)
inp = np.asarray([["A"], ["B"], ["C"], ["D"], ["E"]])
output = layer(inp)
self.assertAllClose([[0], [0], [0], [0], [0]], output)
def test_hash_dense_input_farmhash(self):
layer = hashing.Hashing(num_bins=2)
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[0], [0], [1], [0], [0]], output)
def test_hash_dense_input_mask_value_farmhash(self):
empty_mask_layer = hashing.Hashing(num_bins=3, mask_value="")
omar_mask_layer = hashing.Hashing(num_bins=3, mask_value="omar")
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
empty_mask_output = empty_mask_layer(inp)
omar_mask_output = omar_mask_layer(inp)
# Outputs should be one more than test_hash_dense_input_farmhash (the
# zeroth bin is now reserved for masks).
self.assertAllClose([[1], [1], [2], [1], [1]], empty_mask_output)
# 'omar' should map to 0.
self.assertAllClose([[0], [1], [2], [1], [1]], omar_mask_output)
def test_hash_dense_list_input_farmhash(self):
layer = hashing.Hashing(num_bins=2)
inp = [["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[0], [0], [1], [0], [0]], output)
inp = ["omar", "stringer", "marlo", "wire", "skywalker"]
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([0, 0, 1, 0, 0], output)
def test_hash_dense_int_input_farmhash(self):
layer = hashing.Hashing(num_bins=3)
inp = np.asarray([[0], [1], [2], [3], [4]])
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[1], [0], [1], [0], [2]], output)
def test_hash_dense_input_siphash(self):
layer = hashing.Hashing(num_bins=2, salt=[133, 137])
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
# Note the result is different from FarmHash.
self.assertAllClose([[0], [1], [0], [1], [0]], output)
layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
output_2 = layer_2(inp)
# Note the result is different from (133, 137).
self.assertAllClose([[1], [0], [1], [0], [1]], output_2)
def test_hash_dense_int_input_siphash(self):
layer = hashing.Hashing(num_bins=3, salt=[133, 137])
inp = np.asarray([[0], [1], [2], [3], [4]])
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[1], [1], [2], [0], [1]], output)
def test_hash_sparse_input_farmhash(self):
layer = hashing.Hashing(num_bins=2)
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([0, 0, 1, 0, 0], output.values)
def test_hash_sparse_input_mask_value_farmhash(self):
empty_mask_layer = hashing.Hashing(num_bins=3, mask_value="")
omar_mask_layer = hashing.Hashing(num_bins=3, mask_value="omar")
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
empty_mask_output = empty_mask_layer(inp)
omar_mask_output = omar_mask_layer(inp)
self.assertAllClose(indices, omar_mask_output.indices)
self.assertAllClose(indices, empty_mask_output.indices)
# Outputs should be one more than test_hash_sparse_input_farmhash (the
# zeroth bin is now reserved for masks).
self.assertAllClose([1, 1, 2, 1, 1], empty_mask_output.values)
# 'omar' should map to 0.
self.assertAllClose([0, 1, 2, 1, 1], omar_mask_output.values)
def test_hash_sparse_int_input_farmhash(self):
layer = hashing.Hashing(num_bins=3)
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2]
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([1, 0, 1, 0, 2], output.values)
def test_hash_sparse_input_siphash(self):
layer = hashing.Hashing(num_bins=2, salt=[133, 137])
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
output = layer(inp)
self.assertAllClose(output.indices, indices)
# The result should be same with test_hash_dense_input_siphash.
self.assertAllClose([0, 1, 0, 1, 0], output.values)
layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
output = layer_2(inp)
# The result should be same with test_hash_dense_input_siphash.
self.assertAllClose([1, 0, 1, 0, 1], output.values)
def test_hash_sparse_int_input_siphash(self):
layer = hashing.Hashing(num_bins=3, salt=[133, 137])
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2]
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([1, 1, 2, 0, 1], output.values)
def test_hash_ragged_string_input_farmhash(self):
layer = hashing.Hashing(num_bins=2)
inp_data = tf.ragged.constant(
[
["omar", "stringer", "marlo", "wire"],
["marlo", "skywalker", "wire"],
],
dtype=tf.string,
)
out_data = layer(inp_data)
# Same hashed output as test_hash_sparse_input_farmhash
expected_output = [[0, 0, 1, 0], [1, 0, 0]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_hash_ragged_input_mask_value(self):
empty_mask_layer = hashing.Hashing(num_bins=3, mask_value="")
omar_mask_layer = hashing.Hashing(num_bins=3, mask_value="omar")
inp_data = tf.ragged.constant(
[
["omar", "stringer", "marlo", "wire"],
["marlo", "skywalker", "wire"],
],
dtype=tf.string,
)
empty_mask_output = empty_mask_layer(inp_data)
omar_mask_output = omar_mask_layer(inp_data)
# Outputs should be one more than test_hash_ragged_string_input_farmhash
# (the zeroth bin is now reserved for masks).
expected_output = [[1, 1, 2, 1], [2, 1, 1]]
self.assertAllClose(expected_output, empty_mask_output)
# 'omar' should map to 0.
expected_output = [[0, 1, 2, 1], [2, 1, 1]]
self.assertAllClose(expected_output, omar_mask_output)
def test_hash_ragged_int_input_farmhash(self):
layer = hashing.Hashing(num_bins=3)
inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype=tf.int64)
out_data = layer(inp_data)
        # Same hashed output as test_hash_sparse_int_input_farmhash
expected_output = [[1, 0, 0, 2], [1, 0, 1]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.int64)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_hash_ragged_string_input_siphash(self):
layer = hashing.Hashing(num_bins=2, salt=[133, 137])
inp_data = tf.ragged.constant(
[
["omar", "stringer", "marlo", "wire"],
["marlo", "skywalker", "wire"],
],
dtype=tf.string,
)
out_data = layer(inp_data)
# Same hashed output as test_hash_dense_input_siphash
expected_output = [[0, 1, 0, 1], [0, 0, 1]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
out_data = layer_2(inp_data)
expected_output = [[1, 0, 1, 0], [1, 1, 0]]
self.assertAllEqual(expected_output, out_data)
out_t = layer_2(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_hash_ragged_int_input_siphash(self):
layer = hashing.Hashing(num_bins=3, salt=[133, 137])
inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype=tf.int64)
out_data = layer(inp_data)
        # Same hashed output as test_hash_sparse_int_input_siphash
expected_output = [[1, 1, 0, 1], [2, 1, 1]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.int64)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_invalid_inputs(self):
with self.assertRaisesRegex(ValueError, "cannot be `None`"):
_ = hashing.Hashing(num_bins=None)
with self.assertRaisesRegex(ValueError, "cannot be `None`"):
_ = hashing.Hashing(num_bins=-1)
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = hashing.Hashing(num_bins=2, salt="string")
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = hashing.Hashing(num_bins=2, salt=[1])
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = hashing.Hashing(num_bins=1, salt=tf.constant([133, 137]))
def test_one_hot_output(self):
input_array = np.array([0, 1, 2, 3, 4])
expected_output = [
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
expected_output_shape = [None, 3]
inputs = keras.Input(shape=(1,), dtype="int32")
layer = hashing.Hashing(num_bins=3, output_mode="one_hot")
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
model = keras.Model(inputs, outputs)
output_data = model(input_array)
self.assertAllEqual(expected_output, output_data)
def test_multi_hot_output(self):
input_array = np.array([0, 1, 2, 3, 4])
expected_output = [1.0, 1.0, 1.0]
expected_output_shape = [None, 3]
inputs = keras.Input(shape=(3,), dtype="int32")
layer = hashing.Hashing(num_bins=3, output_mode="multi_hot")
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
model = keras.Model(inputs, outputs)
output_data = model(input_array)
self.assertAllEqual(expected_output, output_data)
def test_count_output(self):
input_array = np.array([0, 1, 2, 3, 4])
expected_output = [2.0, 2.0, 1.0]
expected_output_shape = [None, 3]
inputs = keras.Input(shape=(3,), dtype="int32")
layer = hashing.Hashing(num_bins=3, output_mode="count")
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
model = keras.Model(inputs, outputs)
output_data = model(input_array)
self.assertAllEqual(expected_output, output_data)
@parameterized.named_parameters(
("int32", tf.int32),
("int64", tf.int64),
)
def test_output_dtype(self, dtype):
input_data = keras.Input(batch_size=16, shape=(4,), dtype="string")
layer = hashing.Hashing(num_bins=3, dtype=dtype)
output = layer(input_data)
self.assertAllEqual(output.dtype, dtype)
def test_legacy_dtype_compat(self):
inputs = keras.Input(batch_size=16, shape=(4,), dtype="string")
layer = hashing.Hashing(num_bins=3, dtype="float32")
outputs = layer(inputs)
self.assertAllEqual(outputs.dtype, tf.int64)
# In TF1 we sometimes face an explicit dtype=None in the config.
layer = hashing.Hashing(num_bins=3, dtype=None)
outputs = layer(inputs)
self.assertAllEqual(outputs.dtype, tf.int64)
@parameterized.named_parameters(
("float32", tf.float32),
("float64", tf.float64),
)
def test_one_hot_output_dtype(self, dtype):
input_data = keras.Input(batch_size=16, shape=(1,), dtype="string")
layer = hashing.Hashing(num_bins=3, output_mode="one_hot", dtype=dtype)
output = layer(input_data)
self.assertAllEqual(output.dtype, dtype)
def test_hash_compute_output_signature(self):
input_shape = tf.TensorShape([2, 3])
input_spec = tf.TensorSpec(input_shape, tf.string)
layer = hashing.Hashing(num_bins=2)
output_spec = layer.compute_output_signature(input_spec)
self.assertEqual(output_spec.shape.dims, input_shape.dims)
self.assertEqual(output_spec.dtype, tf.int64)
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = hashing.Hashing(num_bins=2, name="hashing")
config = layer.get_config()
layer_1 = hashing.Hashing.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_saved_model(self):
input_data = np.array(
["omar", "stringer", "marlo", "wire", "skywalker"]
)
inputs = keras.Input(shape=(None,), dtype=tf.string)
outputs = hashing.Hashing(num_bins=100)(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
original_output_data = model(input_data)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
loaded_model = keras.models.load_model(output_path)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_data = loaded_model(input_data)
self.assertAllClose(new_output_data, original_output_data)
@test_utils.run_v2_only
def test_save_keras_v3(self):
input_data = np.array(
["omar", "stringer", "marlo", "wire", "skywalker"]
)
inputs = keras.Input(shape=(None,), dtype=tf.string)
outputs = hashing.Hashing(num_bins=100)(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
original_output_data = model(input_data)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_model.keras")
model.save(output_path, save_format="keras_v3")
loaded_model = keras.models.load_model(output_path)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_data = loaded_model(input_data)
self.assertAllClose(new_output_data, original_output_data)
@parameterized.named_parameters(
(
"list_input",
[1, 2, 3],
[1, 1, 1],
),
(
"list_input_2d",
[[1], [2], [3]],
[[1], [1], [1]],
),
(
"list_input_2d_multiple",
[[1, 2], [2, 3], [3, 4]],
[[1, 1], [1, 1], [1, 1]],
),
(
"list_input_3d",
[[[1], [2]], [[2], [3]], [[3], [4]]],
[[[1], [1]], [[1], [1]], [[1], [1]]],
),
)
def test_hash_list_input(self, input_data, expected):
layer = hashing.Hashing(num_bins=2)
out_data = layer(input_data)
self.assertAllEqual(expected, out_data.numpy().tolist())
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/hashing_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/hashing_test.py",
"repo_id": "tf-keras",
"token_count": 8847
} | 227 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for preprocessing layers."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.utils import tf_utils
INT = "int"
ONE_HOT = "one_hot"
MULTI_HOT = "multi_hot"
COUNT = "count"
TF_IDF = "tf_idf"
def ensure_tensor(inputs, dtype=None):
"""Ensures the input is a Tensor, SparseTensor or RaggedTensor."""
if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor, tf.SparseTensor)):
inputs = tf.convert_to_tensor(inputs, dtype)
if dtype is not None and inputs.dtype != dtype:
inputs = tf.cast(inputs, dtype)
return inputs
def listify_tensors(x):
"""Convert any tensors or numpy arrays to lists for config serialization."""
if tf.is_tensor(x):
x = x.numpy()
if isinstance(x, np.ndarray):
x = x.tolist()
return x
def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None):
"""Apply binary or count encoding to an input and return a sparse tensor."""
result = tf.sparse.bincount(
inputs,
weights=count_weights,
minlength=depth,
maxlength=depth,
axis=-1,
binary_output=binary_output,
)
result = tf.cast(result, dtype)
if inputs.shape.rank == 1:
output_shape = (depth,)
else:
batch_size = tf.shape(result)[0]
output_shape = (batch_size, depth)
result = tf.SparseTensor(
indices=result.indices, values=result.values, dense_shape=output_shape
)
return result
def dense_bincount(inputs, depth, binary_output, dtype, count_weights=None):
"""Apply binary or count encoding to an input."""
result = tf.math.bincount(
inputs,
weights=count_weights,
minlength=depth,
maxlength=depth,
dtype=dtype,
axis=-1,
binary_output=binary_output,
)
if inputs.shape.rank == 1:
result.set_shape(tf.TensorShape((depth,)))
else:
batch_size = inputs.shape.as_list()[0]
result.set_shape(tf.TensorShape((batch_size, depth)))
return result
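# Illustrative sketch, not part of the original module: made-up inputs
# showing what `dense_bincount` produces. With `binary_output=False` the
# result counts occurrences per bin; with `binary_output=True` it only
# marks presence.
def _dense_bincount_example():
    x = tf.constant([[0, 1, 1], [2, 2, 2]])
    counts = dense_bincount(x, depth=3, binary_output=False, dtype=tf.int64)
    # counts -> [[1, 2, 0], [0, 0, 3]]
    binary = dense_bincount(x, depth=3, binary_output=True, dtype=tf.int64)
    # binary -> [[1, 1, 0], [0, 0, 1]]
    return counts, binary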
def expand_dims(inputs, axis):
"""Expand dims on sparse, ragged, or dense tensors."""
if tf_utils.is_sparse(inputs):
return tf.sparse.expand_dims(inputs, axis)
else:
return tf.expand_dims(inputs, axis)
def encode_categorical_inputs(
inputs,
output_mode,
depth,
dtype="float32",
sparse=False,
count_weights=None,
idf_weights=None,
):
"""Encodes categoical inputs according to output_mode."""
if output_mode == INT:
return tf.identity(tf.cast(inputs, dtype))
original_shape = inputs.shape
# In all cases, we should uprank scalar input to a single sample.
if inputs.shape.rank == 0:
inputs = expand_dims(inputs, -1)
    # One-hot will only uprank if the final output dimension is not already 1.
if output_mode == ONE_HOT:
if inputs.shape[-1] != 1:
inputs = expand_dims(inputs, -1)
# TODO(b/190445202): remove output rank restriction.
if inputs.shape.rank > 2:
raise ValueError(
"When output_mode is not `'int'`, maximum supported output rank "
f"is 2. Received output_mode {output_mode} and input shape "
f"{original_shape}, "
f"which would result in output rank {inputs.shape.rank}."
)
binary_output = output_mode in (MULTI_HOT, ONE_HOT)
if sparse:
bincounts = sparse_bincount(
inputs, depth, binary_output, dtype, count_weights
)
else:
bincounts = dense_bincount(
inputs, depth, binary_output, dtype, count_weights
)
if output_mode != TF_IDF:
return bincounts
if idf_weights is None:
raise ValueError(
"When output mode is `'tf_idf'`, idf_weights must be provided. "
f"Received: output_mode={output_mode} and idf_weights={idf_weights}"
)
if sparse:
value_weights = tf.gather(idf_weights, bincounts.indices[:, -1])
return tf.SparseTensor(
bincounts.indices,
value_weights * bincounts.values,
bincounts.dense_shape,
)
else:
return tf.multiply(bincounts, idf_weights)
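# Illustrative sketch, not part of the original module: a hedged example of
# how `encode_categorical_inputs` maps small integer inputs to the output
# modes defined above. The concrete values, `depth` and `idf_weights` are
# made up for illustration.
def _encode_categorical_inputs_example():
    inputs = tf.constant([1, 2, 2, 0])
    # "int" passes the values through, cast to the requested dtype.
    as_int = encode_categorical_inputs(inputs, INT, depth=4)
    # "multi_hot" marks each bin that appears at least once: [1., 1., 1., 0.]
    multi = encode_categorical_inputs(inputs, MULTI_HOT, depth=4)
    # "count" counts occurrences per bin: [1., 1., 2., 0.]
    counts = encode_categorical_inputs(inputs, COUNT, depth=4)
    # "tf_idf" scales the counts by the provided idf_weights.
    tfidf = encode_categorical_inputs(
        inputs,
        TF_IDF,
        depth=4,
        idf_weights=tf.constant([0.5, 1.0, 1.5, 2.0]),
    )
    return as_int, multi, counts, tfidf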
def compute_shape_for_encode_categorical(shape, output_mode, depth):
"""Computes the output shape of `encode_categorical_inputs`."""
if output_mode == INT:
return tf.TensorShape(shape)
if not shape:
return tf.TensorShape([depth])
if output_mode == ONE_HOT and shape[-1] != 1:
return tf.TensorShape(shape + [depth])
else:
return tf.TensorShape(shape[:-1] + [depth])
| tf-keras/tf_keras/layers/preprocessing/preprocessing_utils.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/preprocessing_utils.py",
"repo_id": "tf-keras",
"token_count": 2186
} | 228 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for gaussian dropout layer."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.run_all_keras_modes
class NoiseLayersTest(test_combinations.TestCase):
def test_GaussianDropout(self):
test_utils.layer_test(
keras.layers.GaussianDropout,
kwargs={"rate": 0.5},
input_shape=(3, 2, 3),
)
def _make_model(self, dtype):
assert dtype in (tf.float32, tf.float64)
model = keras.Sequential()
model.add(keras.layers.Dense(8, input_shape=(32,), dtype=dtype))
layer = keras.layers.GaussianDropout(0.1, dtype=dtype)
model.add(layer)
return model
def _train_model(self, dtype):
model = self._make_model(dtype)
model.compile(
optimizer="sgd",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((8, 32)), np.zeros((8, 8)))
def test_gaussian_dropout_float32(self):
self._train_model(tf.float32)
def test_gaussian_dropout_float64(self):
self._train_model(tf.float64)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/regularization/gaussian_dropout_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/regularization/gaussian_dropout_test.py",
"repo_id": "tf-keras",
"token_count": 769
} | 229 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for N-D convolutional LSTM layers."""
import tensorflow.compat.v2 as tf
from tf_keras import activations
from tf_keras import backend
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.engine import base_layer
from tf_keras.layers.rnn.base_conv_rnn import ConvRNN
from tf_keras.layers.rnn.dropout_rnn_cell_mixin import DropoutRNNCellMixin
from tf_keras.utils import conv_utils
class ConvLSTMCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
"""Cell class for the ConvLSTM layer.
Args:
rank: Integer, rank of the convolution, e.g. "2" for 2D convolutions.
filters: Integer, the dimensionality of the output space (i.e. the number
of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers, specifying the strides of
the convolution. Specifying any stride value != 1 is incompatible with
specifying any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means
no padding. `"same"` results in padding evenly to the left/right or
up/down of the input such that output has the same height/width
dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. When unspecified, uses
`image_data_format` value found in your TF-Keras config file at
`~/.keras/keras.json` (if exists) else 'channels_last'.
Defaults to 'channels_last'.
dilation_rate: An integer or tuple/list of n integers, specifying the
dilation rate to use for dilated convolution. Currently, specifying any
`dilation_rate` value != 1 is incompatible with specifying any `strides`
value != 1.
      activation: Activation function to use. By default hyperbolic tangent
        activation function is applied (`tanh(x)`).
recurrent_activation: Activation function to use for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
at initialization. Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al., 2015](
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state.
Call arguments:
inputs: A (2+ `rank`)D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(
self,
rank,
filters,
kernel_size,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
activation="tanh",
recurrent_activation="hard_sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.rank = rank
if self.rank > 3:
raise ValueError(
f"Rank {rank} convolutions are not currently "
f"implemented. Received: rank={rank}"
)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, self.rank, "kernel_size"
)
self.strides = conv_utils.normalize_tuple(
strides, self.rank, "strides", allow_zero=True
)
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, self.rank, "dilation_rate"
)
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1.0, max(0.0, dropout))
self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
self.state_size = (self.filters, self.filters)
def build(self, input_shape):
super().build(input_shape)
if self.data_format == "channels_first":
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError(
"The channel dimension of the inputs (last axis) should be "
"defined. Found None. Full input shape received: "
f"input_shape={input_shape}"
)
input_dim = input_shape[channel_axis]
self.kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
recurrent_kernel_shape = self.kernel_size + (
self.filters,
self.filters * 4,
)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name="kernel",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
self.recurrent_kernel = self.add_weight(
shape=recurrent_kernel_shape,
initializer=self.recurrent_initializer,
name="recurrent_kernel",
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return backend.concatenate(
[
self.bias_initializer(
(self.filters,), *args, **kwargs
),
initializers.get("ones")(
(self.filters,), *args, **kwargs
),
self.bias_initializer(
(self.filters * 2,), *args, **kwargs
),
]
)
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.filters * 4,),
name="bias",
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
# dropout matrices for input units
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
# dropout matrices for recurrent units
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=4
)
if 0 < self.dropout < 1.0:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
if 0 < self.recurrent_dropout < 1.0:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
(kernel_i, kernel_f, kernel_c, kernel_o) = tf.split(
self.kernel, 4, axis=self.rank + 1
)
(
recurrent_kernel_i,
recurrent_kernel_f,
recurrent_kernel_c,
recurrent_kernel_o,
) = tf.split(self.recurrent_kernel, 4, axis=self.rank + 1)
if self.use_bias:
bias_i, bias_f, bias_c, bias_o = tf.split(self.bias, 4)
else:
bias_i, bias_f, bias_c, bias_o = None, None, None, None
x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding)
x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding)
x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding)
x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding)
h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i)
h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f)
h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c)
h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o)
i = self.recurrent_activation(x_i + h_i)
f = self.recurrent_activation(x_f + h_f)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.recurrent_activation(x_o + h_o)
h = o * self.activation(c)
return h, [h, c]
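    # For reference, `call` above implements the standard convolutional LSTM
    # update, where every product with a kernel is a convolution and `*` is
    # elementwise multiplication:
    #   i   = recurrent_activation(W_i * x_t + U_i * h_{t-1} + b_i)
    #   f   = recurrent_activation(W_f * x_t + U_f * h_{t-1} + b_f)
    #   c_t = f * c_{t-1} + i * activation(W_c * x_t + U_c * h_{t-1} + b_c)
    #   o   = recurrent_activation(W_o * x_t + U_o * h_{t-1} + b_o)
    #   h_t = o * activation(c_t)
    # The optional dropout masks are applied per gate to x_t and h_{t-1}.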
@property
def _conv_func(self):
if self.rank == 1:
return backend.conv1d
if self.rank == 2:
return backend.conv2d
if self.rank == 3:
return backend.conv3d
def input_conv(self, x, w, b=None, padding="valid"):
conv_out = self._conv_func(
x,
w,
strides=self.strides,
padding=padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
if b is not None:
conv_out = backend.bias_add(
conv_out, b, data_format=self.data_format
)
return conv_out
def recurrent_conv(self, x, w):
strides = conv_utils.normalize_tuple(
1, self.rank, "strides", allow_zero=True
)
conv_out = self._conv_func(
x, w, strides=strides, padding="same", data_format=self.data_format
)
return conv_out
def get_config(self):
config = {
"filters": self.filters,
"kernel_size": self.kernel_size,
"strides": self.strides,
"padding": self.padding,
"data_format": self.data_format,
"dilation_rate": self.dilation_rate,
"activation": activations.serialize(self.activation),
"recurrent_activation": activations.serialize(
self.recurrent_activation
),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"unit_forget_bias": self.unit_forget_bias,
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
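# Illustrative sketch, not part of the original module: driving the cell for
# a single timestep directly. Shapes are made up; with rank=2, stride 1 and
# padding="same" the spatial dimensions are preserved, so both states have
# shape (batch, height, width, filters).
def _conv_lstm_cell_single_step_sketch():
    cell = ConvLSTMCell(rank=2, filters=8, kernel_size=3, padding="same")
    x = tf.zeros((4, 16, 16, 3))  # (batch, height, width, channels)
    h = tf.zeros((4, 16, 16, 8))  # previous memory state
    c = tf.zeros((4, 16, 16, 8))  # previous carry state
    output, (new_h, new_c) = cell(x, [h, c])
    return output, new_h, new_c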
class ConvLSTM(ConvRNN):
"""Abstract N-D Convolutional LSTM layer (used as implementation base).
Similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
Args:
rank: Integer, rank of the convolution, e.g. "2" for 2D convolutions.
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
When unspecified, uses
`image_data_format` value found in your TF-Keras config file at
`~/.keras/keras.json` (if exists) else 'channels_last'.
Defaults to 'channels_last'.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
By default hyperbolic tangent activation function is applied
(`tanh(x)`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al., 2015](
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to the output of the
      layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence. (default False)
    return_state: Boolean. Whether to return the last state
in addition to the output. (default False)
go_backwards: Boolean (default False).
If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
"""
def __init__(
self,
rank,
filters,
kernel_size,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
activation="tanh",
recurrent_activation="hard_sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
dropout=0.0,
recurrent_dropout=0.0,
**kwargs,
):
cell = ConvLSTMCell(
rank=rank,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
name="conv_lstm_cell",
dtype=kwargs.get("dtype"),
)
super().__init__(
rank,
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
**kwargs,
)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super().call(
inputs, mask=mask, training=training, initial_state=initial_state
)
@property
def filters(self):
return self.cell.filters
@property
def kernel_size(self):
return self.cell.kernel_size
@property
def strides(self):
return self.cell.strides
@property
def padding(self):
return self.cell.padding
@property
def data_format(self):
return self.cell.data_format
@property
def dilation_rate(self):
return self.cell.dilation_rate
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {
"filters": self.filters,
"kernel_size": self.kernel_size,
"strides": self.strides,
"padding": self.padding,
"data_format": self.data_format,
"dilation_rate": self.dilation_rate,
"activation": activations.serialize(self.activation),
"recurrent_activation": activations.serialize(
self.recurrent_activation
),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"unit_forget_bias": self.unit_forget_bias,
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
}
base_config = super().get_config()
del base_config["cell"]
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| tf-keras/tf_keras/layers/rnn/base_conv_lstm.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/base_conv_lstm.py",
"repo_id": "tf-keras",
"token_count": 11005
} | 230 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fast LSTM layer backed by cuDNN."""
import collections
import tensorflow.compat.v2 as tf
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.layers.rnn import gru_lstm_utils
from tf_keras.layers.rnn.base_cudnn_rnn import _CuDNNRNN
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=["keras.layers.CuDNNLSTM"])
class CuDNNLSTM(_CuDNNRNN):
"""Fast LSTM implementation backed by cuDNN.
More information about cuDNN can be found on the [NVIDIA
developer website](https://developer.nvidia.com/cudnn).
Can only be run on GPU.
Args:
units: Positive integer, dimensionality of the output space.
kernel_initializer: Initializer for the `kernel` weights matrix, used
for the linear transformation of the inputs.
unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
at initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
      return_sequences: Boolean. Whether to return the last output in the
        output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state in addition to
the output.
go_backwards: Boolean (default False). If True, process the input
sequence backwards and return the reversed sequence.
stateful: Boolean (default False). If True, the last state for each
sample at index i in a batch will be used as initial state for the
sample of index i in the following batch.
"""
def __init__(
self,
units,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
**kwargs
):
self.units = units
cell_spec = collections.namedtuple("cell", "state_size")
self._cell = cell_spec(state_size=(self.units, self.units))
super().__init__(
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
**kwargs
)
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
@property
def cell(self):
return self._cell
def build(self, input_shape):
super().build(input_shape)
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_dim = int(input_shape[-1])
self.kernel = self.add_weight(
shape=(input_dim, self.units * 4),
name="kernel",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name="recurrent_kernel",
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
)
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return tf.concat(
[
self.bias_initializer(
(self.units * 5,), *args, **kwargs
),
tf.compat.v1.ones_initializer()(
(self.units,), *args, **kwargs
),
self.bias_initializer(
(self.units * 2,), *args, **kwargs
),
],
axis=0,
)
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 8,),
name="bias",
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
self.built = True
def _process_batch(self, inputs, initial_state):
if not self.time_major:
inputs = tf.transpose(inputs, perm=(1, 0, 2))
input_h = initial_state[0]
input_c = initial_state[1]
input_h = tf.expand_dims(input_h, axis=0)
input_c = tf.expand_dims(input_c, axis=0)
params = gru_lstm_utils.canonical_to_params(
weights=[
self.kernel[:, : self.units],
self.kernel[:, self.units : self.units * 2],
self.kernel[:, self.units * 2 : self.units * 3],
self.kernel[:, self.units * 3 :],
self.recurrent_kernel[:, : self.units],
self.recurrent_kernel[:, self.units : self.units * 2],
self.recurrent_kernel[:, self.units * 2 : self.units * 3],
self.recurrent_kernel[:, self.units * 3 :],
],
biases=[
self.bias[: self.units],
self.bias[self.units : self.units * 2],
self.bias[self.units * 2 : self.units * 3],
self.bias[self.units * 3 : self.units * 4],
self.bias[self.units * 4 : self.units * 5],
self.bias[self.units * 5 : self.units * 6],
self.bias[self.units * 6 : self.units * 7],
self.bias[self.units * 7 :],
],
shape=self._vector_shape,
)
args = {
"input": inputs,
"input_h": input_h,
"input_c": input_c,
"params": params,
"is_training": True,
}
outputs, h, c, _, _ = tf.raw_ops.CudnnRNNV2(**args)
if self.stateful or self.return_state:
h = h[0]
c = c[0]
if self.return_sequences:
if self.time_major:
output = outputs
else:
output = tf.transpose(outputs, perm=(1, 0, 2))
else:
output = outputs[-1]
return output, [h, c]
def get_config(self):
config = {
"units": self.units,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"unit_forget_bias": self.unit_forget_bias,
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
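# --- Illustrative usage sketch (not part of the original module) ---
# A minimal model built with the v1 CuDNNLSTM layer, assuming a CUDA-capable
# GPU is available at runtime; the layer sizes and input shape are made up.
def _example_cudnn_lstm_model():
    import tensorflow.compat.v1 as tf1
    model = tf1.keras.Sequential(
        [
            tf1.keras.layers.CuDNNLSTM(32, input_shape=(10, 8)),
            tf1.keras.layers.Dense(1),
        ]
    )
    model.compile(optimizer="adam", loss="mse")
    return model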
| tf-keras/tf_keras/layers/rnn/cudnn_lstm.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/cudnn_lstm.py",
"repo_id": "tf-keras",
"token_count": 4596
} | 231 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for RNN cells and layers."""
import tensorflow.compat.v2 as tf
from tf_keras.utils import control_flow_util
# isort: off
from tensorflow.python.platform import tf_logging as logging
def standardize_args(inputs, initial_state, constants, num_constants):
"""Standardizes `__call__` to a single list of tensor inputs.
When running a model loaded from a file, the input tensors
`initial_state` and `constants` can be passed to `RNN.__call__()` as part
of `inputs` instead of by the dedicated keyword arguments. This method
makes sure the arguments are separated and that `initial_state` and
`constants` are lists of tensors (or None).
Args:
      inputs: Tensor or list/tuple of tensors, which may include constants
        and initial states. In that case `num_constants` must be specified.
initial_state: Tensor or list of tensors or None, initial states.
constants: Tensor or list of tensors or None, constant tensors.
      num_constants: Expected number of constants (if constants are passed as
        part of the `inputs` list).
Returns:
inputs: Single tensor or tuple of tensors.
initial_state: List of tensors or None.
constants: List of tensors or None.
"""
if isinstance(inputs, list):
        # There are several situations here:
        # In graph mode, __call__ will only be called once. The
        # initial_state and constants could be in inputs (from file loading).
        # In eager mode, __call__ will be called twice: once during
        # rnn_layer(inputs=input_t, constants=c_t, ...), and a second time
        # during model.fit/train_on_batch/predict with real np data. In the
        # second case, the inputs will contain initial_state and constants as
        # eager tensors.
        #
        # In either case, the real input is the first item in the list, which
        # may itself be a nested structure. It is followed by the initial
        # states, which can be a list of items (or a list of lists if the
        # initial state is a complex structure), and finally by the constants,
        # which are a flat list.
assert initial_state is None and constants is None
if num_constants:
constants = inputs[-num_constants:]
inputs = inputs[:-num_constants]
if len(inputs) > 1:
initial_state = inputs[1:]
inputs = inputs[:1]
if len(inputs) > 1:
inputs = tuple(inputs)
else:
inputs = inputs[0]
def to_list_or_none(x):
if x is None or isinstance(x, list):
return x
if isinstance(x, tuple):
return list(x)
return [x]
initial_state = to_list_or_none(initial_state)
constants = to_list_or_none(constants)
return inputs, initial_state, constants
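# --- Illustrative sketch (not part of the original module) ---
# How `standardize_args` splits a combined `inputs` list into the real input,
# the initial states and the constants. The tensor shapes and
# `num_constants=1` below are made-up values for demonstration only.
def _example_standardize_args():
    x = tf.zeros([2, 3, 4])  # real input
    h0 = tf.zeros([2, 5])  # initial state
    c = tf.zeros([2, 5])  # constant tensor
    inputs, initial_state, constants = standardize_args(
        [x, h0, c], initial_state=None, constants=None, num_constants=1
    )
    # inputs is `x`, initial_state is `[h0]`, constants is `[c]`.
    return inputs, initial_state, constants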
def is_multiple_state(state_size):
"""Check whether the state_size contains multiple states."""
return hasattr(state_size, "__len__") and not isinstance(
state_size, tf.TensorShape
)
def generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype):
if inputs is not None:
batch_size = tf.shape(inputs)[0]
dtype = inputs.dtype
return generate_zero_filled_state(batch_size, cell.state_size, dtype)
def generate_zero_filled_state(batch_size_tensor, state_size, dtype):
"""Generate a zero filled tensor with shape [batch_size, state_size]."""
if batch_size_tensor is None or dtype is None:
raise ValueError(
"batch_size and dtype cannot be None while constructing initial "
f"state. Received: batch_size={batch_size_tensor}, dtype={dtype}"
)
def create_zeros(unnested_state_size):
flat_dims = tf.TensorShape(unnested_state_size).as_list()
init_state_size = [batch_size_tensor] + flat_dims
return tf.zeros(init_state_size, dtype=dtype)
if tf.nest.is_nested(state_size):
return tf.nest.map_structure(create_zeros, state_size)
else:
return create_zeros(state_size)
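# --- Illustrative sketch (not part of the original module) ---
# Zero-filled initial states for a flat and a nested `state_size`; the batch
# size and state sizes below are made-up values.
def _example_generate_zero_filled_state():
    flat = generate_zero_filled_state(2, 4, tf.float32)
    # -> tensor of shape (2, 4)
    nested = generate_zero_filled_state(2, [4, 8], tf.float32)
    # -> [tensor of shape (2, 4), tensor of shape (2, 8)]
    return flat, nested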
def caching_device(rnn_cell):
"""Returns the caching device for the RNN variable.
    This is useful for distributed training, when the variable is not located
    on the same device as the training worker. By enabling the device cache,
    the worker reads the variable once and caches it locally, rather than
    reading it from the remote device at every time step.
    Note that this assumes the variable the cell needs at each time step has
    the same value in the forward path and only gets updated in the backprop.
    This is true for all the default cells (SimpleRNN, GRU, LSTM). If the cell
    body relies on any variable that gets updated every time step, then the
    caching device will cause it to read a stale value.
Args:
rnn_cell: the rnn cell instance.
"""
if tf.executing_eagerly():
# caching_device is not supported in eager mode.
return None
if not getattr(rnn_cell, "_enable_caching_device", False):
return None
# Don't set a caching device when running in a loop, since it is possible
# that train steps could be wrapped in a tf.while_loop. In that scenario
# caching prevents forward computations in loop iterations from re-reading
# the updated weights.
if control_flow_util.IsInWhileLoop(tf.compat.v1.get_default_graph()):
logging.warning(
"Variable read device caching has been disabled because the "
"RNN is in tf.while_loop loop context, which will cause "
"reading stalled value in forward path. This could slow down "
"the training due to duplicated variable reads. Please "
"consider updating your code to remove tf.while_loop if possible."
)
return None
if (
rnn_cell._dtype_policy.compute_dtype
!= rnn_cell._dtype_policy.variable_dtype
):
logging.warning(
"Variable read device caching has been disabled since it "
"doesn't work with the mixed precision API. This is "
"likely to cause a slowdown for RNN training due to "
"duplicated read of variable for each timestep, which "
"will be significant in a multi remote worker setting. "
"Please consider disabling mixed precision API if "
"the performance has been affected."
)
return None
# Cache the value on the device that access the variable.
return lambda op: op.device
def config_for_enable_caching_device(rnn_cell):
"""Return the dict config for RNN cell wrt to enable_caching_device field.
Since enable_caching_device is a internal implementation detail for speed up
the RNN variable read when running on the multi remote worker setting, we
don't want this config to be serialized constantly in the JSON. We will only
serialize this field when a none default value is used to create the cell.
Args:
rnn_cell: the RNN cell for serialize.
Returns:
A dict which contains the JSON config for enable_caching_device value or
empty dict if the enable_caching_device value is same as the default
value.
"""
default_enable_caching_device = (
tf.compat.v1.executing_eagerly_outside_functions()
)
if rnn_cell._enable_caching_device != default_enable_caching_device:
return {"enable_caching_device": rnn_cell._enable_caching_device}
return {}
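# --- Illustrative sketch (not part of the original module) ---
# The enable_caching_device entry is only serialized for non-default values.
# `_FakeCell` is a made-up stand-in; real callers pass an actual RNN cell.
def _example_config_for_enable_caching_device():
    class _FakeCell:
        _enable_caching_device = False
    # Assuming TF2 eager mode, where the default is True, a False value is
    # serialized and this returns {"enable_caching_device": False}; with the
    # default value it would return {}.
    return config_for_enable_caching_device(_FakeCell())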
| tf-keras/tf_keras/layers/rnn/rnn_utils.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/rnn_utils.py",
"repo_id": "tf-keras",
"token_count": 2868
} | 232 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import tensorflow.compat.v2 as tf
from tf_keras import layers as keras_layers
from tf_keras.legacy_tf_layers import base
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=["keras.__internal__.legacy.layers.Dense"])
class Dense(keras_layers.Dense, base.Layer):
"""Densely-connected layer class.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Args:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.compat.v1.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
_reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer instance (or name) for the kernel matrix.
bias_initializer: Initializer instance (or name) for the bias.
kernel_regularizer: Regularizer instance for the kernel matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
kernel_constraint: Constraint function for the kernel matrix.
bias_constraint: Constraint function for the bias.
kernel: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Dense`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
dense = tf.compat.v1.layers.Dense(units=3)
```
After:
```python
dense = tf.keras.layers.Dense(units=3)
```
@end_compatibility
"""
def __init__(
self,
units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs
):
super().__init__(
units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.dense"])
def dense(
inputs,
units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None,
):
"""Functional interface for the densely-connected layer.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Args:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.compat.v1.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor the same shape as `inputs` except the last dimension is of
size `units`.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Dense`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.dense(x, units=3)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28,))
y = tf.keras.layers.Dense(units=3)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.dense` is deprecated and "
"will be removed in a future version. "
"Please use `tf.keras.layers.Dense` instead.",
stacklevel=2,
)
layer = Dense(
units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_scope=name,
_reuse=reuse,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.Dropout"])
class Dropout(keras_layers.Dropout, base.Layer):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units to 0
at each update during training time, which helps prevent overfitting.
The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time.
Args:
rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
10% of input units.
noise_shape: 1D tensor of type `int32` representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)`, and you want the dropout mask
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed`.
for behavior.
name: The name of the layer (string).
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Dropout`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
dropout = tf.compat.v1.layers.Dropout()
```
After:
```python
dropout = tf.keras.layers.Dropout()
```
@end_compatibility
"""
def __init__(
self, rate=0.5, noise_shape=None, seed=None, name=None, **kwargs
):
# Force the rng type to be legacy stateful since the new stateful code
# path is not supported by legacy layer.
super().__init__(
rate=rate,
noise_shape=noise_shape,
seed=seed,
name=name,
rng_type="legacy_stateful",
**kwargs
)
def call(self, inputs, training=False):
return super().call(inputs, training=training)
@keras_export(v1=["keras.__internal__.legacy.layers.dropout"])
def dropout(
inputs, rate=0.5, noise_shape=None, seed=None, training=False, name=None
):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units to 0
at each update during training time, which helps prevent overfitting.
The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time.
Args:
inputs: Tensor input.
      rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
10% of input units.
noise_shape: 1D tensor of type `int32` representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)`, and you want the dropout mask
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed`
for behavior.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
(apply dropout) or in inference mode (return the input untouched).
name: The name of the layer (string).
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Dropout`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.dropout(x)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.Dropout()(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.dropout` is deprecated and "
"will be removed in a future version. "
"Please use `tf.keras.layers.Dropout` instead.",
stacklevel=2,
)
layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)
return layer(inputs, training=training)
@keras_export(v1=["keras.__internal__.legacy.layers.Flatten"])
class Flatten(keras_layers.Flatten, base.Layer):
"""Flattens an input tensor while preserving the batch axis (axis 0).
Args:
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
Examples:
```
x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
y = Flatten()(x)
# now `y` has shape `(None, 16)`
x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')
y = Flatten()(x)
# now `y` has shape `(None, None)`
```
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Flatten`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
flatten = tf.compat.v1.layers.Flatten()
```
After:
```python
flatten = tf.keras.layers.Flatten()
```
@end_compatibility
"""
pass
@keras_export(v1=["keras.__internal__.legacy.layers.flatten"])
def flatten(inputs, name=None, data_format="channels_last"):
"""Flattens an input tensor while preserving the batch axis (axis 0).
Args:
inputs: Tensor input.
name: The name of the layer (string).
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
Returns:
Reshaped tensor.
Examples:
```
x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
y = flatten(x)
# now `y` has shape `(None, 16)`
x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')
y = flatten(x)
# now `y` has shape `(None, None)`
```
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Flatten`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.flatten(x)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.Flatten()(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.flatten` is deprecated and "
"will be removed in a future version. "
"Please use `tf.keras.layers.Flatten` instead.",
stacklevel=2,
)
layer = Flatten(name=name, data_format=data_format)
return layer(inputs)
# Aliases
FullyConnected = Dense
fully_connected = dense
| tf-keras/tf_keras/legacy_tf_layers/core.py/0 | {
"file_path": "tf-keras/tf_keras/legacy_tf_layers/core.py",
"repo_id": "tf-keras",
"token_count": 7134
} | 233 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Metric classes."""
import abc
import types
import warnings
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.dtensor import dtensor_api as dtensor
from tf_keras.dtensor import utils as dtensor_utils
from tf_keras.engine import base_layer
from tf_keras.engine import base_layer_utils
from tf_keras.engine import keras_tensor
from tf_keras.saving.legacy.saved_model import metric_serialization
from tf_keras.utils import generic_utils
from tf_keras.utils import losses_utils
from tf_keras.utils import metrics_utils
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export("keras.metrics.Metric")
class Metric(base_layer.Layer, metaclass=abc.ABCMeta):
"""Encapsulates metric logic and state.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: Additional layer keywords arguments.
Standalone usage:
```python
m = SomeMetric(...)
for input in ...:
m.update_state(input)
print('Final result: ', m.result().numpy())
```
Usage with `compile()` API:
```python
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=[tf.keras.metrics.CategoricalAccuracy()])
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
model.fit(dataset, epochs=10)
```
To be implemented by subclasses:
* `__init__()`: All state variables should be created in this method by
calling `self.add_weight()` like: `self.var = self.add_weight(...)`
* `update_state()`: Has all updates to the state variables like:
self.var.assign_add(...).
* `result()`: Computes and returns a scalar value or a dict of scalar values
for the metric from the state variables.
Example subclass implementation:
```python
class BinaryTruePositives(tf.keras.metrics.Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super(BinaryTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
sample_weight = tf.broadcast_to(sample_weight, values.shape)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
```
"""
def __init__(self, name=None, dtype=None, **kwargs):
super().__init__(name=name, dtype=dtype, **kwargs)
self.stateful = True # All metric layers are stateful.
self.built = True
if not base_layer_utils.v2_dtype_behavior_enabled():
# We only do this when the V2 behavior is not enabled, as when it is
# enabled, the dtype already defaults to floatx.
self._dtype = (
backend.floatx() if dtype is None else tf.as_dtype(dtype).name
)
def __new__(cls, *args, **kwargs):
obj = super(Metric, cls).__new__(cls)
# If `update_state` is not in eager/tf.function and it is not from a
# built-in metric, wrap it in `tf.function`. This is so that users
# writing custom metrics in v1 need not worry about control dependencies
# and return ops.
if base_layer_utils.is_in_eager_or_tf_function() or is_built_in(cls):
obj_update_state = obj.update_state
def update_state_fn(*args, **kwargs):
control_status = tf.__internal__.autograph.control_status_ctx()
ag_update_state = tf.__internal__.autograph.tf_convert(
obj_update_state, control_status
)
return ag_update_state(*args, **kwargs)
else:
if isinstance(obj.update_state, tf.__internal__.function.Function):
update_state_fn = obj.update_state
else:
update_state_fn = tf.function(obj.update_state)
obj.update_state = types.MethodType(
metrics_utils.update_state_wrapper(update_state_fn), obj
)
obj_result = obj.result
def result_fn(*args, **kwargs):
control_status = tf.__internal__.autograph.control_status_ctx()
ag_result = tf.__internal__.autograph.tf_convert(
obj_result, control_status
)
return ag_result(*args, **kwargs)
obj.result = types.MethodType(
metrics_utils.result_wrapper(result_fn), obj
)
return obj
def __call__(self, *args, **kwargs):
"""Accumulates statistics and then computes metric result value.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric,
passed on to `update_state()`.
Returns:
The metric value tensor.
"""
def replica_local_fn(*args, **kwargs):
"""Updates the state of the metric in a replica-local context."""
if any(
isinstance(arg, keras_tensor.KerasTensor)
for arg in tf.nest.flatten((args, kwargs))
):
update_op = None
else:
update_op = self.update_state(*args, **kwargs)
update_ops = []
if update_op is not None:
update_ops.append(update_op)
with tf.control_dependencies(update_ops):
return self.result()
from tf_keras.distribute import (
distributed_training_utils,
)
result_t = distributed_training_utils.call_replica_local_fn(
replica_local_fn, *args, **kwargs
)
# If the metric object return a dictionary as a result, wrap it
# with our custom dict object so we can attach the metric object
# to it.
if isinstance(result_t, dict):
result_t = _MetricDict(**result_t)
# We are adding the metric object as metadata on the result
# tensor. This is required when we want to use a metric with
# `add_metric` API on a Model/Layer in graph mode. This metric
# instance will later be used to reset variable state after each
# epoch of training.
# Example:
# model = Model()
# mean = Mean()
# model.add_metric(mean(values), name='mean')
result_t._metric_obj = self
return result_t
def __str__(self):
args = ",".join(f"{k}={v}" for k, v in self.get_config().items())
return f"{self.__class__.__name__}({args})"
def __deepcopy__(self, memo=None):
try:
new_self = self.from_config(self.get_config())
except NotImplementedError as e:
raise NotImplementedError(
"Calling `__deepcopy__()` on a TF-Keras metric "
"requires the metric to be serializable, "
"i.e. it should implement `get_config()`.\n\n"
f"Error encountered during serialization: [{e}]"
)
# Note that metrics don't implement `build()` so their variables
# are readily available after instantiation.
if self.weights:
new_self.set_weights(self.get_weights())
memo[self] = new_self
return new_self
@property
def dtype(self):
return self._dtype
def get_config(self):
"""Returns the serializable config of the metric."""
return {"name": self.name, "dtype": self.dtype}
def reset_state(self):
"""Resets all of the metric state variables.
This function is called between epochs/steps,
when a metric is evaluated during training.
"""
if not generic_utils.is_default(self.reset_states):
warnings.warn(
"Metric %s implements a `reset_states()` method; rename it "
'to `reset_state()` (without the final "s"). The name '
"`reset_states()` has been deprecated to improve API "
"consistency." % (self.__class__.__name__,),
stacklevel=2,
)
return self.reset_states()
else:
backend.batch_set_value([(v, 0) for v in self.variables])
@abc.abstractmethod
def update_state(self, *args, **kwargs):
"""Accumulates statistics for the metric.
Note: This function is executed as a graph function in graph mode.
This means:
a) Operations on the same resource are executed in textual order.
This should make it easier to do things like add the updated
value of a variable to another, for example.
b) You don't need to worry about collecting the update ops to execute.
All update ops added to the graph by this function will be
executed.
As a result, code should generally work the same way with graph or
eager execution.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric.
"""
raise NotImplementedError("Must be implemented in subclasses.")
def merge_state(self, metrics):
"""Merges the state from one or more metrics.
This method can be used by distributed systems to merge the state
computed by different metric instances. Typically the state will be
stored in the form of the metric's weights. For example, a
tf.keras.metrics.Mean metric contains a list of two weight values: a
total and a count. If there were two instances of a
tf.keras.metrics.Accuracy that each independently aggregated partial
    state for an overall accuracy calculation, these two metrics' states
could be combined as follows:
>>> m1 = tf.keras.metrics.Accuracy()
>>> _ = m1.update_state([[1], [2]], [[0], [2]])
>>> m2 = tf.keras.metrics.Accuracy()
>>> _ = m2.update_state([[3], [4]], [[3], [4]])
>>> m2.merge_state([m1])
>>> m2.result().numpy()
0.75
Args:
metrics: an iterable of metrics. The metrics must have compatible
state.
Raises:
ValueError: If the provided iterable does not contain metrics matching
the metric's required specifications.
"""
assign_add_ops = []
for metric in metrics:
if len(self.weights) != len(metric.weights):
raise ValueError(
f"Metric {metric} is not compatible with {self}"
)
for weight, weight_to_add in zip(self.weights, metric.weights):
assign_add_ops.append(weight.assign_add(weight_to_add))
return assign_add_ops
@abc.abstractmethod
def result(self):
"""Computes and returns the scalar metric value tensor or a dict of
scalars.
Result computation is an idempotent operation that simply calculates the
metric value using the state variables.
Returns:
A scalar tensor, or a dictionary of scalar tensors.
"""
raise NotImplementedError("Must be implemented in subclasses.")
### For use by subclasses ###
@doc_controls.for_subclass_implementers
def add_weight(
self,
name,
shape=(),
aggregation=tf.VariableAggregation.SUM,
synchronization=tf.VariableSynchronization.ON_READ,
initializer=None,
dtype=None,
):
"""Adds state variable. Only for use by subclasses."""
if tf.distribute.has_strategy():
strategy = tf.distribute.get_strategy()
else:
strategy = None
additional_kwargs = {}
# TODO(b/120571621): Make `ON_READ` work with TF-Keras metrics on TPU.
if backend.is_tpu_strategy(strategy):
synchronization = tf.VariableSynchronization.ON_WRITE
if getattr(self, "_mesh", None) is not None:
# When self._mesh is set, it means this metric is used for DTensor.
additional_kwargs = {
"layout": dtensor.Layout.replicated(
self._mesh, tf.TensorShape(shape).rank
)
}
if tf_utils.in_local_vars_context():
# Metrics created within a remotely-executed tf.function during
# parameter server evaluation should use tf2 Variables, so that they
# can be local variables that are freely usable and mutable within
# the function, using the
# `experimental_enable_variable_lifting=False` argument. This
# supports a visitation guarantee for model evaluation.
def local_v2_var_creator(
initializer=None, dtype=None, shape=None, **kwargs
):
init_val, var_dtype = base_layer_utils.infer_init_val_and_dtype(
initializer, dtype, shape
)
v1_only_args = ["use_resource", "collections"]
for v1_arg in v1_only_args:
kwargs.pop(v1_arg, None)
kwargs["experimental_enable_variable_lifting"] = False
return tf.Variable(
initial_value=init_val,
dtype=var_dtype,
shape=shape,
**kwargs,
)
additional_kwargs["getter"] = local_v2_var_creator
with tf_utils.maybe_init_scope(layer=self):
return super().add_weight(
name=name,
shape=shape,
dtype=self._dtype if dtype is None else dtype,
trainable=False,
initializer=initializer,
collections=[],
synchronization=synchronization,
aggregation=aggregation,
**additional_kwargs,
)
### End: For use by subclasses ###
@property
def trainable_weights(self):
# Overridden from Layer class to track submetric weights.
if self.trainable:
trainable_weights = self._trainable_weights
for m in self._metrics:
trainable_weights += m.trainable_weights
return self._dedup_weights(trainable_weights)
else:
return []
@property
def non_trainable_weights(self):
# Overridden from Layer class to track submetric weights.
if self.trainable:
non_trainable_weights = self._non_trainable_weights
for m in self._metrics:
non_trainable_weights += m.non_trainable_weights
else:
non_trainable_weights = (
self._non_trainable_weights + self._trainable_weights
)
for m in self._metrics:
non_trainable_weights += m.weights
return self._dedup_weights(non_trainable_weights)
@property
def _trackable_saved_model_saver(self):
return metric_serialization.MetricSavedModelSaver(self)
@generic_utils.default
@doc_controls.do_not_generate_docs
def reset_states(self):
# Backwards compatibility alias of `reset_state`. New classes should
# only implement `reset_state`.
return self.reset_state()
class Reduce(Metric):
"""Encapsulates metrics that perform a reduce operation on the values.
Args:
reduction: a `tf.keras.metrics.Reduction` enum value.
name: string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
def __init__(self, reduction, name, dtype=None):
super().__init__(name=name, dtype=dtype)
self.reduction = reduction
self.total = self.add_weight("total", initializer="zeros")
if reduction in [
metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
metrics_utils.Reduction.WEIGHTED_MEAN,
]:
self.count = self.add_weight("count", initializer="zeros")
def update_state(self, values, sample_weight=None):
"""Accumulates statistics for computing the metric.
Args:
values: Per-example value.
sample_weight: Optional weighting of each example. Defaults to `1`.
Returns:
Update op.
"""
[
values
], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values( # noqa: E501
[values], sample_weight
)
try:
values = tf.cast(values, self._dtype)
except (ValueError, TypeError):
msg = (
"The output of a metric function can only be a single Tensor. "
f"Received: {values}. "
)
if isinstance(values, dict):
msg += (
"To return a dict of values, implement a custom Metric "
"subclass."
)
raise RuntimeError(msg)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self._dtype)
# Update dimensions of weights to match with values if possible.
(
values,
_,
sample_weight,
) = losses_utils.squeeze_or_expand_dimensions(
values, sample_weight=sample_weight
)
try:
# Broadcast weights if possible.
sample_weight = tf.__internal__.ops.broadcast_weights(
sample_weight, values
)
except ValueError:
# Reduce values to same ndim as weight array
ndim = backend.ndim(values)
weight_ndim = backend.ndim(sample_weight)
if self.reduction == metrics_utils.Reduction.SUM:
values = tf.reduce_sum(
values, axis=list(range(weight_ndim, ndim))
)
else:
values = tf.reduce_mean(
values, axis=list(range(weight_ndim, ndim))
)
values = tf.multiply(values, sample_weight)
value_sum = tf.reduce_sum(values)
with tf.control_dependencies([value_sum]):
update_total_op = self.total.assign_add(value_sum)
# Exit early if the reduction doesn't have a denominator.
if self.reduction == metrics_utils.Reduction.SUM:
return update_total_op
# Update `count` for reductions that require a denominator.
if self.reduction == metrics_utils.Reduction.SUM_OVER_BATCH_SIZE:
num_values = tf.cast(tf.size(values), self._dtype)
elif self.reduction == metrics_utils.Reduction.WEIGHTED_MEAN:
if sample_weight is None:
num_values = tf.cast(tf.size(values), self._dtype)
else:
num_values = tf.reduce_sum(sample_weight)
else:
raise NotImplementedError(
f'Reduction "{self.reduction}" not implemented. Expected '
'"sum", "weighted_mean", or "sum_over_batch_size".'
)
with tf.control_dependencies([update_total_op]):
return self.count.assign_add(num_values)
def result(self):
if self.reduction == metrics_utils.Reduction.SUM:
return tf.identity(self.total)
elif self.reduction in [
metrics_utils.Reduction.WEIGHTED_MEAN,
metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
]:
return tf.math.divide_no_nan(self.total, self.count)
else:
raise NotImplementedError(
f'Reduction "{self.reduction}" not implemented. Expected '
'"sum", "weighted_mean", or "sum_over_batch_size".'
)
@keras_export("keras.metrics.Sum")
class Sum(Reduce):
"""Computes the (weighted) sum of the given values.
For example, if values is [1, 3, 5, 7] then the sum is 16.
If the weights were specified as [1, 1, 0, 0] then the sum would be 4.
This metric creates one variable, `total`, that is used to compute the sum
of `values`. This is ultimately returned as `sum`.
If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of
0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Sum()
>>> m.update_state([1, 3, 5, 7])
>>> m.result().numpy()
16.0
Usage with `compile()` API:
```python
model.add_metric(tf.keras.metrics.Sum(name='sum_1')(outputs))
model.compile(optimizer='sgd', loss='mse')
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="sum", dtype=None):
super().__init__(
reduction=metrics_utils.Reduction.SUM, name=name, dtype=dtype
)
@keras_export("keras.metrics.Mean")
class Mean(Reduce):
"""Computes the (weighted) mean of the given values.
For example, if values is [1, 3, 5, 7] then the mean is 4.
If the weights were specified as [1, 1, 0, 0] then the mean would be 2.
  This metric creates two variables, `total` and `count`, that are used to
  compute the average of `values`. This average is ultimately returned as
  `mean`, which is an idempotent operation that simply divides `total` by
  `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Mean()
>>> m.update_state([1, 3, 5, 7])
>>> m.result().numpy()
4.0
>>> m.reset_state()
>>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
>>> m.result().numpy()
2.0
Usage with `compile()` API:
```python
model.add_metric(tf.keras.metrics.Mean(name='mean_1')(outputs))
model.compile(optimizer='sgd', loss='mse')
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="mean", dtype=None):
super().__init__(
reduction=metrics_utils.Reduction.WEIGHTED_MEAN,
name=name,
dtype=dtype,
)
@keras_export("keras.metrics.MeanMetricWrapper")
class MeanMetricWrapper(Mean):
"""Wraps a stateless metric function with the Mean metric.
You could use this class to quickly build a mean metric from a function. The
function needs to have the signature `fn(y_true, y_pred)` and return a
per-sample loss array. `MeanMetricWrapper.result()` will return
the average metric value across all samples seen so far.
For example:
```python
def accuracy(y_true, y_pred):
return tf.cast(tf.math.equal(y_true, y_pred), tf.float32)
accuracy_metric = tf.keras.metrics.MeanMetricWrapper(fn=accuracy)
keras_model.compile(..., metrics=accuracy_metric)
```
Args:
fn: The metric function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: Keyword arguments to pass on to `fn`.
"""
@dtensor_utils.inject_mesh
def __init__(self, fn, name=None, dtype=None, **kwargs):
super().__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
`y_true` and `y_pred` should have the same shape.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
sample_weight: Optional `sample_weight` acts as a
coefficient for the metric. If a scalar is provided, then the metric
is simply scaled by the given value. If `sample_weight` is a tensor
of size `[batch_size]`, then the metric for each sample of the batch
is rescaled by the corresponding element in the `sample_weight`
vector. If the shape of `sample_weight` is `[batch_size, d0, ..
dN-1]` (or can be broadcasted to this shape), then each metric
element of `y_pred` is scaled by the corresponding value of
`sample_weight`. (Note on `dN-1`: all metric functions reduce by 1
dimension, usually the last axis (-1)).
Returns:
Update op.
"""
y_true = tf.cast(y_true, self._dtype)
y_pred = tf.cast(y_pred, self._dtype)
[
y_true,
y_pred,
], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values( # noqa: E501
[y_true, y_pred], sample_weight
)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true
)
ag_fn = tf.__internal__.autograph.tf_convert(
self._fn, tf.__internal__.autograph.control_status_ctx()
)
matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
mask = losses_utils.get_mask(matches)
sample_weight = losses_utils.apply_valid_mask(
matches, sample_weight, mask, self.reduction
)
return super().update_state(matches, sample_weight=sample_weight)
def get_config(self):
config = {
k: backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v
for k, v in self._fn_kwargs.items()
}
if type(self) is MeanMetricWrapper:
# Only include function argument when the object is a
# MeanMetricWrapper and not a subclass.
config["fn"] = self._fn
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
from tf_keras.metrics import get
# Note that while MeanMetricWrapper itself isn't public, objects of this
# class may be created and added to the model by calling model.compile.
fn = config.pop("fn", None)
if cls is MeanMetricWrapper:
return cls(get(fn), **config)
return super(MeanMetricWrapper, cls).from_config(config)
@keras_export("keras.metrics.MeanTensor")
class MeanTensor(Metric):
"""Computes the element-wise (weighted) mean of the given tensors.
`MeanTensor` returns a tensor with the same shape of the input tensors. The
mean value is updated by keeping local variables `total` and `count`. The
`total` tracks the sum of the weighted values, and `count` stores the sum of
the weighted counts.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
shape: (Optional) A list of integers, a tuple of integers, or a 1-D Tensor
of type int32. If not specified, the shape is inferred from the values
at the first call of update_state.
Standalone usage:
>>> m = tf.keras.metrics.MeanTensor()
>>> m.update_state([0, 1, 2, 3])
>>> m.update_state([4, 5, 6, 7])
>>> m.result().numpy()
array([2., 3., 4., 5.], dtype=float32)
>>> m.update_state([12, 10, 8, 6], sample_weight= [0, 0.2, 0.5, 1])
>>> m.result().numpy()
array([2. , 3.6363635, 4.8 , 5.3333335], dtype=float32)
>>> m = tf.keras.metrics.MeanTensor(dtype=tf.float64, shape=(1, 4))
>>> m.result().numpy()
array([[0., 0., 0., 0.]])
>>> m.update_state([[0, 1, 2, 3]])
>>> m.update_state([[4, 5, 6, 7]])
>>> m.result().numpy()
array([[2., 3., 4., 5.]])
"""
@dtensor_utils.inject_mesh
def __init__(self, name="mean_tensor", dtype=None, shape=None):
super().__init__(name=name, dtype=dtype)
self._shape = None
self._total = None
self._count = None
self._built = False
if shape is not None:
self._build(shape)
def _build(self, shape):
self._shape = tf.TensorShape(shape)
self._build_input_shape = self._shape
# Create new state variables
self._total = self.add_weight(
name="total", shape=shape, initializer="zeros"
)
self._count = self.add_weight(
name="count", shape=shape, initializer="zeros"
)
with tf.init_scope():
if not tf.executing_eagerly():
backend._initialize_variables(backend._get_session())
self._built = True
@property
def total(self):
return self._total if self._built else None
@property
def count(self):
return self._count if self._built else None
def update_state(self, values, sample_weight=None):
"""Accumulates statistics for computing the element-wise mean.
Args:
values: Per-example value.
sample_weight: Optional weighting of each example. Defaults to `1`.
Returns:
Update op.
"""
values = tf.cast(values, self._dtype)
if not self._built:
self._build(values.shape)
elif values.shape != self._shape:
raise ValueError(
"MeanTensor input values must always have the same "
"shape. Expected shape (set during the first call): "
f"{self._shape}. "
f"Got: {values.shape}."
)
num_values = tf.ones_like(values)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self._dtype)
# Update dimensions of weights to match with values if possible.
(
values,
_,
sample_weight,
) = losses_utils.squeeze_or_expand_dimensions(
values, sample_weight=sample_weight
)
try:
# Broadcast weights if possible.
sample_weight = tf.__internal__.ops.broadcast_weights(
sample_weight, values
)
except ValueError:
# Reduce values to same ndim as weight array
ndim = backend.ndim(values)
weight_ndim = backend.ndim(sample_weight)
values = tf.reduce_mean(
values, axis=list(range(weight_ndim, ndim))
)
num_values = tf.multiply(num_values, sample_weight)
values = tf.multiply(values, sample_weight)
update_total_op = self._total.assign_add(values)
with tf.control_dependencies([update_total_op]):
return self._count.assign_add(num_values)
def result(self):
if not self._built:
raise ValueError(
"MeanTensor does not have any value yet. Please call the "
"MeanTensor instance or use `.update_state(value)` "
"before retrieving the result."
)
return tf.math.divide_no_nan(self.total, self.count)
def reset_state(self):
if self._built:
backend.batch_set_value(
[(v, np.zeros(v.shape.as_list())) for v in self.variables]
)
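# A minimal NumPy sketch (illustration only, not part of the API) of the
# running element-wise mean that `MeanTensor` keeps: `total` accumulates the
# weighted values, `count` accumulates the weights, and the result is simply
# total / count. The update values mirror the docstring example above.
def _example_mean_tensor_arithmetic():  # illustrative only
    import numpy as np

    total = np.zeros(4)
    count = np.zeros(4)
    updates = [
        ([0.0, 1.0, 2.0, 3.0], [1.0, 1.0, 1.0, 1.0]),
        ([4.0, 5.0, 6.0, 7.0], [1.0, 1.0, 1.0, 1.0]),
        ([12.0, 10.0, 8.0, 6.0], [0.0, 0.2, 0.5, 1.0]),
    ]
    for values, weights in updates:
        values, weights = np.asarray(values), np.asarray(weights)
        total += values * weights
        count += weights
    return total / count  # ~[2.0, 3.6364, 4.8, 5.3333]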
class SumOverBatchSize(Reduce):
"""Computes the weighted sum over batch size of the given values.
For example, if values is [1, 3, 5, 7] then the metric value is 4.
If the weights were specified as [1, 1, 0, 0] then the value would be 1.
    This metric creates two variables, `total` and `count`. `total` tracks the
    weighted sum of `values` and `count` tracks the number of elements seen.
    The result is returned as the sum over batch size: an idempotent operation
    that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of
0 to mask values.
"""
def __init__(self, name="sum_over_batch_size", dtype=None):
super().__init__(
reduction=metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
name=name,
dtype=dtype,
)
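# Hedged NumPy sketch of the SUM_OVER_BATCH_SIZE reduction described in the
# docstring above: the weighted sum is divided by the number of elements in
# the batch, not by the sum of the weights. Illustration only.
def _example_sum_over_batch_size():  # illustrative only
    import numpy as np

    values = np.array([1.0, 3.0, 5.0, 7.0])
    unweighted = values.sum() / values.size  # 4.0
    weights = np.array([1.0, 1.0, 0.0, 0.0])
    weighted = (values * weights).sum() / values.size  # 1.0
    return unweighted, weighted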
class SumOverBatchSizeMetricWrapper(SumOverBatchSize):
"""Wraps a function with the `SumOverBatchSizeMetricWrapper` metric."""
def __init__(self, fn, name=None, dtype=None, **kwargs):
"""Creates a `SumOverBatchSizeMetricWrapper` instance.
Args:
fn: The metric function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
super().__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, self._dtype)
y_pred = tf.cast(y_pred, self._dtype)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true
)
ag_fn = tf.__internal__.autograph.tf_convert(
self._fn, tf.__internal__.autograph.control_status_ctx()
)
matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
mask = losses_utils.get_mask(matches)
sample_weight = losses_utils.apply_valid_mask(
matches, sample_weight, mask, self.reduction
)
return super().update_state(matches, sample_weight=sample_weight)
def get_config(self):
config = {
k: backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v
for k, v in self._fn_kwargs.items()
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def clone_metric(metric):
"""Returns a clone of the metric if stateful, otherwise returns it as is."""
if isinstance(metric, Metric):
# Metrics created within a remotely-executed tf.function during
# parameter server evaluation should not be lifted out of the graph by
# `init_scope`. This way the metric variables can be local: freely
# usable and mutable within the function. This supports a visitation
# guarantee for model evaluation.
if tf_utils.in_local_vars_context():
return metric.__class__.from_config(metric.get_config())
else:
with tf.init_scope():
return metric.__class__.from_config(metric.get_config())
return metric
def clone_metrics(metrics):
"""Clones the given metric list/dict."""
return tf.nest.map_structure(clone_metric, metrics)
def is_built_in(cls):
return cls.__module__.startswith(
".".join(Metric.__module__.split(".")[:-1])
)
class _MetricDict(dict):
"""Wrapper for returned dictionary of metrics."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._metric_obj = None
| tf-keras/tf_keras/metrics/base_metric.py/0 | {
"file_path": "tf-keras/tf_keras/metrics/base_metric.py",
"repo_id": "tf-keras",
"token_count": 16026
} | 234 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regression metrics, e.g. MAE/MSE/etc."""
import warnings
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.dtensor import utils as dtensor_utils
from tf_keras.losses import logcosh
from tf_keras.losses import mean_absolute_error
from tf_keras.losses import mean_absolute_percentage_error
from tf_keras.losses import mean_squared_error
from tf_keras.losses import mean_squared_logarithmic_error
from tf_keras.metrics import base_metric
from tf_keras.utils import losses_utils
from tf_keras.utils import metrics_utils
from tf_keras.utils.tf_utils import is_tensor_or_variable
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.metrics.MeanRelativeError")
class MeanRelativeError(base_metric.Mean):
"""Computes the mean relative error by normalizing with the given values.
This metric creates two local variables, `total` and `count` that are used
to compute the mean relative error. This is weighted by `sample_weight`, and
it is ultimately returned as `mean_relative_error`: an idempotent operation
that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
normalizer: The normalizer values with same shape as predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanRelativeError(normalizer=[1, 3, 2, 3])
>>> m.update_state([1, 3, 2, 3], [2, 4, 6, 8])
>>> # metric = mean(|y_pred - y_true| / normalizer)
>>> # = mean([1, 1, 4, 5] / [1, 3, 2, 3]) = mean([1, 1/3, 2, 5/3])
>>> # = 5/4 = 1.25
>>> m.result().numpy()
1.25
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanRelativeError(normalizer=[1, 3])])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, normalizer, name=None, dtype=None):
super().__init__(name=name, dtype=dtype)
normalizer = tf.cast(normalizer, self._dtype)
self.normalizer = normalizer
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Can
be a `Tensor` whose rank is either 0, or the same rank as `y_true`,
and must be broadcastable to `y_true`. Defaults to `1`.
Returns:
Update op.
"""
y_true = tf.cast(y_true, self._dtype)
y_pred = tf.cast(y_pred, self._dtype)
[
y_pred,
y_true,
], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values( # noqa: E501
[y_pred, y_true], sample_weight
)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true
)
y_pred, self.normalizer = losses_utils.remove_squeezable_dimensions(
y_pred, self.normalizer
)
y_pred.shape.assert_is_compatible_with(y_true.shape)
relative_errors = tf.math.divide_no_nan(
tf.abs(y_true - y_pred), self.normalizer
)
return super().update_state(
relative_errors, sample_weight=sample_weight
)
def get_config(self):
n = self.normalizer
config = {
"normalizer": backend.eval(n) if is_tensor_or_variable(n) else n
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
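# Minimal NumPy re-derivation (illustration only) of the docstring example
# above: the relative errors are |y_pred - y_true| / normalizer and the metric
# is their (sample-weighted) mean.
def _example_mean_relative_error():  # illustrative only
    import numpy as np

    y_true = np.array([1.0, 3.0, 2.0, 3.0])
    y_pred = np.array([2.0, 4.0, 6.0, 8.0])
    normalizer = np.array([1.0, 3.0, 2.0, 3.0])
    return np.mean(np.abs(y_pred - y_true) / normalizer)  # 1.25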
@keras_export("keras.metrics.CosineSimilarity")
class CosineSimilarity(base_metric.MeanMetricWrapper):
"""Computes the cosine similarity between the labels and predictions.
`cosine similarity = (a . b) / ||a|| ||b||`
See: [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity).
This metric keeps the average cosine similarity between `predictions` and
`labels` over a stream of data.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
axis: (Optional) The dimension along which the cosine
similarity is computed. Defaults to `-1`.
Standalone usage:
>>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
>>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
>>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
>>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
>>> # = ((0. + 0.) + (0.5 + 0.5)) / 2
>>> m = tf.keras.metrics.CosineSimilarity(axis=1)
>>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
>>> m.result().numpy()
0.49999997
>>> m.reset_state()
>>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]],
... sample_weight=[0.3, 0.7])
>>> m.result().numpy()
0.6999999
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.CosineSimilarity(axis=1)])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="cosine_similarity", dtype=None, axis=-1):
super().__init__(cosine_similarity, name, dtype=dtype, axis=axis)
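# NumPy sketch (illustration only) of the streamed value above: both inputs
# are L2-normalized along `axis`, multiplied elementwise, summed along `axis`,
# and the per-sample scores are averaged over the stream.
def _example_cosine_similarity():  # illustrative only
    import numpy as np

    y_true = np.array([[0.0, 1.0], [1.0, 1.0]])
    y_pred = np.array([[1.0, 0.0], [1.0, 1.0]])

    def l2_normalize(x, axis):
        return x / np.linalg.norm(x, axis=axis, keepdims=True)

    scores = np.sum(l2_normalize(y_true, 1) * l2_normalize(y_pred, 1), axis=1)
    return scores.mean()  # ~0.5, matching 0.49999997 in float32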
@keras_export("keras.metrics.MeanAbsoluteError")
class MeanAbsoluteError(base_metric.MeanMetricWrapper):
"""Computes the mean absolute error between the labels and predictions.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanAbsoluteError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.25
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanAbsoluteError()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="mean_absolute_error", dtype=None):
super().__init__(mean_absolute_error, name, dtype=dtype)
@keras_export("keras.metrics.MeanAbsolutePercentageError")
class MeanAbsolutePercentageError(base_metric.MeanMetricWrapper):
"""Computes the mean absolute percentage error between `y_true` and
`y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanAbsolutePercentageError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
250000000.0
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
500000000.0
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="mean_absolute_percentage_error", dtype=None):
super().__init__(mean_absolute_percentage_error, name, dtype=dtype)
@keras_export("keras.metrics.MeanSquaredError")
class MeanSquaredError(base_metric.MeanMetricWrapper):
"""Computes the mean squared error between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanSquaredError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.25
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanSquaredError()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="mean_squared_error", dtype=None):
super().__init__(mean_squared_error, name, dtype=dtype)
@keras_export("keras.metrics.MeanSquaredLogarithmicError")
class MeanSquaredLogarithmicError(base_metric.MeanMetricWrapper):
"""Computes the mean squared logarithmic error between `y_true` and
`y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanSquaredLogarithmicError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.12011322
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.24022643
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="mean_squared_logarithmic_error", dtype=None):
super().__init__(mean_squared_logarithmic_error, name, dtype=dtype)
@keras_export("keras.metrics.RootMeanSquaredError")
class RootMeanSquaredError(base_metric.Mean):
"""Computes root mean squared error metric between `y_true` and `y_pred`.
Standalone usage:
>>> m = tf.keras.metrics.RootMeanSquaredError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.70710677
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.RootMeanSquaredError()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="root_mean_squared_error", dtype=None):
super().__init__(name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates root mean squared error statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Can
be a `Tensor` whose rank is either 0, or the same rank as `y_true`,
and must be broadcastable to `y_true`. Defaults to `1`.
Returns:
Update op.
"""
y_true = tf.cast(y_true, self._dtype)
y_pred = tf.cast(y_pred, self._dtype)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true
)
error_sq = tf.math.squared_difference(y_pred, y_true)
return super().update_state(error_sq, sample_weight=sample_weight)
def result(self):
return tf.sqrt(tf.math.divide_no_nan(self.total, self.count))
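# Hedged NumPy sketch of the streaming RMSE above: squared errors are averaged
# by the inherited `Mean` machinery and the square root is taken only in
# `result()`. Values mirror the first docstring example.
def _example_root_mean_squared_error():  # illustrative only
    import numpy as np

    y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
    y_pred = np.array([[1.0, 1.0], [0.0, 0.0]])
    return np.sqrt(np.mean((y_pred - y_true) ** 2))  # 0.5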
@keras_export("keras.metrics.LogCoshError")
class LogCoshError(base_metric.MeanMetricWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
`logcosh = log((exp(x) + exp(-x))/2)`, where x is the error (y_pred -
    y_true).
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.LogCoshError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.10844523
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.21689045
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.LogCoshError()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="logcosh", dtype=None):
super().__init__(logcosh, name, dtype=dtype)
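# Quick NumPy check (illustration only) of the log-cosh formula quoted in the
# docstring, reproducing the 0.10844523 value of the first example.
def _example_logcosh():  # illustrative only
    import numpy as np

    x = np.array([[1.0, 0.0], [0.0, 0.0]])  # y_pred - y_true
    return np.mean(np.log((np.exp(x) + np.exp(-x)) / 2.0))  # ~0.1084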
# Adapted from TF-Addons implementation (RSquare class).
@keras_export("keras.metrics.R2Score")
class R2Score(base_metric.Metric):
"""Computes R2 score.
This is also called the
[coefficient of
determination](https://en.wikipedia.org/wiki/Coefficient_of_determination).
It indicates how close the fitted regression line
is to ground-truth data.
- The highest score possible is 1.0. It indicates that the predictors
      perfectly account for variation in the target.
- A score of 0.0 indicates that the predictors do not
account for variation in the target.
- It can also be negative if the model is worse than random.
This metric can also compute the "Adjusted R2" score.
Args:
class_aggregation: Specifies how to aggregate scores corresponding to
different output classes (or target dimensions),
i.e. different dimensions on the last axis of the predictions.
Equivalent to `multioutput` argument in Scikit-Learn.
Should be one of
`None` (no aggregation), `"uniform_average"`,
`"variance_weighted_average"`.
num_regressors: Number of independent regressors used
("Adjusted R2" score). 0 is the standard R2 score.
Defaults to `0`.
name: Optional. string name of the metric instance.
dtype: Optional. data type of the metric result.
Example:
>>> y_true = np.array([[1], [4], [3]], dtype=np.float32)
>>> y_pred = np.array([[2], [4], [4]], dtype=np.float32)
>>> metric = tf.keras.metrics.R2Score()
>>> metric.update_state(y_true, y_pred)
>>> result = metric.result()
>>> result.numpy()
0.57142854
"""
@dtensor_utils.inject_mesh
def __init__(
self,
class_aggregation="uniform_average",
num_regressors=0,
name="r2_score",
dtype=None,
):
super().__init__(name=name, dtype=dtype)
valid_class_aggregation_values = (
None,
"uniform_average",
"variance_weighted_average",
)
if class_aggregation not in valid_class_aggregation_values:
raise ValueError(
"Invalid value for argument `class_aggregation`. Expected "
f"one of {valid_class_aggregation_values}. "
f"Received: class_aggregation={class_aggregation}"
)
if num_regressors < 0:
raise ValueError(
"Invalid value for argument `num_regressors`. "
"Expected a value >= 0. "
f"Received: num_regressors={num_regressors}"
)
self.class_aggregation = class_aggregation
self.num_regressors = num_regressors
self.num_samples = self.add_weight(name="num_samples", dtype="int32")
self.built = False
def build(self, y_true_shape, y_pred_shape):
if len(y_pred_shape) != 2 or len(y_true_shape) != 2:
raise ValueError(
"R2Score expects 2D inputs with shape "
"(batch_size, output_dim). Received input "
f"shapes: y_pred.shape={y_pred_shape} and "
f"y_true.shape={y_true_shape}."
)
if y_pred_shape[-1] is None or y_true_shape[-1] is None:
raise ValueError(
"R2Score expects 2D inputs with shape "
"(batch_size, output_dim), with output_dim fully "
"defined (not None). Received input "
f"shapes: y_pred.shape={y_pred_shape} and "
f"y_true.shape={y_true_shape}."
)
num_classes = y_pred_shape[-1]
self.squared_sum = self.add_weight(
name="squared_sum",
shape=[num_classes],
initializer="zeros",
)
self.sum = self.add_weight(
name="sum",
shape=[num_classes],
initializer="zeros",
)
self.total_mse = self.add_weight(
name="residual",
shape=[num_classes],
initializer="zeros",
)
self.count = self.add_weight(
name="count",
shape=[num_classes],
initializer="zeros",
)
self.built = True
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.convert_to_tensor(y_true, dtype=self.dtype)
y_pred = tf.convert_to_tensor(y_pred, dtype=self.dtype)
if not self.built:
self.build(y_true.shape, y_pred.shape)
if sample_weight is None:
sample_weight = 1
sample_weight = tf.convert_to_tensor(sample_weight, dtype=self.dtype)
if sample_weight.shape.rank == 1:
# Make sure there's a features dimension
sample_weight = tf.expand_dims(sample_weight, axis=1)
sample_weight = tf.__internal__.ops.broadcast_weights(
weights=sample_weight, values=y_true
)
weighted_y_true = y_true * sample_weight
self.sum.assign_add(tf.reduce_sum(weighted_y_true, axis=0))
self.squared_sum.assign_add(
tf.reduce_sum(y_true * weighted_y_true, axis=0)
)
self.total_mse.assign_add(
tf.reduce_sum((y_true - y_pred) ** 2 * sample_weight, axis=0)
)
self.count.assign_add(tf.reduce_sum(sample_weight, axis=0))
self.num_samples.assign_add(tf.size(y_true))
def result(self):
mean = self.sum / self.count
total = self.squared_sum - self.sum * mean
raw_scores = 1 - (self.total_mse / total)
raw_scores = tf.where(tf.math.is_inf(raw_scores), 0.0, raw_scores)
if self.class_aggregation == "uniform_average":
r2_score = tf.reduce_mean(raw_scores)
elif self.class_aggregation == "variance_weighted_average":
weighted_sum = tf.reduce_sum(total * raw_scores)
sum_of_weights = tf.reduce_sum(total)
r2_score = weighted_sum / sum_of_weights
else:
r2_score = raw_scores
if self.num_regressors != 0:
if self.num_regressors > self.num_samples - 1:
warnings.warn(
"More independent predictors than datapoints "
"in adjusted R2 score. Falling back to standard R2 score.",
stacklevel=2,
)
elif self.num_regressors == self.num_samples - 1:
warnings.warn(
"Division by zero in Adjusted R2 score. "
"Falling back to standard R2 score.",
stacklevel=2,
)
else:
n = tf.cast(self.num_samples, dtype=tf.float32)
p = tf.cast(self.num_regressors, dtype=tf.float32)
num = tf.multiply(
tf.subtract(1.0, r2_score), tf.subtract(n, 1.0)
)
den = tf.subtract(tf.subtract(n, p), 1.0)
r2_score = tf.subtract(1.0, tf.divide(num, den))
return r2_score
def reset_state(self):
for v in self.variables:
v.assign(tf.zeros(v.shape, dtype=v.dtype))
def get_config(self):
config = {
"class_aggregation": self.class_aggregation,
"num_regressors": self.num_regressors,
}
base_config = super().get_config()
return {**base_config, **config}
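# NumPy sketch (illustration only) of the plain, non-adjusted, single-output
# R2 score computed above: R2 = 1 - SS_res / SS_tot, reproducing the
# 0.57142854 value from the docstring example.
def _example_r2_score():  # illustrative only
    import numpy as np

    y_true = np.array([1.0, 4.0, 3.0])
    y_pred = np.array([2.0, 4.0, 4.0])
    ss_res = np.sum((y_true - y_pred) ** 2)
    ss_tot = np.sum((y_true - y_true.mean()) ** 2)
    return 1.0 - ss_res / ss_tot  # ~0.5714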
def cosine_similarity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions.
Args:
y_true: The ground truth values.
y_pred: The prediction values.
axis: (Optional) -1 is the dimension along which the cosine
similarity is computed. Defaults to `-1`.
Returns:
Cosine similarity value.
"""
y_true = tf.linalg.l2_normalize(y_true, axis=axis)
y_pred = tf.linalg.l2_normalize(y_pred, axis=axis)
return tf.reduce_sum(y_true * y_pred, axis=axis)
| tf-keras/tf_keras/metrics/regression_metrics.py/0 | {
"file_path": "tf-keras/tf_keras/metrics/regression_metrics.py",
"repo_id": "tf-keras",
"token_count": 9462
} | 235 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains testing utilities related to mixed precision."""
import tensorflow.compat.v2 as tf
from tf_keras import regularizers
from tf_keras.engine import base_layer
def create_identity_with_grad_check_fn(expected_gradient, expected_dtype=None):
"""Returns a function that asserts it's gradient has a certain value.
This serves as a hook to assert intermediate gradients have a certain value.
This returns an identity function. The identity's gradient function is also
the identity function, except it asserts that the gradient equals
`expected_gradient` and has dtype `expected_dtype`.
Args:
expected_gradient: The gradient function asserts that the gradient is this
value.
expected_dtype: The gradient function asserts the gradient has this dtype.
Returns:
An identity function whose gradient function asserts the gradient has a
certain value.
"""
@tf.custom_gradient
def _identity_with_grad_check(x):
"""Function that asserts it's gradient has a certain value."""
x = tf.identity(x)
def grad(dx):
"""Gradient function that asserts the gradient has a certain
value."""
if expected_dtype:
assert (
dx.dtype == expected_dtype
), f"dx.dtype should be {expected_dtype} but is: {dx.dtype}"
expected_tensor = tf.convert_to_tensor(
expected_gradient, dtype=dx.dtype, name="expected_gradient"
)
# Control dependency is to ensure input is available. It's possible
# the dataset will throw a StopIteration to indicate there is no
# more data, in which case we don't want to run the assertion.
with tf.control_dependencies([x]):
assert_op = tf.compat.v1.assert_equal(dx, expected_tensor)
with tf.control_dependencies([assert_op]):
dx = tf.identity(dx)
return dx
return x, grad
# TF-Keras sometimes has trouble serializing Lambda layers with a decorated
# function. So we define and return a non-decorated function.
def identity_with_grad_check(x):
return _identity_with_grad_check(x)
return identity_with_grad_check
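# Hedged usage sketch (not part of the original test utilities): wrapping a
# tensor with the returned identity lets a test assert on the gradient that
# flows through that point. The constants below are purely illustrative and
# assume eager execution.
def _example_identity_with_grad_check_usage():  # illustrative only
    fn = create_identity_with_grad_check_fn(expected_gradient=2.0)
    x = tf.constant(3.0)
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = 2.0 * fn(x)  # the gradient reaching `fn` is 2.0
    return tape.gradient(y, x)  # internal assert passes; returns 2.0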
def create_identity_with_nan_gradients_fn(have_nan_gradients):
"""Returns a function that optionally has NaN gradients.
This serves as a hook to introduce NaN gradients to a model. This returns an
identity function. The identity's gradient function will check if the
boolean tensor `have_nan_gradients` is True. If so, the gradient will be
NaN. Otherwise, the gradient will also be the identity.
Args:
have_nan_gradients: A scalar boolean tensor. If True, gradients will be
NaN. Otherwise, the gradient function is the identity function.
Returns:
An identity function whose gradient function will return NaNs, if
`have_nan_gradients` is True.
"""
@tf.custom_gradient
def _identity_with_nan_gradients(x):
"""Function whose gradient is NaN iff `have_nan_gradients` is True."""
x = tf.identity(x)
def grad(dx):
return tf.cond(
have_nan_gradients, lambda: dx * float("NaN"), lambda: dx
)
return x, grad
# TF-Keras sometimes has trouble serializing Lambda layers with a decorated
# function. So we define and return a non-decorated function.
def identity_with_nan_gradients(x):
return _identity_with_nan_gradients(x)
return identity_with_nan_gradients
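# Hedged usage sketch: the boolean tensor switches the backward pass between
# the identity gradient and NaN, e.g. to exercise dynamic loss-scaling logic
# in mixed precision tests. Names and constants are illustrative only and
# assume eager execution.
def _example_identity_with_nan_gradients_usage():  # illustrative only
    have_nan = tf.Variable(False)
    fn = create_identity_with_nan_gradients_fn(have_nan)
    x = tf.constant(1.0)
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = fn(x)
    return tape.gradient(y, x)  # 1.0; NaN instead if `have_nan` is True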
class AssertTypeLayer(base_layer.Layer):
"""A layer which asserts it's inputs are a certain type."""
def __init__(self, assert_type=None, **kwargs):
self._assert_type = (
tf.as_dtype(assert_type).name if assert_type else None
)
super().__init__(**kwargs)
def assert_input_types(self, inputs):
"""Asserts `inputs` are of the correct type. Should be called in
call()."""
if self._assert_type:
inputs_flattened = tf.nest.flatten(inputs)
for inp in inputs_flattened:
assert inp.dtype.base_dtype == self._assert_type, (
"Input tensor has type %s which does "
"not match assert type %s"
% (inp.dtype.name, self._assert_type)
)
class MultiplyLayer(AssertTypeLayer):
"""A layer which multiplies its input by a scalar variable."""
def __init__(
self,
regularizer=None,
activity_regularizer=None,
use_operator=False,
var_name="v",
**kwargs,
):
"""Initializes the MultiplyLayer.
Args:
regularizer: The weight regularizer on the scalar variable.
activity_regularizer: The activity regularizer.
          use_operator: If True, multiply using the * operator. If False,
            multiply using tf.multiply.
var_name: The name of the variable. It can be useful to pass a name
other than 'v', to test having the attribute name (self.v) being
different from the variable name.
**kwargs: Passed to AssertTypeLayer constructor.
"""
self._regularizer = regularizer
if isinstance(regularizer, dict):
self._regularizer = regularizers.deserialize(
regularizer, custom_objects=globals()
)
self._activity_regularizer = activity_regularizer
if isinstance(activity_regularizer, dict):
self._activity_regularizer = regularizers.deserialize(
activity_regularizer, custom_objects=globals()
)
self._use_operator = use_operator
self._var_name = var_name
super().__init__(
activity_regularizer=self._activity_regularizer, **kwargs
)
def build(self, _):
self.v = self.add_weight(
self._var_name,
(),
initializer="ones",
regularizer=self._regularizer,
)
self.built = True
def call(self, inputs):
self.assert_input_types(inputs)
return self._multiply(inputs, self.v)
def _multiply(self, x, y):
if self._use_operator:
return x * y
else:
return tf.multiply(x, y)
def get_config(self):
config = super().get_config()
config["regularizer"] = regularizers.serialize(self._regularizer)
config["activity_regularizer"] = regularizers.serialize(
self._activity_regularizer
)
config["use_operator"] = self._use_operator
config["var_name"] = self._var_name
config["assert_type"] = self._assert_type
return config
class MultiplyLayerWithoutAutoCast(MultiplyLayer):
"""Same as MultiplyLayer, but does not use AutoCastVariables."""
def build(self, _):
dtype = self.dtype
if dtype in ("float16", "bfloat16"):
dtype = "float32"
self.v = self.add_weight(
"v",
(),
initializer="ones",
dtype=dtype,
experimental_autocast=False,
regularizer=self._regularizer,
)
self.built = True
def call(self, inputs):
self.assert_input_types(inputs)
assert self.v.dtype in (tf.float32, tf.float64)
return self._multiply(inputs, tf.cast(self.v, inputs.dtype))
class IdentityRegularizer(regularizers.Regularizer):
def __call__(self, x):
assert x.dtype == tf.float32
return tf.identity(x)
def get_config(self):
return {}
class ReduceSumRegularizer(regularizers.Regularizer):
def __call__(self, x):
return tf.reduce_sum(x)
def get_config(self):
return {}
| tf-keras/tf_keras/mixed_precision/test_util.py/0 | {
"file_path": "tf-keras/tf_keras/mixed_precision/test_util.py",
"repo_id": "tf-keras",
"token_count": 3427
} | 236 |
tf_keras/api/tests/API_UPDATE_WARNING.txt:
tf_keras/api/tests/README.txt:
tf_keras/benchmarks/layer_benchmarks/run_xprof.py:
| tf-keras/tf_keras/opensource_only.files/0 | {
"file_path": "tf-keras/tf_keras/opensource_only.files",
"repo_id": "tf-keras",
"token_count": 54
} | 237 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adam optimizer implementation."""
import tensorflow.compat.v2 as tf
from tf_keras import backend_config
from tf_keras.optimizers.legacy import optimizer_v2
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.optimizers.legacy.Adam",
v1=["keras.optimizers.Adam", "keras.optimizers.legacy.Adam"],
)
class Adam(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the Adam algorithm.
Adam optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments.
According to
[Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
the method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
that takes no arguments and returns the actual value to use, The
learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates. Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use, The
exponential decay rate for the 2nd moment estimates. Defaults to
`0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
`1e-7`.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm from
the paper "On the Convergence of Adam and beyond". Defaults to `False`.
name: Optional name for the operations created when applying gradients.
Defaults to `"Adam"`.
**kwargs: keyword arguments. Allowed arguments are `clipvalue`,
`clipnorm`, `global_clipnorm`.
If `clipvalue` (float) is set, the gradient of each weight
is clipped to be no higher than this value.
If `clipnorm` (float) is set, the gradient of each weight
is individually clipped so that its norm is no higher than this value.
If `global_clipnorm` (float) is set the gradient of all weights is
clipped so that their global norm is no higher than this value.
Usage:
>>> opt = tf.keras.optimizers.legacy.Adam(learning_rate=0.1)
>>> var1 = tf.Variable(10.0)
>>> loss = lambda: (var1 ** 2)/2.0 # d(loss)/d(var1) == var1
>>> step_count = opt.minimize(loss, [var1]).numpy()
>>> # The first step is `-learning_rate*sign(grad)`
>>> var1.numpy()
9.9
Reference:
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
- [Reddi et al., 2018](
https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`.
Notes:
The default value of 1e-7 for epsilon might not be a good default in
general. For example, when training an Inception network on ImageNet a
current good choice is 1.0 or 0.1. Note that since Adam uses the
formulation just before Section 2.1 of the Kingma and Ba paper rather than
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
hat" in the paper.
The sparse implementation of this algorithm (used when the gradient is an
IndexedSlices object, typically because of `tf.gather` or an embedding
lookup in the forward pass) does apply momentum to variable slices even if
they were not used in the forward pass (meaning they have a gradient equal
to zero). Momentum decay (beta1) is also applied to the entire momentum
accumulator. This means that the sparse behavior is equivalent to the dense
behavior (in contrast to some momentum implementations which ignore momentum
unless a variable slice was actually used).
"""
_HAS_AGGREGATE_GRAD = True
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
name="Adam",
**kwargs
):
super().__init__(name, **kwargs)
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("decay", self._initial_decay)
self._set_hyper("beta_1", beta_1)
self._set_hyper("beta_2", beta_2)
self.epsilon = epsilon or backend_config.epsilon()
self.amsgrad = amsgrad
def _create_slots(self, var_list):
# Create slots for the first and second moments.
# Separate for-loops to respect the ordering of slot variables from v1.
for var in var_list:
self.add_slot(var, "m")
for var in var_list:
self.add_slot(var, "v")
if self.amsgrad:
for var in var_list:
self.add_slot(var, "vhat")
def _prepare_local(self, var_device, var_dtype, apply_state):
super()._prepare_local(var_device, var_dtype, apply_state)
local_step = tf.cast(self.iterations + 1, var_dtype)
beta_1_t = tf.identity(self._get_hyper("beta_1", var_dtype))
beta_2_t = tf.identity(self._get_hyper("beta_2", var_dtype))
beta_1_power = tf.pow(beta_1_t, local_step)
beta_2_power = tf.pow(beta_2_t, local_step)
lr = apply_state[(var_device, var_dtype)]["lr_t"] * (
tf.sqrt(1 - beta_2_power) / (1 - beta_1_power)
)
apply_state[(var_device, var_dtype)].update(
dict(
lr=lr,
epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_1_power=beta_1_power,
one_minus_beta_1_t=1 - beta_1_t,
beta_2_t=beta_2_t,
beta_2_power=beta_2_power,
one_minus_beta_2_t=1 - beta_2_t,
)
)
def set_weights(self, weights):
params = self.weights
# If weights are generated by TF-Keras V1 optimizer, it includes vhats
        # even without amsgrad, i.e., V1 optimizer has 3x + 1 variables, while
# optimizer has 2x + 1 variables. Filter vhats out for compatibility.
num_vars = int((len(params) - 1) / 2)
if len(weights) == 3 * num_vars + 1:
weights = weights[: len(params)]
super().set_weights(weights)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
if not self.amsgrad:
return tf.raw_ops.ResourceApplyAdam(
var=var.handle,
m=m.handle,
v=v.handle,
beta1_power=coefficients["beta_1_power"],
beta2_power=coefficients["beta_2_power"],
lr=coefficients["lr_t"],
beta1=coefficients["beta_1_t"],
beta2=coefficients["beta_2_t"],
epsilon=coefficients["epsilon"],
grad=grad,
use_locking=self._use_locking,
)
else:
vhat = self.get_slot(var, "vhat")
return tf.raw_ops.ResourceApplyAdamWithAmsgrad(
var=var.handle,
m=m.handle,
v=v.handle,
vhat=vhat.handle,
beta1_power=coefficients["beta_1_power"],
beta2_power=coefficients["beta_2_power"],
lr=coefficients["lr_t"],
beta1=coefficients["beta_1_t"],
beta2=coefficients["beta_2_t"],
epsilon=coefficients["epsilon"],
grad=grad,
use_locking=self._use_locking,
)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * coefficients["one_minus_beta_1_t"]
m_t = tf.compat.v1.assign(
m, m * coefficients["beta_1_t"], use_locking=self._use_locking
)
with tf.control_dependencies([m_t]):
m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * coefficients["one_minus_beta_2_t"]
v_t = tf.compat.v1.assign(
v, v * coefficients["beta_2_t"], use_locking=self._use_locking
)
with tf.control_dependencies([v_t]):
v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
if not self.amsgrad:
v_sqrt = tf.sqrt(v_t)
var_update = tf.compat.v1.assign_sub(
var,
coefficients["lr"] * m_t / (v_sqrt + coefficients["epsilon"]),
use_locking=self._use_locking,
)
return tf.group(*[var_update, m_t, v_t])
else:
v_hat = self.get_slot(var, "vhat")
v_hat_t = tf.maximum(v_hat, v_t)
with tf.control_dependencies([v_hat_t]):
v_hat_t = tf.compat.v1.assign(
v_hat, v_hat_t, use_locking=self._use_locking
)
v_hat_sqrt = tf.sqrt(v_hat_t)
var_update = tf.compat.v1.assign_sub(
var,
coefficients["lr"]
* m_t
/ (v_hat_sqrt + coefficients["epsilon"]),
use_locking=self._use_locking,
)
return tf.group(*[var_update, m_t, v_t, v_hat_t])
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
"learning_rate"
),
"decay": self._initial_decay,
"beta_1": self._serialize_hyperparameter("beta_1"),
"beta_2": self._serialize_hyperparameter("beta_2"),
"epsilon": self.epsilon,
"amsgrad": self.amsgrad,
}
)
return config
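# Hedged NumPy sketch of a single bias-corrected Adam step (epsilon omitted
# for clarity), illustrating why the first update in the docstring example is
# approximately -learning_rate * sign(grad): on step 1, m_hat / sqrt(v_hat)
# equals g / |g|. Constants mirror the docstring example; illustration only.
def _example_first_adam_step():  # illustrative only
    import numpy as np

    lr, beta_1, beta_2 = 0.1, 0.9, 0.999
    var, grad = 10.0, 10.0  # d(var**2 / 2)/d(var) == var
    m = (1 - beta_1) * grad  # first-moment update from m0 = 0
    v = (1 - beta_2) * grad * grad  # second-moment update from v0 = 0
    m_hat = m / (1 - beta_1)  # bias correction at step t = 1
    v_hat = v / (1 - beta_2)
    return var - lr * m_hat / np.sqrt(v_hat)  # 10.0 - 0.1 * sign(10.0) = 9.9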
class NonFusedAdam(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the Adam algorithm without fused kernels.
Adam optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments.
According to the paper
[Adam: A Method for Stochastic Optimization. Kingma et al.,
2014](http://arxiv.org/abs/1412.6980), the method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
For AMSGrad see [On The Convergence Of Adam And Beyond.
    Reddi et al., 2018](https://openreview.net/pdf?id=ryQu7f-RZ).
**If amsgrad = False**:
initialize $m_0$ as 1st moment vector
initialize $v_0$ as 2nd moment vector
The update rule for $\theta$ with gradient $g$ uses an optimization
described at the end of section 2 of the paper:
$$lr_t = \mathrm{learning\_rate} *
\sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
$$m_t = \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
$$v_t = \beta_2 * v_{t-1} + (1 - \beta_2) * g^2$$
$$\theta_t = \theta_{t-1} - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$
**If amsgrad = True**:
initialize $m_0$ as 1st moment vector
initialize $v_0$ as 2nd moment vector
initialize $\hat{v}_0$ as 2nd moment vector
The update rule for $\theta$ with gradient $g$ uses an optimization
described at the end of section 2 of the paper:
$$lr_t = \mathrm{learning\_rate} *
\sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
$$m_t = \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
$$v_t = \beta_2 * v_{t-1} + (1 - \beta_2) * g^2$$
$$\hat{v}_t = \max(\hat{v}_{t-1}, v_t)$$
$$\theta_t = \theta_{t-1} - lr_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$
The default value of 1e-7 for epsilon might not be a good default in
general. For example, when training an Inception network on ImageNet a
current good choice is 1.0 or 0.1. Note that since Adam uses the
formulation just before Section 2.1 of the Kingma and Ba paper rather than
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
hat" in the paper.
The sparse implementation of this algorithm (used when the gradient is an
IndexedSlices object, typically because of `tf.gather` or an embedding
lookup in the forward pass) does apply momentum to variable slices even if
they were not used in the forward pass (meaning they have a gradient equal
to zero). Momentum decay (beta1) is also applied to the entire momentum
accumulator. This means that the sparse behavior is equivalent to the dense
behavior (in contrast to some momentum implementations which ignore momentum
unless a variable slice was actually used).
Usage:
>>> opt = tf.keras.optimizers.legacy.Adam(learning_rate=0.1)
>>> var1 = tf.Variable(10.0)
>>> loss = lambda: (var1 ** 2)/2.0 # d(loss)/d(var1) == var1
>>> step_count = opt.minimize(loss, [var1]).numpy()
>>> # The first step is `-learning_rate*sign(grad)`
>>> var1.numpy()
9.9
"""
_HAS_AGGREGATE_GRAD = True
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
name="Adam",
**kwargs
):
"""Construct a new Adam optimizer.
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is
a `tf.keras.optimizers.schedules.LearningRateSchedule`, or a
callable that takes no arguments and returns the actual value to
use, The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable that
takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates. Defaults to
`0.9`.
beta_2: A float value or a constant float tensor, or a callable that
takes no arguments and returns the actual value to use, The
exponential decay rate for the 2nd moment estimates. Defaults to
`0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults
to `1e-7`.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and beyond". Defaults to
`False`.
name: Optional name for the operations created when applying
gradients. Defaults to "Adam".
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`,
`lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is
clip gradients by value, `decay` is included for backward
compatibility to allow time inverse decay of learning rate. `lr` is
included for backward compatibility, recommended to use
`learning_rate` instead.
"""
super().__init__(name, **kwargs)
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("decay", self._initial_decay)
self._set_hyper("beta_1", beta_1)
self._set_hyper("beta_2", beta_2)
self.epsilon = epsilon or backend_config.epsilon()
self.amsgrad = amsgrad
def _create_slots(self, var_list):
# Create slots for the first and second moments.
# Separate for-loops to respect the ordering of slot variables from v1.
for var in var_list:
self.add_slot(var, "m")
for var in var_list:
self.add_slot(var, "v")
if self.amsgrad:
for var in var_list:
self.add_slot(var, "vhat")
def _prepare_local(self, var_device, var_dtype, apply_state):
super()._prepare_local(var_device, var_dtype, apply_state)
local_step = tf.cast(self.iterations + 1, var_dtype)
beta_1_t = tf.identity(self._get_hyper("beta_1", var_dtype))
beta_2_t = tf.identity(self._get_hyper("beta_2", var_dtype))
beta_1_power = tf.pow(beta_1_t, local_step)
beta_2_power = tf.pow(beta_2_t, local_step)
lr = apply_state[(var_device, var_dtype)]["lr_t"] * (
tf.sqrt(1 - beta_2_power) / (1 - beta_1_power)
)
apply_state[(var_device, var_dtype)].update(
dict(
lr=lr,
epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_1_power=beta_1_power,
one_minus_beta_1_t=1 - beta_1_t,
beta_2_t=beta_2_t,
beta_2_power=beta_2_power,
one_minus_beta_2_t=1 - beta_2_t,
)
)
def set_weights(self, weights):
params = self.weights
# If weights are generated by TF-Keras V1 optimizer, it includes vhats
        # even without amsgrad, i.e., V1 optimizer has 3x + 1 variables, while
# optimizer has 2x + 1 variables. Filter vhats out for compatibility.
num_vars = int((len(params) - 1) / 2)
if len(weights) == 3 * num_vars + 1:
weights = weights[: len(params)]
super().set_weights(weights)
@tf.function(jit_compile=True)
def _resource_apply_dense_impl(self, grad, var, apply_state):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
alpha = (
coefficients["lr_t"]
* tf.sqrt(1 - coefficients["beta_2_power"])
/ (1 - coefficients["beta_1_power"])
)
m.assign_add((grad - m) * (1 - coefficients["beta_1_t"]))
v.assign_add((tf.square(grad) - v) * (1 - coefficients["beta_2_t"]))
if self.amsgrad:
vhat = self.get_slot(var, "vhat")
vhat.assign(tf.maximum(vhat, v))
v = vhat
var.assign_sub((m * alpha) / (tf.sqrt(v) + coefficients["epsilon"]))
def _resource_apply_dense(self, grad, var, apply_state=None):
self._resource_apply_dense_impl(grad, var, apply_state)
if not tf.executing_eagerly():
return tf.compat.v1.get_default_graph().get_operations()[-1]
@tf.function(jit_compile=True)
def _resource_apply_sparse_impl(self, grad, var, indices, apply_state):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * coefficients["one_minus_beta_1_t"]
m.assign(m * coefficients["beta_1_t"])
m.scatter_add(tf.IndexedSlices(m_scaled_g_values, indices))
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * coefficients["one_minus_beta_2_t"]
v.assign(v * coefficients["beta_2_t"])
v.scatter_add(tf.IndexedSlices(v_scaled_g_values, indices))
if not self.amsgrad:
var.assign_sub(
coefficients["lr"] * m / (tf.sqrt(v) + coefficients["epsilon"])
)
else:
v_hat = self.get_slot(var, "vhat")
v_hat.assign(tf.maximum(v_hat, v))
var.assign_sub(
coefficients["lr"]
* m
/ (tf.sqrt(v_hat) + coefficients["epsilon"])
)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
self._resource_apply_sparse_impl(grad, var, indices, apply_state)
if not tf.executing_eagerly():
return tf.compat.v1.get_default_graph().get_operations()[-1]
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
"learning_rate"
),
"decay": self._initial_decay,
"beta_1": self._serialize_hyperparameter("beta_1"),
"beta_2": self._serialize_hyperparameter("beta_2"),
"epsilon": self.epsilon,
"amsgrad": self.amsgrad,
}
)
return config
| tf-keras/tf_keras/optimizers/legacy/adam.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy/adam.py",
"repo_id": "tf-keras",
"token_count": 10024
} | 238 |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lion optimizer implementation."""
import tensorflow.compat.v2 as tf
from tf_keras.optimizers import optimizer
from tf_keras.saving.object_registration import register_keras_serializable
# isort: off
from tensorflow.python.util.tf_export import keras_export
@register_keras_serializable()
@keras_export("keras.optimizers.Lion", v1=[])
class Lion(optimizer.Optimizer):
"""Optimizer that implements the Lion algorithm.
The Lion optimizer is a stochastic-gradient-descent method that uses the
sign operator to control the magnitude of the update, unlike other adaptive
    optimizers such as Adam that rely on second-order moments. This makes
Lion more memory-efficient as it only keeps track of the momentum. According
to the authors (see reference), its performance gain over Adam grows with
the batch size. Because the update of Lion is produced through the sign
operation, resulting in a larger norm, a suitable learning rate for Lion is
typically 3-10x smaller than that for AdamW. The weight decay for Lion
should be in turn 3-10x larger than that for AdamW to maintain a
similar strength (lr * wd).
Args:
learning_rate: A `tf.Tensor`, floating point value, a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
that takes no arguments and returns the actual value to use. The
learning rate. Defaults to 0.0001.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
rate to combine the current gradient and the 1st moment estimate.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimate.
{{base_optimizer_keyword_args}}
References:
- [Chen et al., 2023](http://arxiv.org/abs/2302.06675)
- [Authors' implementation](
http://github.com/google/automl/tree/master/lion)
"""
def __init__(
self,
learning_rate=0.0001,
beta_1=0.9,
beta_2=0.99,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
jit_compile=True,
name="Lion",
**kwargs,
):
super().__init__(
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
jit_compile=jit_compile,
**kwargs,
)
self._learning_rate = self._build_learning_rate(learning_rate)
self.beta_1 = beta_1
self.beta_2 = beta_2
if beta_1 <= 0 or beta_1 > 1:
raise ValueError(
f"`beta_1`={beta_1} must be between ]0, 1]. Otherwise, "
"the optimizer degenerates to SignSGD."
)
def build(self, var_list):
"""Initialize optimizer variables.
Lion optimizer has one variable `momentums`.
Args:
var_list: list of model variables to build Lion variables on.
"""
super().build(var_list)
if hasattr(self, "_built") and self._built:
return
self.momentums = []
for var in var_list:
self.momentums.append(
self.add_variable_from_reference(
model_variable=var, variable_name="m"
)
)
self._built = True
def update_step(self, gradient, variable):
"""Update step given gradient and the associated model variable."""
lr = tf.cast(self.learning_rate, variable.dtype)
beta_1 = tf.cast(self.beta_1, variable.dtype)
beta_2 = tf.cast(self.beta_2, variable.dtype)
var_key = self._var_key(variable)
m = self.momentums[self._index_dict[var_key]]
if isinstance(gradient, tf.IndexedSlices):
# Sparse gradients (use m as a buffer)
m.assign(m * beta_1)
m.scatter_add(
tf.IndexedSlices(
gradient.values * (1.0 - beta_1), gradient.indices
)
)
variable.assign_sub(lr * tf.math.sign(m))
m.assign(m * beta_2 / beta_1)
m.scatter_add(
tf.IndexedSlices(
gradient.values * (1.0 - beta_2 / beta_1), gradient.indices
)
)
else:
# Dense gradients
variable.assign_sub(
lr * tf.math.sign(m * beta_1 + gradient * (1.0 - beta_1))
)
m.assign(m * beta_2 + gradient * (1.0 - beta_2))
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
self._learning_rate
),
"beta_1": self.beta_1,
"beta_2": self.beta_2,
}
)
return config
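# Hedged NumPy sketch of the dense Lion update implemented above: the
# parameter moves by the sign of an interpolation between the momentum and the
# current gradient, and the momentum itself is then updated with `beta_2`.
# Constants are illustrative only.
def _example_lion_dense_step():  # illustrative only
    import numpy as np

    lr, beta_1, beta_2 = 1e-4, 0.9, 0.99
    var = np.array([0.5, -0.5])
    m = np.zeros_like(var)
    grad = np.array([0.2, -0.1])
    var = var - lr * np.sign(m * beta_1 + grad * (1.0 - beta_1))
    m = m * beta_2 + grad * (1.0 - beta_2)
    return var, m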
Lion.__doc__ = Lion.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| tf-keras/tf_keras/optimizers/lion.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/lion.py",
"repo_id": "tf-keras",
"token_count": 2686
} | 239 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Protobuf containing the version for each TF-Keras object saved in a SavedModel.
syntax = "proto3";
package third_party.py.tf_keras.protobuf;
// This file is a copy of the TensorFlow Versions proto.
// Keep this file in sync with the source proto definition at
// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/versions.proto
// Version information for a piece of serialized data
//
// There are different types of versions for each type of data
// (GraphDef, etc.), but they all have the same common shape
// described here.
//
// Each consumer has "consumer" and "min_producer" versions (specified
// elsewhere). A consumer is allowed to consume this data if
//
// producer >= min_producer
// consumer >= min_consumer
// consumer not in bad_consumers
//
// LINT.IfChange
message VersionDef {
// The version of the code that produced this data.
int32 producer = 1;
// Any consumer below this version is not allowed to consume this data.
int32 min_consumer = 2;
// Specific consumer versions which are disallowed (e.g. due to bugs).
repeated int32 bad_consumers = 3;
}
// LINT.ThenChange(third_party/tensorflow/core/framework/versions.proto)
| tf-keras/tf_keras/protobuf/versions.proto/0 | {
"file_path": "tf-keras/tf_keras/protobuf/versions.proto",
"repo_id": "tf-keras",
"token_count": 499
} | 240 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes that list&validate all attributes to serialize to
SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tf_keras.saving.legacy.saved_model import json_utils
from tf_keras.saving.legacy.saved_model import utils
class SavedModelSaver(object, metaclass=abc.ABCMeta):
"""Saver defining the methods and properties used to serialize Keras
objects."""
def __init__(self, obj):
self.obj = obj
@abc.abstractproperty
def object_identifier(self):
"""String stored in object identifier field in the SavedModel proto.
Returns:
A string with the object identifier, which is used at load time.
"""
raise NotImplementedError
@property
def tracking_metadata(self):
"""String stored in metadata field in the SavedModel proto.
Returns:
A serialized JSON storing information necessary for recreating this
layer.
"""
# TODO(kathywu): check that serialized JSON can be loaded (e.g., if an
# object is in the python property)
return json_utils.Encoder().encode(self.python_properties)
def trackable_children(self, serialization_cache):
"""Lists all Trackable children connected to this object."""
if not utils.should_save_traces():
return {}
children = self.objects_to_serialize(serialization_cache)
children.update(self.functions_to_serialize(serialization_cache))
return children
@abc.abstractproperty
def python_properties(self):
"""Returns dictionary of python properties to save in the metadata.
This dictionary must be serializable and deserializable to/from JSON.
When loading, the items in this dict are used to initialize the object
and define attributes in the revived object.
"""
raise NotImplementedError
@abc.abstractmethod
def objects_to_serialize(self, serialization_cache):
"""Returns dictionary of extra checkpointable objects to serialize.
See `functions_to_serialize` for an explanation of this function's
effects.
Args:
serialization_cache: Dictionary passed to all objects in the same
object graph during serialization.
Returns:
A dictionary mapping attribute names to checkpointable objects.
"""
raise NotImplementedError
@abc.abstractmethod
def functions_to_serialize(self, serialization_cache):
"""Returns extra functions to include when serializing a TF-Keras
object.
        Normally, when exporting an object to SavedModel, only the
functions and objects defined by the user are saved. For example:
```
obj = tf.Module()
obj.v = tf.Variable(1.)
@tf.function
def foo(...): ...
obj.foo = foo
w = tf.Variable(1.)
tf.saved_model.save(obj, 'path/to/saved/model')
loaded = tf.saved_model.load('path/to/saved/model')
loaded.v # Variable with the same value as obj.v
loaded.foo # Equivalent to obj.foo
loaded.w # AttributeError
```
Assigning trackable objects to attributes creates a graph, which is used
for both checkpointing and SavedModel serialization.
When the graph generated from attribute tracking is insufficient, extra
objects and functions may be added at serialization time. For example,
most models do not have their call function wrapped with a @tf.function
decorator. This results in `model.call` not being saved. Since Keras
objects should be revivable from the SavedModel format, the call
function is added as an extra function to serialize.
        This function and `objects_to_serialize` are called multiple times when
exporting to SavedModel. Please use the cache to avoid generating new
functions and objects. A fresh cache is created for each SavedModel
export.
Args:
serialization_cache: Dictionary passed to all objects in the same
object graph during serialization.
Returns:
A dictionary mapping attribute names to `Function` or
`ConcreteFunction`.
"""
raise NotImplementedError
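# --- Illustrative sketch (added for clarity; not part of the original
# module). A minimal concrete saver showing how the abstract interface above
# is typically filled in. The identifier string and the property dict are
# hypothetical; real savers (e.g. the Layer/Model savers) return the
# functions and objects needed to revive the TF-Keras object.
class _ExampleSaver(SavedModelSaver):
    @property
    def object_identifier(self):
        return "_tf_keras_example"  # hypothetical identifier string
    @property
    def python_properties(self):
        # Only JSON-serializable values should go in here.
        return {"name": getattr(self.obj, "name", None)}
    def objects_to_serialize(self, serialization_cache):
        return {}  # no extra checkpointable objects in this sketch
    def functions_to_serialize(self, serialization_cache):
        return {}  # no extra tf.functions in this sketch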
| tf-keras/tf_keras/saving/legacy/saved_model/base_serialization.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/base_serialization.py",
"repo_id": "tf-keras",
"token_count": 1736
} | 241 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes that list&validate all attributes to serialize to SavedModel.
"""
import tensorflow.compat.v2 as tf
from tf_keras.saving.legacy.saved_model import constants
from tf_keras.saving.legacy.saved_model import order_preserving_set as ops
from tf_keras.saving.legacy.saved_model import save_impl
from tf_keras.utils.generic_utils import LazyLoader
# TODO(b/134426265): Switch back to single-quotes to match the rest of the file
# once the issue with copybara is fixed.
base_layer = LazyLoader("base_layer", globals(), "tf_keras.engine.base_layer")
training_lib = LazyLoader("training_lib", globals(), "tf_keras.engine.training")
metrics = LazyLoader("metrics", globals(), "tf_keras.metrics")
base_rnn = LazyLoader("base_rnn", globals(), "tf_keras.layers.rnn.base_rnn")
class SerializedAttributes:
"""Class that tracks and validates all serialization attributes.
TF-Keras models contain many Python-defined components. For example, the
trainable_variable property lists the model's trainable variables by
recursively retrieving the trainable variables from each of the child
layers. Another example is model.call, a python function that calls child
layers and adds ops to the backend graph.
Only Tensorflow checkpointable objects and functions can be serialized to
SavedModel. Serializing a TF-Keras model as-is results in a checkpointable
object that does not resemble a TF-Keras model at all. Thus, extra
checkpointable objects and functions must be created during serialization.
**Defining new serialized attributes**
Child classes should be defined using:
SerializedAttributes.with_attributes(
'name', checkpointable_objects=[...],
functions=[...], copy_from=[...])
This class is used to cache generated checkpointable objects and functions,
ensuring that new objects and functions are generated a single time.
**Usage during serialization**
Each Layer/Model object should have a corresponding instance of
SerializedAttributes. Create a new instance by calling
`SerializedAttributes.new(obj)`. Objects and functions may be saved using
    `.set_and_validate_objects`/`.set_and_validate_functions`.
    The properties `.checkpointable_objects` and `.functions` return the cached
values.
**Adding/changing attributes to save to SavedModel**
1. Change the call to `SerializedAttributes.with_attributes` in the correct
class:
- CommonEndpoints: Base attributes to be added during serialization. If
these attributes are present in a Trackable object, it can be
deserialized to a TF-Keras Model.
- LayerAttributes: Attributes to serialize for Layer objects.
- ModelAttributes: Attributes to serialize for Model objects.
2. Update class docstring
3. Update arguments to any calls to `set_and_validate_*`. For example, if
`call_raw_tensors` is added to the ModelAttributes function list, then
a `call_raw_tensors` function should be passed to
`set_and_validate_functions`.
**Common endpoints vs other attributes**
Only common endpoints are attached directly to the root object.
Keras-specific attributes are saved to a separate trackable object with the
name "keras_api". The number of objects attached to the root is limited
because any naming conflicts will cause user code to break.
Another reason is that this will only affect users who call
`tf.saved_model.load` instead of `tf.keras.models.load_model`. These are
advanced users who are likely to have defined their own tf.functions and
trackable objects. The added Keras-specific attributes are kept out of the
way in the "keras_api" namespace.
Properties defined in this class may be used to filter out keras-specific
attributes:
- `functions_to_serialize`: Returns dict of functions to attach to the root
object.
- `checkpointable_objects_to_serialize`: Returns dict of objects to attach
to the root object (including separate trackable object containing
keras-specific attributes)
All changes to the serialized attributes must be backwards-compatible, so
attributes should not be removed or modified without sufficient
justification.
"""
@staticmethod
def with_attributes(
name, checkpointable_objects=None, functions=None, copy_from=None
):
"""Creates a subclass with all attributes as specified in the arguments.
Args:
name: Name of subclass
checkpointable_objects: List of checkpointable objects to be
serialized in the SavedModel.
functions: List of functions to be serialized in the SavedModel.
copy_from: List of other SerializedAttributes subclasses. The returned
class will copy checkpoint objects/functions from each subclass.
Returns:
Child class with attributes as defined in the `checkpointable_objects`
and `functions` lists.
"""
checkpointable_objects = checkpointable_objects or []
functions = functions or []
if copy_from is not None:
for cls in copy_from:
checkpointable_objects.extend(cls.all_checkpointable_objects)
functions.extend(cls.all_functions)
# OrderPreservingSets are used here to guarantee serialization
# determinism of TF-Keras objects.
classdict = {
"all_checkpointable_objects": ops.OrderPreservingSet(
checkpointable_objects
),
"all_functions": ops.OrderPreservingSet(functions),
}
return type(name, (SerializedAttributes,), classdict)
@staticmethod
def new(obj):
"""Returns a new SerializedAttribute object."""
if isinstance(obj, training_lib.Model):
return ModelAttributes()
elif isinstance(obj, metrics.Metric):
return MetricAttributes()
elif isinstance(obj, base_rnn.RNN):
return RNNAttributes()
elif isinstance(obj, base_layer.Layer):
return LayerAttributes()
else:
raise TypeError(
"Internal error during serialization. Expected TF-Keras "
f"Layer object. Received: {obj} "
f"(of type {type(obj)})"
)
def __init__(self):
self._object_dict = {}
self._function_dict = {}
self._keras_trackable = tf.__internal__.tracking.AutoTrackable()
@property
def functions(self):
"""Returns dictionary of all functions."""
return {
key: value
for key, value in self._function_dict.items()
if value is not None
}
@property
def checkpointable_objects(self):
"""Returns dictionary of all checkpointable objects."""
return {
key: value
for key, value in self._object_dict.items()
if value is not None
}
@property
def functions_to_serialize(self):
"""Returns functions to attach to the root object during
serialization."""
functions = {}
for key, v in self.functions.items():
if key in CommonEndpoints.all_functions:
functions[key] = (
v.wrapped_call if isinstance(v, save_impl.LayerCall) else v
)
return functions
@property
def objects_to_serialize(self):
"""Returns objects to attach to the root object during serialization."""
objects = {
key: value
for key, value in self.checkpointable_objects.items()
if key in CommonEndpoints.all_checkpointable_objects
}
objects[constants.KERAS_ATTR] = self._keras_trackable
return objects
def set_and_validate_functions(self, function_dict):
"""Saves function dictionary, and validates dictionary values."""
for key in self.all_functions:
if key in function_dict:
                # Not all functions are required.
                if function_dict[key] is not None and not isinstance(
                    function_dict[key],
                    (
                        tf.__internal__.function.Function,
                        tf.types.experimental.ConcreteFunction,
                        save_impl.LayerCall,
                    ),
                ):
raise ValueError(
"The tf.function dictionary contained a non-function "
f"object: {function_dict[key]} (for key {key}). Only "
"tf.function instances or ConcreteFunction instances "
"should be passed."
)
fn = function_dict[key]
self._function_dict[key] = fn
# Extract TensorFlow `Function` from LayerCall.
tf_fn = (
fn.wrapped_call
if isinstance(fn, save_impl.LayerCall)
else fn
)
setattr(self._keras_trackable, key, tf_fn)
else:
raise ValueError(
f"Function {key} missing from serialized "
"tf.function dictionary."
)
return self.functions
def set_and_validate_objects(self, object_dict):
"""Saves objects to a dictionary, and validates the values."""
for key in self.all_checkpointable_objects:
if key in object_dict:
if not isinstance(
object_dict[key], tf.__internal__.tracking.Trackable
):
raise ValueError(
"The object dictionary contained a non-trackable "
f"object: {object_dict[key]} (for key {key}). "
"Only trackable objects are "
"allowed, such as TF-Keras layers/models or "
"tf.Module instances."
)
self._object_dict[key] = object_dict[key]
setattr(self._keras_trackable, key, object_dict[key])
else:
raise ValueError(
f"Object {key} missing from serialized object dictionary."
)
return self.checkpointable_objects
class CommonEndpoints(
SerializedAttributes.with_attributes(
"CommonEndpoints",
checkpointable_objects=[
"variables",
"trainable_variables",
"regularization_losses",
],
functions=[
"__call__",
"call_and_return_all_conditional_losses",
"_default_save_signature",
],
)
):
"""Common endpoints shared by all models loadable by TF-Keras.
List of all attributes:
variables: List of all variables in the model and its sublayers.
trainable_variables: List of all trainable variables in the model and its
sublayers.
regularization_losses: List of all unconditional losses (losses not
dependent on the inputs) in the model and its sublayers.
__call__: Function that takes inputs and returns the outputs of the model
call function.
call_and_return_all_conditional_losses: Function that returns a tuple of
(call function outputs, list of all losses that depend on the inputs).
_default_save_signature: Traced model call function. This is only included
if the top level exported object is a TF-Keras model.
"""
class LayerAttributes(
SerializedAttributes.with_attributes(
"LayerAttributes",
checkpointable_objects=[
"non_trainable_variables",
"layers",
"metrics",
"layer_regularization_losses",
"layer_metrics",
],
functions=[
"call_and_return_conditional_losses",
"activity_regularizer_fn",
],
copy_from=[CommonEndpoints],
)
):
"""Layer checkpointable objects + functions saved to the SavedModel.
List of all attributes:
All attributes from CommonEndpoints
non_trainable_variables: List of non-trainable variables in the layer and
its sublayers.
layers: List of all sublayers.
metrics: List of all metrics in the layer and its sublayers.
call_and_return_conditional_losses: Function that takes inputs and returns
a tuple of (outputs of the call function, list of input-dependent
losses). The list of losses excludes the activity regularizer function,
which is separate to allow the deserialized Layer object to define a
different activity regularizer.
activity_regularizer_fn: Callable that returns the activity regularizer
loss
layer_regularization_losses: List of losses owned only by this layer.
layer_metrics: List of metrics owned by this layer.
"""
class ModelAttributes(
SerializedAttributes.with_attributes(
"ModelAttributes", copy_from=[LayerAttributes]
)
):
"""Model checkpointable objects + functions saved to the SavedModel.
List of all attributes:
All attributes from LayerAttributes (including CommonEndpoints)
"""
# TODO(kathywu): Add attributes `compile_losses` and `compile_metrics`,
# which list all losses and metrics defined by `model.compile`.
class MetricAttributes(
SerializedAttributes.with_attributes(
"MetricAttributes",
checkpointable_objects=["variables"],
functions=[],
)
):
"""Attributes that are added to Metric objects when saved to SavedModel.
List of all attributes:
variables: list of all variables
"""
pass
class RNNAttributes(
SerializedAttributes.with_attributes(
"RNNAttributes",
checkpointable_objects=["states"],
copy_from=[LayerAttributes],
)
):
"""RNN checkpointable objects + functions that are saved to the SavedModel.
List of all attributes:
All attributes from LayerAttributes (including CommonEndpoints)
states: List of state variables
"""
| tf-keras/tf_keras/saving/legacy/saved_model/serialized_attributes.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/serialized_attributes.py",
"repo_id": "tf-keras",
"token_count": 5812
} | 242 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run doctests for TF-Keras."""
import doctest
import re
import textwrap
import numpy as np
class _FloatExtractor(object):
"""Class for extracting floats from a string.
For example:
>>> text_parts, floats = _FloatExtractor()("Text 1.0 Text")
>>> text_parts
['Text ', ' Text']
>>> floats
array([1.])
"""
# Note: non-capturing groups "(?" are not returned in matched groups, or by
# re.split.
_FLOAT_RE = re.compile(
r"""
( # Captures the float value.
(?:
[-+]| # Start with a sign is okay anywhere.
(?: # Otherwise:
^| # Start after the start of string
(?<=[^\w.]) # Not after a word char, or a .
)
)
(?: # Digits and exponent - something like:
{digits_dot_maybe_digits}{exponent}?| # "1.0" "1." "1.0e3", "1.e3"
{dot_digits}{exponent}?| # ".1" ".1e3"
{digits}{exponent}| # "1e3"
{digits}(?=j) # "300j"
)
)
j? # Optional j for cplx numbers, not captured.
(?= # Only accept the match if
$| # * At the end of the string, or
[^\w.] # * Next char is not a word char or "."
)
""".format(
# Digits, a "." and optional more digits: "1.1".
digits_dot_maybe_digits=r"(?:[0-9]+\.(?:[0-9]*))",
# A "." with trailing digits ".23"
dot_digits=r"(?:\.[0-9]+)",
# digits: "12"
digits=r"(?:[0-9]+)",
# The exponent: An "e" or "E", optional sign, and at least one
# digit. "e-123", "E+12", "e12"
exponent=r"(?:[eE][-+]?[0-9]+)",
),
re.VERBOSE,
)
def __call__(self, string):
"""Extracts floats from a string.
>>> text_parts, floats = _FloatExtractor()("Text 1.0 Text")
>>> text_parts
['Text ', ' Text']
>>> floats
array([1.])
Args:
string: the string to extract floats from.
Returns:
A (string, array) pair, where `string` has each float replaced by
"..." and `array` is a `float32` `numpy.array` containing the
extracted floats.
"""
texts = []
floats = []
for i, part in enumerate(self._FLOAT_RE.split(string)):
if i % 2 == 0:
texts.append(part)
else:
floats.append(float(part))
return texts, np.array(floats)
class KerasDoctestOutputChecker(doctest.OutputChecker, object):
"""Customizes how `want` and `got` are compared, see `check_output`."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.extract_floats = _FloatExtractor()
self.text_good = None
self.float_size_good = None
_ADDRESS_RE = re.compile(r"\bat 0x[0-9a-f]*?>")
    # TODO(yashkatariya): Add string substitutions for other tensor types too.
# tf.RaggedTensor doesn't need one.
_NUMPY_OUTPUT_RE = re.compile(r"<tf.Tensor.*?numpy=(.*?)>", re.DOTALL)
def _allclose(self, want, got, rtol=1e-3, atol=1e-3):
return np.allclose(want, got, rtol=rtol, atol=atol)
def _tf_tensor_numpy_output(self, string):
modified_string = self._NUMPY_OUTPUT_RE.sub(r"\1", string)
return modified_string, modified_string != string
MESSAGE = textwrap.dedent(
"""\n
#############################################################
Check the documentation (go/testable-docstrings) on how to
write testable docstrings.
#############################################################"""
)
def check_output(self, want, got, optionflags):
"""Compares the docstring output to the output gotten by running the
code.
Python addresses in the output are replaced with wildcards.
        Float values in the output are compared using `np.allclose`:
* Float values are extracted from the text and replaced with
wildcards.
* The wildcard text is compared to the actual output.
* The float values are compared using `np.allclose`.
The method returns `True` if both the text comparison and the numeric
comparison are successful.
The numeric comparison will fail if either:
* The wrong number of floats are found.
        * The float values are not within tolerance.
Args:
want: The output in the docstring.
got: The output generated after running the snippet.
optionflags: Flags passed to the doctest.
Returns:
A bool, indicating if the check was successful or not.
"""
# If the docstring's output is empty and there is some output generated
# after running the snippet, return True. This is because if the user
# doesn't want to display output, respect that over what the doctest
# wants.
if got and not want:
return True
if want is None:
want = ""
# Replace python's addresses with ellipsis (`...`) since it can change
# on each execution.
want = self._ADDRESS_RE.sub("at ...>", want)
# Replace tf.Tensor strings with only their numpy field values.
want, want_changed = self._tf_tensor_numpy_output(want)
if want_changed:
got, _ = self._tf_tensor_numpy_output(got)
# Separate out the floats, and replace `want` with the wild-card version
# "result=7.0" => "result=..."
want_text_parts, self.want_floats = self.extract_floats(want)
want_text_wild = "...".join(want_text_parts)
# Find the floats in the string returned by the test
_, self.got_floats = self.extract_floats(got)
self.text_good = super().check_output(
want=want_text_wild, got=got, optionflags=optionflags
)
if not self.text_good:
return False
if self.want_floats.size == 0:
# If there are no floats in the "want" string, ignore all the floats
# in the result. "np.array([ ... ])" matches "np.array([ 1.0, 2.0
# ])"
return True
self.float_size_good = self.want_floats.size == self.got_floats.size
if self.float_size_good:
return self._allclose(self.want_floats, self.got_floats)
else:
return False
def output_difference(self, example, got, optionflags):
got = [got]
        # If some of the float output is hidden with `...`,
        # `float_size_good` will be False. This is because the floats extracted
        # from the string are converted into a 1-D numpy array, so hiding
        # floats is not allowed anymore.
if self.text_good:
if not self.float_size_good:
got.append(
"\n\nCAUTION: tf_doctest doesn't work if *some* of the "
'*float output* is hidden with a "...".'
)
got.append(self.MESSAGE)
got = "\n".join(got)
return super().output_difference(example, got, optionflags)
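# --- Illustrative sketch (added for clarity; not part of the original
# module). Shows how an output checker like the one above is typically wired
# into the standard-library `doctest` machinery; `module` is a placeholder
# for whichever module's docstrings should be checked.
def _run_doctests_with_keras_checker(module, verbose=False):
    finder = doctest.DocTestFinder()
    runner = doctest.DocTestRunner(
        checker=KerasDoctestOutputChecker(),
        verbose=verbose,
        optionflags=doctest.ELLIPSIS,
    )
    for test in finder.find(module):
        runner.run(test)
    # Returns a (failed, attempted) named tuple.
    return runner.summarize(verbose=verbose)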
| tf-keras/tf_keras/testing_infra/keras_doctest_lib.py/0 | {
"file_path": "tf-keras/tf_keras/testing_infra/keras_doctest_lib.py",
"repo_id": "tf-keras",
"token_count": 3588
} | 243 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import tensorflow.compat.v2 as tf
import tf_keras as keras
# isort: off
from tensorflow.python.framework.memory_checker import (
MemoryChecker,
)
class MemoryCheckerTest(tf.test.TestCase):
def testKerasBasic(self):
# TODO(kkb): Fix the slowness on Forge.
self.skipTest("This test is too slow on Forge so disabled for now.")
x = tf.zeros([1, 1])
y = tf.constant([[3]])
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=1))
model.compile(loss="mean_squared_error")
with MemoryChecker() as memory_checker:
for _ in range(10):
model.fit(x, y)
model.evaluate(x, y)
memory_checker.record_snapshot()
memory_checker.report()
memory_checker.assert_no_leak_if_all_possibly_except_one()
def testKerasAdvanced(self):
# TODO(kkb): Fix the slowness on Forge.
self.skipTest("This test is too slow on Forge so disabled for now.")
# A real world example taken from the following.
# https://github.com/tensorflow/tensorflow/issues/32500
# b/142150794
with MemoryChecker() as memory_checker:
rows = 6
columns = 7
model = keras.Sequential(
[
keras.layers.Flatten(input_shape=[rows * columns, 3]),
keras.layers.Dense(7, input_shape=[rows * columns * 3]),
]
)
model.compile(
optimizer=keras.optimizers.legacy.gradient_descent.SGD(lr=0.01),
loss="mean_squared_error",
metrics=["accuracy"],
)
states = [[1] * rows * columns for _ in range(20)]
f = tf.one_hot(states, dtype="float32", depth=3)
for _ in range(20):
model.predict(f, steps=10)
memory_checker.record_snapshot()
memory_checker.report()
memory_checker.assert_no_leak_if_all_possibly_except_one()
if __name__ == "__main__":
tf.compat.v1.enable_eager_execution()
tf.test.main()
| tf-keras/tf_keras/tests/memory_checker_test.py/0 | {
"file_path": "tf-keras/tf_keras/tests/memory_checker_test.py",
"repo_id": "tf-keras",
"token_count": 1212
} | 244 |
#!/bin/bash
BAZEL_VERSION=5.4.0
rm -rf ~/bazel
mkdir ~/bazel
pushd ~/bazel
wget https://github.com/bazelbuild/bazel/releases/download/"${BAZEL_VERSION}"/bazel-"${BAZEL_VERSION}"-installer-linux-x86_64.sh
chmod +x bazel-*.sh
./bazel-"${BAZEL_VERSION}"-installer-linux-x86_64.sh --user
rm bazel-"${BAZEL_VERSION}"-installer-linux-x86_64.sh
popd
PATH="/home/kbuilder/bin:$PATH"
which bazel
bazel version
TAG_FILTERS="-no_oss,-oss_excluded,-oss_serial,-gpu,-benchmark-test,-no_oss_py3,-no_pip,-nopip"
bazel build \
--define=use_fast_cpp_protos=false \
--build_tag_filters="${TAG_FILTERS}" \
-- //tf_keras/...
| tf-keras/tf_keras/tools/bazel_build.sh/0 | {
"file_path": "tf-keras/tf_keras/tools/bazel_build.sh",
"repo_id": "tf-keras",
"token_count": 274
} | 245 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for file download and caching."""
import functools
import hashlib
import multiprocessing.dummy
import os
import pathlib
import queue
import random
import shutil
import tarfile
import threading
import time
import typing
import urllib
import warnings
import weakref
import zipfile
from abc import abstractmethod
from contextlib import closing
import numpy as np
import tensorflow.compat.v2 as tf
from six.moves.urllib.parse import urlsplit
from tf_keras.utils import io_utils
from tf_keras.utils import tf_inspect
from tf_keras.utils.generic_utils import Progbar
# isort: off
from tensorflow.python.util.tf_export import keras_export
from six.moves.urllib.request import urlopen
# Required to support google internal urlretrieve
if True: # This gets transformed to `if sys.version_info[0] == 2:` in OSS.
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrieve` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
Args:
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once on
establishment of the network connection and once after each block
read thereafter. The hook will be passed three arguments; a count
of blocks transferred so far, a block size in bytes, and the total
size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
content_type = response.info().get("Content-Length")
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
response = urlopen(url, data)
with open(filename, "wb") as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from urllib.request import urlretrieve
def is_generator_or_sequence(x):
"""Check if `x` is a TF-Keras generator type."""
builtin_iterators = (str, list, tuple, dict, set, frozenset)
if isinstance(x, (tf.Tensor, np.ndarray) + builtin_iterators):
return False
return (
tf_inspect.isgenerator(x)
or isinstance(x, Sequence)
or isinstance(x, typing.Iterator)
)
def _resolve_path(path):
return os.path.realpath(os.path.abspath(path))
def _is_path_in_dir(path, base_dir):
return _resolve_path(os.path.join(base_dir, path)).startswith(base_dir)
def _is_link_in_dir(info, base):
tip = _resolve_path(os.path.join(base, os.path.dirname(info.name)))
return _is_path_in_dir(info.linkname, base_dir=tip)
def _filter_safe_paths(members):
base_dir = _resolve_path(".")
for finfo in members:
valid_path = False
if _is_path_in_dir(finfo.name, base_dir):
valid_path = True
yield finfo
elif finfo.issym() or finfo.islnk():
if _is_link_in_dir(finfo, base_dir):
valid_path = True
yield finfo
if not valid_path:
warnings.warn(
"Skipping invalid path during archive extraction: "
f"'{finfo.name}'."
)
def _extract_archive(file_path, path=".", archive_format="auto"):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
Args:
file_path: Path to the archive file.
path: Where to extract the archive file.
archive_format: Archive format to try for extracting the file.
Options are `'auto'`, `'tar'`, `'zip'`, and `None`.
`'tar'` includes tar, tar.gz, and tar.bz files.
The default 'auto' is `['tar', 'zip']`.
`None` or an empty list will return no matches found.
Returns:
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == "auto":
archive_format = ["tar", "zip"]
if isinstance(archive_format, str):
archive_format = [archive_format]
file_path = io_utils.path_to_string(file_path)
path = io_utils.path_to_string(path)
for archive_type in archive_format:
if archive_type == "tar":
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == "zip":
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
if zipfile.is_zipfile(file_path):
# Zip archive.
archive.extractall(path)
else:
# Tar archive, perhaps unsafe. Filter paths.
archive.extractall(
path, members=_filter_safe_paths(archive)
)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
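# --- Illustrative sketch (added for clarity; not part of the original
# module). Typical use of `_extract_archive` on a local archive; the paths
# below are hypothetical.
def _example_extract_archive(archive_path="/tmp/example_data.tar.gz"):
    # Returns True only if the file matched one of the supported formats
    # and was extracted successfully.
    return _extract_archive(
        archive_path, path="/tmp/example_data", archive_format="auto"
    )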
@keras_export("keras.utils.get_file")
def get_file(
fname=None,
origin=None,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir="datasets",
hash_algorithm="auto",
extract=False,
archive_format="auto",
cache_dir=None,
):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
Example:
```python
path_to_downloaded_file = tf.keras.utils.get_file(
origin="https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
extract=True,
)
```
Args:
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location. If `None`, the
name of the file at `origin` will be used.
origin: Original URL of the file.
untar: Deprecated in favor of `extract` argument.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of `file_hash` argument.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the TF-Keras cache dir where the file
is saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are `'md5'`, `'sha256'`, and `'auto'`.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are `'auto'`, `'tar'`, `'zip'`, and `None`.
`'tar'` includes tar, tar.gz, and tar.bz files.
The default `'auto'` corresponds to `['tar', 'zip']`.
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to `~/.keras/`.
Returns:
Path to the downloaded file.
⚠️ **Warning on malicious downloads** ⚠️
Downloading something from the Internet carries a risk.
NEVER download a file/archive if you do not trust the source.
We recommend that you specify the `file_hash` argument
(if the hash of the source file is known) to make sure that the file you
are getting is the one you expect.
"""
if origin is None:
raise ValueError(
'Please specify the "origin" argument (URL of the file '
"to download)."
)
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser("~"), ".keras")
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = "md5"
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join("/tmp", ".keras")
datadir = os.path.join(datadir_base, cache_subdir)
_makedirs_exist_ok(datadir)
fname = io_utils.path_to_string(fname)
if not fname:
fname = os.path.basename(urlsplit(origin).path)
if not fname:
raise ValueError(
"Can't parse the file name from the origin provided: "
f"'{origin}'."
"Please specify the `fname` as the input param."
)
if untar:
if fname.endswith(".tar.gz"):
fname = pathlib.Path(fname)
# The 2 `.with_suffix()` are because of `.tar.gz` as pathlib
# considers it as 2 suffixes.
fname = fname.with_suffix("").with_suffix("")
fname = str(fname)
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + ".tar.gz"
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
io_utils.print_msg(
"A local file was found, but it seems to be "
f"incomplete or outdated because the {hash_algorithm} "
"file hash does not match the original value of "
f"{file_hash} "
"so we will re-download the data."
)
download = True
else:
download = True
if download:
io_utils.print_msg(f"Downloading data from {origin}")
class DLProgbar:
"""Manage progress bar state for use in urlretrieve."""
def __init__(self):
self.progbar = None
self.finished = False
def __call__(self, block_num, block_size, total_size):
if not self.progbar:
if total_size == -1:
total_size = None
self.progbar = Progbar(total_size)
current = block_num * block_size
if total_size is None:
self.progbar.update(current)
else:
if current < total_size:
self.progbar.update(current)
elif not self.finished:
self.progbar.update(self.progbar.target)
self.finished = True
error_msg = "URL fetch failure on {}: {} -- {}"
try:
try:
urlretrieve(origin, fpath, DLProgbar())
except urllib.error.HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except urllib.error.URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
# Validate download if succeeded and user provided an expected hash
# Security conscious users would get the hash of the file from a
# separate channel and pass it to this API to prevent MITM / corruption:
if os.path.exists(fpath) and file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
raise ValueError(
"Incomplete or corrupted file detected. "
f"The {hash_algorithm} "
"file hash does not match the provided value "
f"of {file_hash}."
)
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format="tar")
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def _makedirs_exist_ok(datadir):
os.makedirs(datadir, exist_ok=True)
def _resolve_hasher(algorithm, file_hash=None):
"""Returns hash algorithm as hashlib function."""
if algorithm == "sha256":
return hashlib.sha256()
if algorithm == "auto" and file_hash is not None and len(file_hash) == 64:
return hashlib.sha256()
# This is used only for legacy purposes.
return hashlib.md5()
def _hash_file(fpath, algorithm="sha256", chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
_hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Args:
fpath: Path to the file being validated.
algorithm: Hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`.
The default `'auto'` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash.
"""
if isinstance(algorithm, str):
hasher = _resolve_hasher(algorithm)
else:
hasher = algorithm
with open(fpath, "rb") as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b""):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm="auto", chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
Args:
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
Whether the file is valid
"""
hasher = _resolve_hasher(algorithm, file_hash)
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
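# --- Illustrative sketch (added for clarity; not part of the original
# module). Downloading and verifying a file against a hash obtained through
# a separate, trusted channel. The URL and digest below are placeholders.
def _example_verified_download():
    expected_sha256 = "0" * 64  # placeholder for the real sha256 digest
    fpath = get_file(
        origin="https://example.com/data.tar.gz",  # hypothetical URL
        file_hash=expected_sha256,
    )
    # `get_file` already validates when `file_hash` is passed; an explicit
    # re-check with `validate_file` looks like this:
    return validate_file(fpath, expected_sha256, algorithm="sha256")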
class ThreadsafeIter:
"""Wrap an iterator with a lock and propagate exceptions to all threads."""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
# After a generator throws an exception all subsequent next() calls
# raise a StopIteration Exception. This, however, presents an issue when
# mixing generators and threading because it means the order of
# retrieval need not match the order in which the generator was called.
# This can make it appear that a generator exited normally when in fact
# the terminating exception is just in a different thread. In order to
# provide thread safety, once self.it has thrown an exception we
# continue to throw the same exception.
self._exception = None
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
with self.lock:
if self._exception:
raise self._exception
try:
return next(self.it)
except Exception as e:
self._exception = e
raise
def threadsafe_generator(f):
@functools.wraps(f)
def g(*a, **kw):
return ThreadsafeIter(f(*a, **kw))
return g
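# --- Illustrative sketch (added for clarity; not part of the original
# module). Decorating a plain generator so several worker threads can pull
# from the same iterator without interleaving inside the generator frame.
@threadsafe_generator
def _example_counter(limit=3):
    for i in range(limit):
        yield i
# Calling `_example_counter()` now returns a ThreadsafeIter; e.g.
# `next(_example_counter())` yields 0.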
@keras_export("keras.utils.Sequence")
class Sequence:
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs, you may implement
`on_epoch_end`. The method `__getitem__` should return a complete batch.
Notes:
`Sequence` is a safer way to do multiprocessing. This structure guarantees
that the network will only train once on each sample per epoch, which is not
the case with generators.
Examples:
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
import math
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(tf.keras.utils.Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return math.ceil(len(self.x) / self.batch_size)
def __getitem__(self, idx):
low = idx * self.batch_size
# Cap upper bound at array length; the last batch may be smaller
# if the total number of items is not a multiple of batch size.
high = min(low + self.batch_size, len(self.x))
batch_x = self.x[low:high]
batch_y = self.y[low:high]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
Args:
index: position of the batch in the Sequence.
Returns:
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
Returns:
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch."""
pass
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
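# --- Illustrative sketch (added for clarity; not part of the original
# module). A minimal, self-contained Sequence over in-memory NumPy arrays,
# complementing the image-loading example in the docstring above.
class _ExampleArraySequence(Sequence):
    def __init__(self, x, y, batch_size):
        self.x, self.y, self.batch_size = x, y, batch_size
    def __len__(self):
        return int(np.ceil(len(self.x) / self.batch_size))
    def __getitem__(self, idx):
        low = idx * self.batch_size
        high = min(low + self.batch_size, len(self.x))
        return self.x[low:high], self.y[low:high]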
def iter_sequence_infinite(seq):
"""Iterates indefinitely over a Sequence.
Args:
seq: `Sequence` instance.
Yields:
Batches of data from the `Sequence`.
"""
while True:
for item in seq:
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide a unique id to different processes.
_SEQUENCE_COUNTER = None
# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None # Only created if needed.
_WORKER_IDS = set()
_FORCE_THREADPOOL = False
_FORCE_THREADPOOL_LOCK = threading.RLock()
def dont_use_multiprocessing_pool(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
with _FORCE_THREADPOOL_LOCK:
global _FORCE_THREADPOOL
old_force_threadpool, _FORCE_THREADPOOL = _FORCE_THREADPOOL, True
out = f(*args, **kwargs)
_FORCE_THREADPOOL = old_force_threadpool
return out
return wrapped
def get_pool_class(use_multiprocessing):
global _FORCE_THREADPOOL
if not use_multiprocessing or _FORCE_THREADPOOL:
return multiprocessing.dummy.Pool # ThreadPool
return multiprocessing.Pool
def get_worker_id_queue():
"""Lazily create the queue to track worker ids."""
global _WORKER_ID_QUEUE
if _WORKER_ID_QUEUE is None:
_WORKER_ID_QUEUE = multiprocessing.Queue()
return _WORKER_ID_QUEUE
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
Args:
uid: int, Sequence identifier
i: index
Returns:
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
@keras_export("keras.utils.SequenceEnqueuer")
class SequenceEnqueuer:
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
Example:
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.stop()
```
The `enqueuer.get()` should be an infinite stream of data.
"""
def __init__(self, sequence, use_multiprocessing=False):
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
global _SEQUENCE_COUNTER
if _SEQUENCE_COUNTER is None:
try:
_SEQUENCE_COUNTER = multiprocessing.Value("i", 0)
except OSError:
# In this case the OS does not allow us to use
# multiprocessing. We resort to an int
# for enqueuer indexing.
_SEQUENCE_COUNTER = 0
if isinstance(_SEQUENCE_COUNTER, int):
self.uid = _SEQUENCE_COUNTER
_SEQUENCE_COUNTER += 1
else:
# Doing Multiprocessing.Value += x is not process-safe.
with _SEQUENCE_COUNTER.get_lock():
self.uid = _SEQUENCE_COUNTER.value
_SEQUENCE_COUNTER.value += 1
self.workers = 0
self.executor_fn = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Starts the handler's workers.
Args:
workers: Number of workers.
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = self._get_executor_init(workers)
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: get_pool_class(False)(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _send_sequence(self):
"""Sends current Iterable to all workers."""
# For new processes that may spawn
_SHARED_SEQUENCES[self.uid] = self.sequence
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
Args:
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
def __del__(self):
if self.is_running():
self.stop()
@abstractmethod
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
raise NotImplementedError
@abstractmethod
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
        Returns:
            Generator yielding tuples `(inputs, targets)`
            or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
@keras_export("keras.utils.OrderedEnqueuer")
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Args:
sequence: A `tf.keras.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
super().__init__(sequence, use_multiprocessing)
self.shuffle = shuffle
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
def pool_fn(seqs):
pool = get_pool_class(True)(
workers,
initializer=init_pool_generator,
initargs=(seqs, None, get_worker_id_queue()),
)
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)),
block=True,
)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
while self.is_running():
try:
inputs = self.queue.get(block=True, timeout=5).get()
if self.is_running():
self.queue.task_done()
if inputs is not None:
yield inputs
except queue.Empty:
pass
except Exception as e:
self.stop()
raise e
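# --- Illustrative sketch (added for clarity; not part of the original
# module). Typical OrderedEnqueuer usage with any `Sequence` instance;
# `my_sequence` is a placeholder for user code.
def _example_ordered_enqueuer(my_sequence):
    enqueuer = OrderedEnqueuer(
        my_sequence, use_multiprocessing=False, shuffle=True
    )
    enqueuer.start(workers=2, max_queue_size=8)
    batches = enqueuer.get()  # infinite, ordered stream of batches
    first_batch = next(batches)
    enqueuer.stop()
    return first_batch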
def init_pool_generator(gens, random_seed=None, id_queue=None):
"""Initializer function for pool workers.
Args:
gens: State which should be made available to worker processes.
random_seed: An optional value with which to seed child processes.
id_queue: A multiprocessing Queue of worker ids. This is used to indicate
that a worker process was created by TF-Keras and can be terminated
using the cleanup_all_keras_forkpools utility.
"""
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
worker_proc = multiprocessing.current_process()
# name isn't used for anything, but setting a more descriptive name is
# helpful when diagnosing orphaned processes.
worker_proc.name = f"Keras_worker_{worker_proc.name}"
if random_seed is not None:
np.random.seed(random_seed + worker_proc.ident)
if id_queue is not None:
# If a worker dies during init, the pool will just create a replacement.
id_queue.put(worker_proc.ident, block=True, timeout=0.1)
def next_sample(uid):
"""Gets the next value from the generator `uid`.
To allow multiple generators to be used at the same time, we use `uid` to
get a specific one. A single generator would cause the validation to
overwrite the training generator.
Args:
uid: int, generator identifier
Returns:
The next value of generator `uid`.
"""
return next(_SHARED_SEQUENCES[uid])
@keras_export("keras.utils.GeneratorEnqueuer")
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
    The provided generator can be finite, in which case the class will throw
a `StopIteration` exception.
Args:
generator: a generator function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
random_seed: Initial seed for workers,
will be incremented by one for each worker.
"""
def __init__(self, generator, use_multiprocessing=False, random_seed=None):
super().__init__(generator, use_multiprocessing)
self.random_seed = random_seed
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
            workers: Number of workers.
Returns:
A Function to initialize the pool
"""
def pool_fn(seqs):
pool = get_pool_class(True)(
workers,
initializer=init_pool_generator,
initargs=(seqs, self.random_seed, get_worker_id_queue()),
)
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
self._send_sequence() # Share the initial generator
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while True:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(next_sample, (self.uid,)), block=True
)
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except StopIteration:
# Special case for finite generators
last_ones = []
while self.queue.qsize() > 0:
last_ones.append(self.queue.get(block=True))
# Wait for them to complete
for f in last_ones:
f.wait()
# Keep the good ones
last_ones = [
future.get() for future in last_ones if future.successful()
]
for inputs in last_ones:
if inputs is not None:
yield inputs
except Exception as e:
self.stop()
if "generator already executing" in str(e):
raise RuntimeError(
"Your generator is NOT thread-safe. "
"Keras requires a thread-safe generator when "
"`use_multiprocessing=False, workers > 1`. "
)
raise e
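# --- Illustrative sketch (added for clarity; not part of the original
# module). Wrapping a plain generator; the synthetic data below stands in
# for real preprocessing code.
def _example_generator_enqueuer():
    def _gen():
        while True:
            yield np.zeros((8, 4)), np.zeros((8, 1))
    enqueuer = GeneratorEnqueuer(_gen(), use_multiprocessing=False)
    enqueuer.start(workers=1, max_queue_size=4)
    inputs, targets = next(enqueuer.get())
    enqueuer.stop()
    return inputs.shape, targets.shape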
@keras_export(
"keras.utils.pad_sequences", "keras.preprocessing.sequence.pad_sequences"
)
def pad_sequences(
sequences,
maxlen=None,
dtype="int32",
padding="pre",
truncating="pre",
value=0.0,
):
"""Pads sequences to the same length.
This function transforms a list (of length `num_samples`)
of sequences (lists of integers)
into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
`num_timesteps` is either the `maxlen` argument if provided,
or the length of the longest sequence in the list.
Sequences that are shorter than `num_timesteps`
are padded with `value` until they are `num_timesteps` long.
Sequences longer than `num_timesteps` are truncated
so that they fit the desired length.
The position where padding or truncation happens is determined by
the arguments `padding` and `truncating`, respectively.
Pre-padding or removing values from the beginning of the sequence is the
default.
>>> sequence = [[1], [2, 3], [4, 5, 6]]
>>> tf.keras.utils.pad_sequences(sequence)
array([[0, 0, 1],
[0, 2, 3],
[4, 5, 6]], dtype=int32)
>>> tf.keras.utils.pad_sequences(sequence, value=-1)
array([[-1, -1, 1],
[-1, 2, 3],
[ 4, 5, 6]], dtype=int32)
>>> tf.keras.utils.pad_sequences(sequence, padding='post')
array([[1, 0, 0],
[2, 3, 0],
[4, 5, 6]], dtype=int32)
>>> tf.keras.utils.pad_sequences(sequence, maxlen=2)
array([[0, 1],
[2, 3],
[5, 6]], dtype=int32)
Args:
sequences: List of sequences (each sequence is a list of integers).
maxlen: Optional Int, maximum length of all sequences. If not provided,
sequences will be padded to the length of the longest individual
sequence.
dtype: (Optional). Type of the output sequences.
To pad sequences with variable length strings, you can use `object`.
Defaults to `"int32"`.
padding: String, "pre" or "post" (optional):
pad either before or after each sequence. Defaults to `"pre"`.
truncating: String, "pre" or "post" (optional):
remove values from sequences larger than
`maxlen`, either at the beginning or at the end of the sequences.
Defaults to `"pre"`.
value: Float or String, padding value. (Optional). Defaults to `0.`.
Returns:
Numpy array with shape `(len(sequences), maxlen)`
Raises:
ValueError: In case of invalid values for `truncating` or `padding`,
or in case of invalid shape for a `sequences` entry.
"""
if not hasattr(sequences, "__len__"):
raise ValueError("`sequences` must be iterable.")
num_samples = len(sequences)
lengths = []
sample_shape = ()
flag = True
    # Take the sample shape from the first non-empty sequence,
    # checking for consistency in the main loop below.
for x in sequences:
try:
lengths.append(len(x))
if flag and len(x):
sample_shape = np.asarray(x).shape[1:]
flag = False
except TypeError as e:
raise ValueError(
"`sequences` must be a list of iterables. "
f"Found non-iterable: {str(x)}"
) from e
if maxlen is None:
maxlen = np.max(lengths)
is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(
dtype, np.unicode_
)
if isinstance(value, str) and dtype != object and not is_dtype_str:
raise ValueError(
f"`dtype` {dtype} is not compatible with `value`'s type: "
f"{type(value)}\nYou should set `dtype=object` for variable length "
"strings."
)
x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == "pre":
trunc = s[-maxlen:]
elif truncating == "post":
trunc = s[:maxlen]
else:
raise ValueError(f'Truncating type "{truncating}" not understood')
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError(
f"Shape of sample {trunc.shape[1:]} of sequence at "
f"position {idx} is different from expected shape "
f"{sample_shape}"
)
if padding == "post":
x[idx, : len(trunc)] = trunc
elif padding == "pre":
x[idx, -len(trunc) :] = trunc
else:
raise ValueError(f'Padding type "{padding}" not understood')
return x
| tf-keras/tf_keras/utils/data_utils.py/0 | {
"file_path": "tf-keras/tf_keras/utils/data_utils.py",
"repo_id": "tf-keras",
"token_count": 16873
} | 246 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test covering sidecar_evaluator.py."""
import enum
import os
import shutil
import threading
import time
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.optimizers import sgd
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import np_utils
from tf_keras.utils import sidecar_evaluator as sidecar_evaluator_lib
from tf_keras.utils.sidecar_evaluator import SidecarEvaluatorModelExport
# isort: off
from tensorflow.python.platform import tf_logging as logging
_BATCH_SIZE = 32
TRAIN_SAMPLES = 20
TEST_SAMPLES = 20
INPUT_DIM = 3
NUM_CLASSES = 2
NUM_HIDDEN = 5
BATCH_SIZE = 5
class TestModel(keras.Model):
def __init__(self):
super().__init__(name="test_model")
self.dense = keras.layers.Dense(10)
def call(self, inputs):
return self.dense(inputs)
class DictMetric(keras.metrics.MeanSquaredError):
def result(self):
res = super().result()
return {"mean_squared_error_1": res, "mean_squared_error_2": res}
class ModelType(enum.Enum):
SEQUENTIAL = "sequential"
SUBCLASS = "subclass"
def _test_model_builder(model_type: ModelType, compile_model, build_model):
if model_type == ModelType.SEQUENTIAL:
model = keras.Sequential([keras.layers.Dense(10)])
elif model_type == ModelType.SUBCLASS:
model = TestModel()
if compile_model:
model.compile(
sgd.SGD(),
loss="mse",
metrics=[keras.metrics.CategoricalAccuracy(), DictMetric()],
)
if build_model:
model.build((None, 32))
return model
@test_utils.run_v2_only
class SidecarEvaluatorTest(tf.test.TestCase, parameterized.TestCase):
def assertSummaryEventsWritten(self, log_dir):
# Asserts summary files do get written when log_dir is provided.
summary_files = tf.io.gfile.listdir(log_dir)
self.assertNotEmpty(
summary_files,
"Summary should have been written and log_dir should not be empty.",
)
# Asserts the content of the summary file.
event_pb_written = False
event_tags = []
for summary_file in summary_files:
for event_pb in tf.compat.v1.train.summary_iterator(
os.path.join(log_dir, summary_file)
):
if event_pb.step > 0:
self.assertEqual(event_pb.step, 32)
event_tags.append(event_pb.summary.value[0].tag)
event_pb_written = True
self.assertCountEqual(
event_tags,
[
"evaluation_categorical_accuracy_vs_iterations",
"evaluation_loss_vs_iterations",
"evaluation_mean_squared_error_1_vs_iterations",
"evaluation_mean_squared_error_2_vs_iterations",
],
)
# Verifying at least one non-zeroth step is written to summary.
self.assertTrue(event_pb_written)
def assertModelsSameVariables(self, model_a, model_b):
# Check both have the same number of variables.
self.assertEqual(len(model_a.variables), len(model_b.variables))
# Check variable values to be equal.
for var_a, var_b in zip(model_a.variables, model_b.variables):
self.assertAllEqual(var_a.numpy(), var_b.numpy())
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=["eager"],
model_type=[ModelType.SEQUENTIAL, ModelType.SUBCLASS],
)
)
def testIterationsNotSavedWillRaiseError(self, model_type):
model = _test_model_builder(
model_type=model_type, compile_model=False, build_model=True
)
checkpoint_dir = self.get_temp_dir()
checkpoint = tf.train.Checkpoint(model=model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, checkpoint_dir, max_to_keep=2
)
checkpoint_manager.save()
sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
model, data=None, checkpoint_dir=checkpoint_dir
)
with self.assertRaisesRegex(
RuntimeError,
"`iterations` cannot be loaded from the checkpoint file.",
):
sidecar_evaluator.start()
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=["eager"],
model_type=[ModelType.SEQUENTIAL, ModelType.SUBCLASS],
)
)
def testModelNotBuiltRaiseError(self, model_type):
model = _test_model_builder(
model_type=model_type, compile_model=False, build_model=False
)
checkpoint_dir = self.get_temp_dir()
checkpoint = tf.train.Checkpoint(model=model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, checkpoint_dir, max_to_keep=2
)
checkpoint_manager.save()
sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
model, data=None, checkpoint_dir=checkpoint_dir
)
with self.assertRaisesRegex(AssertionError, "Nothing to load."):
sidecar_evaluator.start()
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=["eager"],
model_type=[ModelType.SEQUENTIAL, ModelType.SUBCLASS],
build_model=[True, False],
)
)
def testSidecarEvaluatorOutputsSummary(self, model_type, build_model):
# Create a model with synthetic data, and fit for one epoch.
model = _test_model_builder(
model_type=model_type, compile_model=True, build_model=False
)
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
model.fit(dataset, epochs=1)
# Save a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "ckpt")
log_dir = os.path.join(self.get_temp_dir(), "summary")
logging.info(
"checkpoint_dir = %s, log_dir = %s", checkpoint_dir, log_dir
)
checkpoint = tf.train.Checkpoint(model=model, optimizer=model.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, checkpoint_dir, max_to_keep=2
)
logging.info(
"Checkpoint manager saved to: %s", checkpoint_manager.save()
)
self.assertNotEmpty(
tf.io.gfile.listdir(checkpoint_dir),
"Checkpoint should have been written and "
"checkpoint_dir should not be empty.",
)
# Create a new model used for evaluation.
eval_model = _test_model_builder(
model_type=model_type, compile_model=True, build_model=build_model
)
# Have a sidecar_evaluator evaluate once.
sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
eval_model,
data=dataset,
checkpoint_dir=checkpoint_dir,
max_evaluations=1,
callbacks=[keras.callbacks.TensorBoard(log_dir=log_dir)],
)
sidecar_evaluator.start()
# Eval model has been restored to the same state as the original model,
# so their weights should match. If not, restoration of the model didn't
# work.
self.assertModelsSameVariables(model, eval_model)
self.assertSummaryEventsWritten(os.path.join(log_dir, "validation"))
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=["eager"],
model_type=[ModelType.SEQUENTIAL, ModelType.SUBCLASS],
build_model=[True, False],
)
)
def testSidecarEvaluatorOutputsSummarySavedWithCallback(
self, model_type, build_model
):
checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoints")
log_dir = os.path.join(self.get_temp_dir(), "summary")
# Create a model with synthetic data, and fit for one epoch.
model = _test_model_builder(
model_type=model_type, compile_model=True, build_model=False
)
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(_BATCH_SIZE)
save_callback = keras.callbacks.ModelCheckpoint(
filepath=os.path.join(checkpoint_dir, "ckpt-{epoch}"),
save_weights_only=True,
)
model.fit(dataset, epochs=1, callbacks=[save_callback])
self.assertNotEmpty(
tf.io.gfile.listdir(checkpoint_dir),
"Checkpoint should have been written and "
"checkpoint_dir should not be empty.",
)
# Create a new model used for evaluation.
eval_model = _test_model_builder(
model_type=model_type, compile_model=True, build_model=build_model
)
        # Have a sidecar_evaluator evaluate once.
sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
eval_model,
data=dataset,
checkpoint_dir=checkpoint_dir,
max_evaluations=1,
callbacks=[keras.callbacks.TensorBoard(log_dir=log_dir)],
)
with self.assertLogs() as cm:
sidecar_evaluator.start()
metrics_logging = [
line for line in cm.output if "End of evaluation" in line
]
self.assertLen(metrics_logging, 1)
expected_logged_metrics = [
"loss",
"categorical_accuracy",
"mean_squared_error_1",
"mean_squared_error_2",
]
for metric_name in expected_logged_metrics:
self.assertRegex(metrics_logging[0], f"{metric_name}=")
# Eval model has been restored to the same state as the original model,
# so their weights should match. If not, restoration of the model didn't
# work.
self.assertModelsSameVariables(model, eval_model)
        # Check that the optimizer iterations counter was restored.
self.assertEqual(
sidecar_evaluator.model.optimizer.iterations.numpy(), _BATCH_SIZE
)
self.assertSummaryEventsWritten(os.path.join(log_dir, "validation"))
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=["eager"],
model_type=[ModelType.SEQUENTIAL, ModelType.SUBCLASS],
build_model=[True, False],
)
)
def testTimeoutFunction(self, model_type, build_model):
checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoints")
# Create a model with synthetic data, and fit for one epoch.
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(_BATCH_SIZE)
# Create a new model used for evaluation.
eval_model = _test_model_builder(
model_type=model_type, compile_model=True, build_model=build_model
)
        # Have a sidecar_evaluator evaluate once.
sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
eval_model,
data=dataset,
checkpoint_dir=checkpoint_dir,
max_evaluations=1,
)
with self.assertLogs() as cm:
threading.Thread(
target=sidecar_evaluator.start, daemon=True
).start()
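            # Give the evaluator thread enough time to poll for checkpoints,
            # time out, and log the "not found" message checked below.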
time.sleep(50)
metrics_logging = [
l for l in cm.output if "No checkpoints appear to be found" in l
]
self.assertGreaterEqual(len(metrics_logging), 1)
def testExperimentalDeprecatedMessage(self):
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, "warning", warning):
sidecar_evaluator_lib.SidecarEvaluatorExperimental(None, None, None)
warning_msg = (
"`tf.keras.experimental.SidecarEvaluator` endpoint is deprecated"
)
self.assertIn(warning_msg, "\n".join(warning_messages))
@test_combinations.run_with_all_model_types
def test_best_model_exporter_with_sidecarevaluator(self):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
# Create a model with synthetic data, and fit for 20 epochs.
layers = [
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation="relu"
),
keras.layers.Dense(NUM_CLASSES, activation="softmax"),
]
model = test_utils.get_model_from_layers(layers, input_shape=(3,))
model.compile(
loss="categorical_crossentropy",
optimizer="rmsprop",
metrics=["acc"],
)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES,
)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
callbacks = [
keras.callbacks.ModelCheckpoint(
filepath=os.path.join(
os.path.join(temp_dir, "ckpt"), "ckpt-{epoch:04d}"
),
monitor="loss",
save_best_only=True,
save_weights_only=True,
save_freq="epoch",
mode="min",
)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=callbacks,
epochs=20,
verbose=0,
)
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(temp_dir, "ckpt")),
"Checkpoints should have been written and "
"checkpoint_dir should not be empty.",
)
# Have a sidecar_evaluator evaluate once.
dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.batch(BATCH_SIZE)
sidecar_evaluator = keras.utils.SidecarEvaluator(
model=model,
data=dataset,
checkpoint_dir=os.path.join(temp_dir, "ckpt"),
max_evaluations=1,
callbacks=[
SidecarEvaluatorModelExport(
export_filepath=os.path.join(
os.path.join(temp_dir, "ckpt"),
"best_model_eval",
"best-model-{epoch:04d}",
),
checkpoint_filepath=os.path.join(
os.path.join(temp_dir, "ckpt"), "ckpt-{epoch:04d}"
),
save_weights_only=False,
monitor="loss",
mode="min",
verbose=1,
),
],
)
sidecar_evaluator.start()
# Asserts output directory exists.
assert os.path.exists(
os.path.join(os.path.join(temp_dir, "ckpt"), "best_model_eval")
)
# Asserts best model files do get written.
self.assertRegex(
str(
tf.io.gfile.listdir(
os.path.join(
os.path.join(temp_dir, "ckpt"), "best_model_eval"
)
)
),
r"(.*best-model.*)+",
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/sidecar_evaluator_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/sidecar_evaluator_test.py",
"repo_id": "tf-keras",
"token_count": 7803
} | 247 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras utilities to split v1 and v2 classes."""
import abc
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.engine import base_layer
from tf_keras.engine import base_layer_v1
from tf_keras.engine import training
from tf_keras.engine import training_v1
from tf_keras.testing_infra import test_combinations
@test_combinations.run_all_keras_modes
class SplitUtilsTest(test_combinations.TestCase):
def _check_model_class(self, model_class):
if tf.compat.v1.executing_eagerly_outside_functions():
self.assertEqual(model_class, training.Model)
else:
self.assertEqual(model_class, training_v1.Model)
def _check_layer_class(self, layer):
if tf.compat.v1.executing_eagerly_outside_functions():
self.assertIsInstance(layer, base_layer.Layer)
self.assertNotIsInstance(layer, base_layer_v1.Layer)
else:
self.assertIsInstance(layer, base_layer_v1.Layer)
def test_functional_model(self):
inputs = keras.Input(10)
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
self._check_model_class(model.__class__.__bases__[0])
self._check_layer_class(model)
def test_subclass_model_with_functional_init(self):
inputs = keras.Input(10)
outputs = keras.layers.Dense(1)(inputs)
class MyModel(keras.Model):
pass
model = MyModel(inputs, outputs)
model_class = model.__class__.__bases__[0].__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_subclass_model_with_functional_init_interleaved_v1_functional(
self,
):
with tf.Graph().as_default():
inputs = keras.Input(10)
outputs = keras.layers.Dense(1)(inputs)
_ = keras.Model(inputs, outputs)
inputs = keras.Input(10)
outputs = keras.layers.Dense(1)(inputs)
class MyModel(keras.Model):
pass
model = MyModel(inputs, outputs)
model_class = model.__class__.__bases__[0].__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_sequential_model(self):
model = keras.Sequential([keras.layers.Dense(1)])
model_class = model.__class__.__bases__[0].__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_subclass_model(self):
class MyModel(keras.Model):
def call(self, x):
return 2 * x
model = MyModel()
model_class = model.__class__.__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_layer(self):
class IdentityLayer(base_layer.Layer):
"""A layer that returns it's input.
Useful for testing a layer without a variable.
"""
def call(self, inputs):
return inputs
layer = IdentityLayer()
self._check_layer_class(layer)
def test_multiple_subclass_model(self):
class Model1(keras.Model):
pass
class Model2(Model1):
def call(self, x):
return 2 * x
model = Model2()
model_class = model.__class__.__bases__[0].__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_user_provided_metaclass(self):
class AbstractModel(keras.Model, metaclass=abc.ABCMeta):
@abc.abstractmethod
def call(self, inputs):
"""Calls the model."""
class MyModel(AbstractModel):
def call(self, inputs):
return 2 * inputs
with self.assertRaisesRegex(TypeError, "instantiate abstract class"):
AbstractModel()
model = MyModel()
model_class = model.__class__.__bases__[0].__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_multiple_inheritance(self):
class Return2:
def return_2(self):
return 2
class MyModel(keras.Model, Return2):
def call(self, x):
return self.return_2() * x
model = MyModel()
bases = model.__class__.__bases__
self._check_model_class(bases[0])
self.assertEqual(bases[1], Return2)
self.assertEqual(model.return_2(), 2)
self._check_layer_class(model)
def test_fit_error(self):
if not tf.compat.v1.executing_eagerly_outside_functions():
# Error only appears on the v2 class.
return
model = keras.Sequential([keras.layers.Dense(1)])
model.compile("sgd", "mse")
x, y = np.ones((10, 10)), np.ones((10, 1))
with tf.compat.v1.get_default_graph().as_default():
with self.assertRaisesRegex(
ValueError, "instance was constructed with eager mode enabled"
):
model.fit(x, y, batch_size=2)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/version_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/version_utils_test.py",
"repo_id": "tf-keras",
"token_count": 2573
} | 248 |
.git
.github
*.Dockerfile
| autokeras/.dockerignore/0 | {
"file_path": "autokeras/.dockerignore",
"repo_id": "autokeras",
"token_count": 11
} | 0 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from autokeras.engine import analyser
CATEGORICAL = "categorical"
NUMERICAL = "numerical"
class InputAnalyser(analyser.Analyser):
def finalize(self):
return
class ImageAnalyser(InputAnalyser):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def finalize(self):
if len(self.shape) not in [3, 4]:
raise ValueError(
"Expect the data to ImageInput to have shape (batch_size, "
"height, width, channels) or (batch_size, height, width) "
"dimensions, but got input shape {shape}".format(
shape=self.shape
)
)
class TextAnalyser(InputAnalyser):
def correct_shape(self):
if len(self.shape) == 1:
return True
return len(self.shape) == 2 and self.shape[1] == 1
def finalize(self):
if not self.correct_shape():
raise ValueError(
"Expect the data to TextInput to have shape "
"(batch_size, 1), but "
"got input shape {shape}.".format(shape=self.shape)
)
if self.dtype != tf.string:
raise TypeError(
"Expect the data to TextInput to be strings, but got "
"{type}.".format(type=self.dtype)
)
class StructuredDataAnalyser(InputAnalyser):
def __init__(self, column_names=None, column_types=None, **kwargs):
super().__init__(**kwargs)
self.column_names = column_names
self.column_types = column_types
# Variables for inferring column types.
self.count_numerical = None
self.count_categorical = None
self.count_unique_numerical = []
self.num_col = None
def update(self, data):
super().update(data)
if len(self.shape) != 2:
return
if data.dtype != tf.string:
data = tf.strings.as_string(data)
data = data.numpy()
# Calculate the statistics.
for instance in data:
self._update_instance(instance)
def _update_instance(self, x):
if self.num_col is None:
self.num_col = len(x)
self.count_numerical = np.zeros(self.num_col)
self.count_categorical = np.zeros(self.num_col)
for _ in range(len(x)):
self.count_unique_numerical.append({})
for i in range(self.num_col):
x[i] = x[i].decode("utf-8")
try:
tmp_num = float(x[i])
self.count_numerical[i] += 1
if tmp_num not in self.count_unique_numerical[i]:
self.count_unique_numerical[i][tmp_num] = 1
else:
self.count_unique_numerical[i][tmp_num] += 1
except ValueError:
self.count_categorical[i] += 1
def finalize(self):
self.check()
self.infer_column_types()
def get_input_name(self):
return "StructuredDataInput"
def check(self):
if len(self.shape) != 2:
raise ValueError(
"Expect the data to {input_name} to have shape "
"(batch_size, num_features), but "
"got input shape {shape}.".format(
input_name=self.get_input_name(), shape=self.shape
)
)
# Fill in the column_names
if self.column_names is None:
if self.column_types:
raise ValueError(
"column_names must be specified, if "
"column_types is specified."
)
self.column_names = [str(index) for index in range(self.shape[1])]
# Check if column_names has the correct length.
if len(self.column_names) != self.shape[1]:
raise ValueError(
"Expect column_names to have length {expect} "
"but got {actual}.".format(
expect=self.shape[1], actual=len(self.column_names)
)
)
def infer_column_types(self):
column_types = {}
for i in range(self.num_col):
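            # A column is treated as categorical if any value failed to parse
            # as a number, or if its numerical values have few distinct values
            # (less than 5% of the observed values); otherwise it is treated
            # as numerical.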
if self.count_categorical[i] > 0:
column_types[self.column_names[i]] = CATEGORICAL
elif (
len(self.count_unique_numerical[i]) / self.count_numerical[i]
< 0.05
):
column_types[self.column_names[i]] = CATEGORICAL
else:
column_types[self.column_names[i]] = NUMERICAL
# Partial column_types is provided.
if self.column_types is None:
self.column_types = {}
for key, value in column_types.items():
if key not in self.column_types:
self.column_types[key] = value
class TimeseriesAnalyser(StructuredDataAnalyser):
def get_input_name(self):
return "TimeseriesInput"
| autokeras/autokeras/analysers/input_analysers.py/0 | {
"file_path": "autokeras/autokeras/analysers/input_analysers.py",
"repo_id": "autokeras",
"token_count": 2665
} | 1 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import keras_tuner
import tensorflow as tf
from tensorflow import keras
from tensorflow import nest
from autokeras import analysers
from autokeras import blocks
from autokeras import test_utils
def test_image_build_return_tensor():
block = blocks.ImageBlock()
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(32, 32, 3), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_image_block_xception_return_tensor():
block = blocks.ImageBlock(block_type="xception")
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(32, 32, 3), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_image_block_normalize_return_tensor():
block = blocks.ImageBlock(normalize=True)
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(32, 32, 3), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_image_block_augment_return_tensor():
block = blocks.ImageBlock(augment=True)
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(32, 32, 3), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_image_deserialize_to_image():
serialized_block = blocks.serialize(blocks.ImageBlock())
block = blocks.deserialize(serialized_block)
assert isinstance(block, blocks.ImageBlock)
def test_image_get_config_has_all_attributes():
block = blocks.ImageBlock()
config = block.get_config()
assert test_utils.get_func_args(blocks.ImageBlock.__init__).issubset(
config.keys()
)
def test_text_build_return_tensor():
block = blocks.TextBlock()
outputs = block.build(
keras_tuner.HyperParameters(), keras.Input(shape=(1,), dtype=tf.string)
)
assert len(nest.flatten(outputs)) == 1
def test_text_block_ngram_return_tensor():
block = blocks.TextBlock(block_type="ngram")
outputs = block.build(
keras_tuner.HyperParameters(), keras.Input(shape=(1,), dtype=tf.string)
)
assert len(nest.flatten(outputs)) == 1
def test_text_block_transformer_return_tensor():
block = blocks.TextBlock(block_type="transformer")
outputs = block.build(
keras_tuner.HyperParameters(), keras.Input(shape=(1,), dtype=tf.string)
)
assert len(nest.flatten(outputs)) == 1
def test_text_deserialize_to_text():
serialized_block = blocks.serialize(blocks.TextBlock())
block = blocks.deserialize(serialized_block)
assert isinstance(block, blocks.TextBlock)
def test_text_get_config_has_all_attributes():
block = blocks.TextBlock()
config = block.get_config()
assert test_utils.get_func_args(blocks.TextBlock.__init__).issubset(
config.keys()
)
def test_structured_build_return_tensor():
block = blocks.StructuredDataBlock()
block.column_names = ["0", "1"]
block.column_types = {"0": analysers.NUMERICAL, "1": analysers.NUMERICAL}
outputs = block.build(
keras_tuner.HyperParameters(), keras.Input(shape=(2,), dtype=tf.string)
)
assert len(nest.flatten(outputs)) == 1
def test_structured_block_normalize_return_tensor():
block = blocks.StructuredDataBlock(normalize=True)
block.column_names = ["0", "1"]
block.column_types = {"0": analysers.NUMERICAL, "1": analysers.NUMERICAL}
outputs = block.build(
keras_tuner.HyperParameters(), keras.Input(shape=(2,), dtype=tf.string)
)
assert len(nest.flatten(outputs)) == 1
def test_structured_block_search_normalize_return_tensor():
block = blocks.StructuredDataBlock(name="a")
block.column_names = ["0", "1"]
block.column_types = {"0": analysers.NUMERICAL, "1": analysers.NUMERICAL}
hp = keras_tuner.HyperParameters()
hp.values["a/" + blocks.wrapper.NORMALIZE] = True
outputs = block.build(hp, keras.Input(shape=(2,), dtype=tf.string))
assert len(nest.flatten(outputs)) == 1
def test_structured_deserialize_to_structured():
serialized_block = blocks.serialize(blocks.StructuredDataBlock())
block = blocks.deserialize(serialized_block)
assert isinstance(block, blocks.StructuredDataBlock)
def test_structured_get_config_has_all_attributes():
block = blocks.StructuredDataBlock()
config = block.get_config()
assert test_utils.get_func_args(
blocks.StructuredDataBlock.__init__
).issubset(config.keys())
def test_timeseries_build_return_tensor():
block = blocks.TimeseriesBlock()
block.column_names = ["0", "1"]
block.column_types = {"0": analysers.NUMERICAL, "1": analysers.NUMERICAL}
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(32, 2), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_timeseries_deserialize_to_timeseries():
serialized_block = blocks.serialize(blocks.TimeseriesBlock())
block = blocks.deserialize(serialized_block)
assert isinstance(block, blocks.TimeseriesBlock)
def test_timeseries_get_config_has_all_attributes():
block = blocks.TimeseriesBlock()
config = block.get_config()
assert test_utils.get_func_args(blocks.TimeseriesBlock.__init__).issubset(
config.keys()
)
| autokeras/autokeras/blocks/wrapper_test.py/0 | {
"file_path": "autokeras/autokeras/blocks/wrapper_test.py",
"repo_id": "autokeras",
"token_count": 2177
} | 2 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Serializable(object):
"""Serializable from and to JSON with same mechanism as Keras Layer."""
def get_config(self):
"""Returns the current config of this object.
# Returns
Dictionary.
"""
raise NotImplementedError
@classmethod
def from_config(cls, config):
"""Build an instance from the config of this object.
# Arguments
config: Dict. The config of the object.
"""
return cls(**config)
| autokeras/autokeras/engine/serializable.py/0 | {
"file_path": "autokeras/autokeras/engine/serializable.py",
"repo_id": "autokeras",
"token_count": 354
} | 3 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from autokeras import pipeline as pipeline_module
from autokeras import preprocessors
def test_pipeline_postprocess_one_hot_to_labels():
pipeline = pipeline_module.Pipeline(
inputs=[[]], outputs=[[preprocessors.OneHotEncoder(["a", "b", "c"])]]
)
assert np.array_equal(
pipeline.postprocess(np.eye(3)), [["a"], ["b"], ["c"]]
)
def test_pipeline_postprocess_multiple_one_hot_to_labels():
pipeline = pipeline_module.Pipeline(
inputs=[[]],
outputs=[
[preprocessors.OneHotEncoder(["a", "b", "c"])],
[preprocessors.OneHotEncoder(["a", "b", "c"])],
],
)
result = pipeline.postprocess([np.eye(3), np.eye(3)])
assert np.array_equal(result[0], [["a"], ["b"], ["c"]])
assert np.array_equal(result[1], [["a"], ["b"], ["c"]])
| autokeras/autokeras/pipeline_test.py/0 | {
"file_path": "autokeras/autokeras/pipeline_test.py",
"repo_id": "autokeras",
"token_count": 517
} | 4 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import keras_tuner
from tensorflow import keras
import autokeras as ak
from autokeras import test_utils
from autokeras.tuners import greedy
from autokeras.tuners import task_specific
def test_greedy_oracle_get_state_update_space_can_run():
oracle = greedy.GreedyOracle(objective="val_loss")
oracle.set_state(oracle.get_state())
hp = keras_tuner.HyperParameters()
hp.Boolean("test")
oracle.update_space(hp)
@mock.patch("autokeras.tuners.greedy.GreedyOracle.get_best_trials")
def test_greedy_oracle_populate_different_values(get_best_trials):
hp = keras_tuner.HyperParameters()
test_utils.build_graph().build(hp)
oracle = greedy.GreedyOracle(objective="val_loss", seed=test_utils.SEED)
trial = mock.Mock()
trial.hyperparameters = hp
get_best_trials.return_value = [trial]
oracle.update_space(hp)
values_a = oracle.populate_space("a")["values"]
values_b = oracle.populate_space("b")["values"]
assert not all([values_a[key] == values_b[key] for key in values_a])
@mock.patch("autokeras.tuners.greedy.GreedyOracle.get_best_trials")
def test_greedy_oracle_populate_doesnt_crash_with_init_hps(get_best_trials):
hp = keras_tuner.HyperParameters()
keras.backend.clear_session()
input_node = ak.ImageInput(shape=(32, 32, 3))
input_node.batch_size = 32
input_node.num_samples = 1000
output_node = ak.ImageBlock()(input_node)
head = ak.ClassificationHead(num_classes=10)
head.shape = (10,)
output_node = head(output_node)
graph = ak.graph.Graph(inputs=input_node, outputs=output_node)
graph.build(hp)
oracle = greedy.GreedyOracle(
initial_hps=task_specific.IMAGE_CLASSIFIER,
objective="val_loss",
seed=test_utils.SEED,
)
trial = mock.Mock()
trial.hyperparameters = hp
get_best_trials.return_value = [trial]
for i in range(10):
keras.backend.clear_session()
values = oracle.populate_space("a")["values"]
hp = oracle.hyperparameters.copy()
hp.values = values
graph.build(hp)
oracle.update_space(hp)
@mock.patch("autokeras.tuners.greedy.GreedyOracle._compute_values_hash")
@mock.patch("autokeras.tuners.greedy.GreedyOracle.get_best_trials")
def test_greedy_oracle_stop_reach_max_collision(
get_best_trials, compute_values_hash
):
hp = keras_tuner.HyperParameters()
test_utils.build_graph().build(hp)
oracle = greedy.GreedyOracle(objective="val_loss", seed=test_utils.SEED)
trial = mock.Mock()
trial.hyperparameters = hp
get_best_trials.return_value = [trial]
compute_values_hash.return_value = 1
oracle.update_space(hp)
oracle.populate_space("a")["values"]
assert (
oracle.populate_space("b")["status"]
== keras_tuner.engine.trial.TrialStatus.STOPPED
)
@mock.patch("autokeras.tuners.greedy.GreedyOracle.get_best_trials")
def test_greedy_oracle_populate_space_with_no_hp(get_best_trials):
hp = keras_tuner.HyperParameters()
oracle = greedy.GreedyOracle(objective="val_loss", seed=test_utils.SEED)
trial = mock.Mock()
trial.hyperparameters = hp
get_best_trials.return_value = [trial]
oracle.update_space(hp)
values_a = oracle.populate_space("a")["values"]
assert len(values_a) == 0
| autokeras/autokeras/tuners/greedy_test.py/0 | {
"file_path": "autokeras/autokeras/tuners/greedy_test.py",
"repo_id": "autokeras",
"token_count": 1501
} | 5 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_tuner.engine import hyperparameters
from autokeras.utils import utils
def test_validate_num_inputs_error():
with pytest.raises(ValueError) as info:
utils.validate_num_inputs([1, 2, 3], 2)
assert "Expected 2 elements in the inputs list" in str(info.value)
def test_check_tf_version_error():
utils.tf.__version__ = "2.1.0"
with pytest.warns(ImportWarning) as record:
utils.check_tf_version()
assert len(record) == 1
assert (
"Tensorflow package version needs to be at least"
in record[0].message.args[0]
)
def test_check_kt_version_error():
utils.keras_tuner.__version__ = "1.0.0"
with pytest.warns(ImportWarning) as record:
utils.check_kt_version()
assert len(record) == 1
assert (
"Keras Tuner package version needs to be at least"
in record[0].message.args[0]
)
def test_run_with_adaptive_batch_size_raise_error():
def func(**kwargs):
raise tf.errors.ResourceExhaustedError(0, "", None)
with pytest.raises(tf.errors.ResourceExhaustedError):
utils.run_with_adaptive_batch_size(
batch_size=64,
func=func,
x=tf.data.Dataset.from_tensor_slices(np.random.rand(100, 1)).batch(
64
),
validation_data=tf.data.Dataset.from_tensor_slices(
np.random.rand(100, 1)
).batch(64),
)
def test_get_hyperparameter_with_none_return_hp():
hp = utils.get_hyperparameter(
None, hyperparameters.Choice("hp", [10, 20]), int
)
assert isinstance(hp, hyperparameters.Choice)
def test_get_hyperparameter_with_int_return_int():
value = utils.get_hyperparameter(
10, hyperparameters.Choice("hp", [10, 20]), int
)
assert isinstance(value, int)
assert value == 10
def test_get_hyperparameter_with_hp_return_same():
hp = utils.get_hyperparameter(
hyperparameters.Choice("hp", [10, 30]),
hyperparameters.Choice("hp", [10, 20]),
int,
)
assert isinstance(hp, hyperparameters.Choice)
| autokeras/autokeras/utils/utils_test.py/0 | {
"file_path": "autokeras/autokeras/utils/utils_test.py",
"repo_id": "autokeras",
"token_count": 1098
} | 6 |
from subprocess import CalledProcessError
from subprocess import check_call
def check_bash_call(string):
check_call(["bash", "-c", string])
def _run_format_and_flake8():
files_changed = False
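    # Run the linter first; if it fails, run the formatter instead and flag
    # that files changed so the user re-stages and re-commits them.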
try:
check_bash_call("sh shell/lint.sh")
except CalledProcessError:
check_bash_call("sh shell/format.sh")
files_changed = True
if files_changed:
print("Some files have changed.")
print("Please do git add and git commit again")
else:
print("No formatting needed.")
if files_changed:
exit(1)
def run_format_and_flake8():
try:
_run_format_and_flake8()
except CalledProcessError as error:
print("Pre-commit returned exit code", error.returncode)
exit(error.returncode)
if __name__ == "__main__":
run_format_and_flake8()
| autokeras/docker/pre_commit.py/0 | {
"file_path": "autokeras/docker/pre_commit.py",
"repo_id": "autokeras",
"token_count": 329
} | 7 |
import itertools
import re
from sphinx.util.typing import stringify
from . import utils
def get_code_blocks(docstring):
code_blocks = {}
tmp = docstring[:]
while "```" in tmp:
tmp = tmp[tmp.find("```") :]
index = tmp[3:].find("```") + 6
snippet = tmp[:index]
# Place marker in docstring for later reinjection.
token = f"$KERAS_AUTODOC_CODE_BLOCK_{len(code_blocks)}"
docstring = docstring.replace(snippet, token)
code_blocks[token] = snippet
tmp = tmp[index:]
return code_blocks, docstring
def get_section_end(docstring, section_start):
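    # A section ends right before the next non-indented line: the regex looks
    # for a non-space character, one or more newlines, and then either a
    # non-space character or the end of the docstring.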
regex_indented_sections_end = re.compile(r"\S\n+(\S|$)")
end = re.search(regex_indented_sections_end, docstring[section_start:])
section_end = section_start + end.end()
if section_end == len(docstring):
return section_end
else:
return section_end - 2
def get_google_style_sections_without_code(docstring):
regex_indented_sections_start = re.compile(r"\n# .+?\n")
google_style_sections = {}
for i in itertools.count():
match = re.search(regex_indented_sections_start, docstring)
if match is None:
break
section_start = match.start() + 1
section_end = get_section_end(docstring, section_start)
google_style_section = docstring[section_start:section_end]
token = f"KERAS_AUTODOC_GOOGLE_STYLE_SECTION_{i}"
google_style_sections[token] = google_style_section
docstring = utils.insert_in_string(
docstring, token, section_start, section_end
)
return google_style_sections, docstring
def get_google_style_sections(docstring):
# First, extract code blocks and process them.
# The parsing is easier if the #, : and other symbols aren't there.
code_blocks, docstring = get_code_blocks(docstring)
google_style_sections, docstring = get_google_style_sections_without_code(
docstring
)
docstring = reinject_strings(docstring, code_blocks)
for section_token, section in google_style_sections.items():
google_style_sections[section_token] = reinject_strings(
section, code_blocks
)
return google_style_sections, docstring
def to_markdown(
google_style_section: str, types: dict = None, aliases=None
) -> str:
end_first_line = google_style_section.find("\n")
section_title = google_style_section[2:end_first_line]
section_body = google_style_section[end_first_line + 1 :]
section_body = utils.remove_indentation(section_body.strip())
    # If the section is a list of elements, special formatting is applied.
if section_title == "Arguments":
section_body = format_as_markdown_list(section_body, types, aliases)
elif section_title in ("Attributes", "Raises"):
section_body = format_as_markdown_list(section_body)
if section_body:
return f"__{section_title}__\n\n{section_body}\n"
else:
return f"__{section_title}__\n"
def format_as_markdown_list(
section_body, types: dict = None, aliases: dict = None
):
section_body = re.sub(r"\n([^ ].*?):", r"\n- __\1__:", section_body)
section_body = re.sub(r"^([^ ].*?):", r"- __\1__:", section_body)
# Optionally add type annotations to docstring
if types:
for arg, arg_type in types.items():
type_hint_str = apply_aliases(stringify(arg_type), aliases)
section_body = re.sub(
rf"(- __{arg}__)", rf"\1 `{type_hint_str}`", section_body
)
return section_body
def apply_aliases(string: str, aliases: dict):
for dotted_path, alias in aliases.items():
string = string.replace(dotted_path, alias)
return string
def reinject_strings(target, strings_to_inject):
for token, string_to_inject in strings_to_inject.items():
target = target.replace(token, string_to_inject)
return target
def process_docstring(docstring, types: dict = None, aliases=None):
if docstring[-1] != "\n":
docstring += "\n"
google_style_sections, docstring = get_google_style_sections(docstring)
for token, google_style_section in google_style_sections.items():
markdown_section = to_markdown(google_style_section, types, aliases)
docstring = docstring.replace(token, markdown_section)
return docstring
| autokeras/docs/keras_autodoc/docstring.py/0 | {
"file_path": "autokeras/docs/keras_autodoc/docstring.py",
"repo_id": "autokeras",
"token_count": 1739
} | 8 |
## How to resume a previously killed run?
This feature is controlled by the `overwrite` argument of `AutoModel` or any other task APIs.
It is set to `False` by default,
which means it would not overwrite the contents of the directory.
In other words, it will continue the previous fit.
You can just run the same code again.
It will automatically resume the previously killed run.
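For example, a minimal sketch (the directory, project name, and training data
names below are placeholders) is to re-create the same task API object with
`overwrite=False` and call `fit()` again:
```python
import autokeras as ak
clf = ak.ImageClassifier(
    max_trials=10,
    directory="my_dir",
    project_name="helloworld",
    # The default. Keeps the saved trials so the search can resume.
    overwrite=False,
)
# Running fit() again continues the previously killed run.
clf.fit(x_train, y_train)
```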
## How to customize metrics and loss?
Please see the code example below.
```python
import autokeras as ak
clf = ak.ImageClassifier(
max_trials=3,
metrics=['mse'],
loss='mse',
)
```
## How to use customized metrics to select the best model?
By default, AutoKeras uses validation loss as the metric for selecting the best model.
Below is a code example of using a customized metric for selecting models.
Please read the comments for the details.
```python
# Implement your customized metric according to the tutorial.
# https://keras.io/api/metrics/#creating-custom-metrics
import autokeras as ak
import keras_tuner as kerastuner
def f1_score(y_true, y_pred):
...
clf = ak.ImageClassifier(
max_trials=3,
# Wrap the function into a Keras Tuner Objective
# and pass it to AutoKeras.
# Direction can be 'min' or 'max'
# meaning we want to minimize or maximize the metric.
    # 'val_f1_score' just adds a 'val_' prefix
# to the function name or the metric name.
objective=kerastuner.Objective('val_f1_score', direction='max'),
# Include it as one of the metrics.
metrics=[f1_score],
)
```
## How to use multiple GPUs?
You can use the `distribution_strategy` argument when initializing any model you created with AutoKeras,
like AutoModel, ImageClassifier, StructuredDataRegressor and so on. This argument is supported by Keras Tuner.
AutoKeras supports the arguments supported by Keras Tuner.
Please see the description of the argument [here](https://keras-team.github.io/keras-tuner/documentation/tuners/#tuner-class).
```python
import tensorflow as tf
import autokeras as ak
auto_model = ak.ImageClassifier(
max_trials=3,
distribution_strategy=tf.distribute.MirroredStrategy(),
)
```
## How to constrain the model size?
You can use the `max_model_size` argument for any model in AutoKeras.
```python
import autokeras as ak
auto_model = ak.ImageClassifier(
max_trials=3,
max_model_size=1000000000,
)
```
| autokeras/docs/templates/tutorial/faq.md/0 | {
"file_path": "autokeras/docs/templates/tutorial/faq.md",
"repo_id": "autokeras",
"token_count": 729
} | 9 |
import base64
import json
import os
import sys
from io import BytesIO
import requests
from PIL import Image
def main(directory):
contributors = []
for contributor in json.load(open("contributors.json")):
if contributor["type"] != "User":
continue
if contributor["login"] == "codacy-badger":
continue
contributors.append(contributor)
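    # Lay the avatars out on a grid with `elem_per_line` avatars per row; each
    # cell takes `size` pixels plus a `gap` pixel margin, with an extra `gap`
    # at the borders.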
size = 36
gap = 3
elem_per_line = 22
width = elem_per_line * (size + gap) + gap
height = ((len(contributors) - 1) // elem_per_line + 1) * (size + gap) + gap
html = '<svg xmlns="http://www.w3.org/2000/svg" '
html += 'xmlns:xlink="http://www.w3.org/1999/xlink" '
html += 'width="{width}" height="{height}">'.format(
width=width, height=height
)
defs = "<defs>"
defs += '<rect id="rect" width="36" height="36" rx="18"/>'
defs += '<clipPath id="clip"> <use xlink:href="#rect"/> </clipPath> '
defs += "</defs>"
html += defs + "\n"
for index, contributor in enumerate(contributors):
file_name = os.path.join(directory, str(index) + ".jpeg")
response = requests.get(contributor["avatar_url"])
file = open(file_name, "wb")
file.write(response.content)
file.close()
image = Image.open(file_name)
image = image.resize((size, size))
# image.convert('RGB').save(file_name)
buffered = BytesIO()
image.save(buffered, format="PNG")
img_str = base64.b64encode(buffered.getvalue()).decode("UTF-8")
xi = index % elem_per_line
yi = index // elem_per_line
x = xi * (size + gap) + gap
y = yi * (size + gap) + gap
temp = (
'<a xlink:href="{html_url}"> '
+ '<image transform="translate({x},{y})" '
+ 'xlink:href="data:image/png;base64,{img_str}" '
+ 'alt="{login}" clip-path="url(#clip)" '
+ 'width="36" height="36"/></a>'
)
temp = temp.format(
html_url=contributor["html_url"],
x=x,
y=y,
img_str=img_str,
login=contributor["login"],
)
html += temp + "\n"
html += "</svg>"
print(html)
if __name__ == "__main__":
main(sys.argv[1])
| autokeras/shell/contributors.py/0 | {
"file_path": "autokeras/shell/contributors.py",
"repo_id": "autokeras",
"token_count": 1078
} | 10 |
# Tune end-to-end ML workflows in KerasTuner
| Status | Proposed |
:-------------- |:---------------------------------------------------- |
| **Author** | Haifeng Jin ([email protected]) |
| **Updated** | 2021-09-20 |
## Objective
Improve the user experience of KerasTuner for tuning end-to-end workflows.
Reduce the learning curve and the code hacks needed for workflows that involve
hyperparameters in data preprocessing and model fitting.
## Motivation
Different users prefer different workflows for their tuning process -- like
Keras has different getting-started tutorials for engineers and researchers.
There are users who prefer to learn more about the framework and to implement
everything by overriding class methods, and users who prefer to write
everything from scratch to have a shorter learning curve and better
configurability for the details. For example, some users would like to
override `Model.train_step()` to make the code cleaner, others like to write
the training loop from scratch.
Currently, KerasTuner has good support for the users who would like to
restructure their code by learning the KerasTuner framework, and for users who
only need to do some light customization of the model building process.
However, the support for users who need to write their model building and
training process from scratch is not adequate.
Moreover, many users use the hyperparameter tuning library as an intermediate
step in their ML process rather than their main API. In their workflow,
implementing and training a model with Keras are usually a separate process
from hyperparameter tuning. They would first write the code using Keras, then
try to put it into KerasTuner to tune, and put the hyperparameter values back
into their Keras model. Therefore, we should maximize the code and model
portability in KerasTuner for these users, and minimize the code changes
required for them to adopt and remove KerasTuner.
### The old workflow
The current workflow for writing their model training process with KerasTuner
is as follows. The user defines the model in the `HyperModel.build()` function,
and defines the data preprocessing and model training by overriding
`Tuner.run_trial()`. The arguments, like the dataset, are passed through the
`Tuner.search()` function, and finally received by `Tuner.run_trial()`.
```py
import keras_tuner as kt
class MyHyperModel(kt.HyperModel):
def build(self, hp):
# Model building
model = keras.Sequential()
model.add(keras.layers.Dense(
hp.Choice('units', [8, 16, 32]),
activation='relu'))
model.add(keras.layers.Dense(1, activation='relu'))
model.compile(loss='mse')
return model
class MyTuner(kt.Tuner):
def run_trial(self, trial, *fit_args, **fit_kwargs):
hp = trial.hyperparameters
# data preprocessing
training_data, validation_data = data_preprocessing(
hp, *fit_args, **fit_kwargs)
model = self.hypermodel.build(hp)
# model training
model.fit(
training_data,
epochs=hp.Int(...),
validation_data=validation_data,
...)
# evaluation and reporting
score = model.evaluate(validation_data, ...)
self.oracle.update_trial(trial.trial_id, {'score': score})
self.save_model(trial.trial_id, model)
tuner = MyTuner(
hypermodel=MyHyperModel(),
objective=kt.Objective('score', 'min'),
...)
# Passing in the args
tuner.search(*fit_args, **fit_kwargs)
```
### Problems
The key problem of this workflow is that the code is split in two classes. Any
control flow and data flow between data preprocessing, model building, and
model training would all have to pass through the framework and function calls.
To use the framework, the user would have to understand how these different
functions are called, and wire their data and information properly between
these functions.
### Use cases to improve
The following use cases are not well supported because of the problem above.
#### Configure and jointly tune data preprocessing and model training
For example, writing a custom training loop, or tuning the data preprocessing
steps, or anything in the training loop like whether to shuffle the training
data, they need to override the `Tuner.run_trial()` function, which adds more
to the learning curve.
For example, in natural language processing, tokenization and vectorization may
affect the type of model used later. They will need to find a way to pass this
information from `Tuner.run_trial()` to `HyperModel.build()`.
#### Tune existing Keras code
If the users have their code for model building and training already written using
Keras, and they want to tune some of the hyperparameters, they would have to
change the code a lot to separate their code apart and wire the data flow and
control flow between the overridden functions.
#### Retrain the model after tuning
If the user wants to retrain the model using the best hyperparameter values
found, there is not a straight-forward way to do it if they used the
hyperparameter in `Tuner.run_trial()` for data preprocessing and model
training.
## User Benefit
The use cases described above would all have smooth workflows, without much
extra code or learning of the framework.
## Design Proposal
We propose two workflows: the `Tuner` workflow and the `HyperModel` workflow to
solve the problems above.
The `Tuner` workflow is to override `Tuner.run_trial()`. The user can put the
code for data preprocessing, model building, and model training all in one place
in the `Tuner.run_trial()` function. No `HyperModel` is needed. It supports all the
use cases mentioned above by providing the maximum freedom to the user.
The `HyperModel` workflow follows the original `HyperModel` style. It is easier
to learn and needs less code compared to the first workflow, but covers all the
use cases as long as the code for building and training the model are separate.
The user only needs to override the `HyperModel.fit()` for any tuning of the
data preprocessing and model fitting process.
## Detailed Design
### The `Tuner` workflow
Here is an end-to-end code example of the new workflow.
The user only needs to override `Tuner.run_trial()` to put everything together,
including data preprocessing, model building, and model training. It returns
the evaluation results back to the tuner.
```py
class MyTuner(kt.Tuner):
    def run_trial(
        self, trial, x, y, validation_data=None, sample_weight=None,
        callbacks=None, **kwargs
    ):
hp = trial.hyperparameters
# Data preprocessing
num_features = hp.Int("num_features", 10, 15)
        x, y = feature_selection(x, y, num_features=num_features)
# Model building
# Input shape depending on data preprocessing.
inputs = keras.Input(shape=(num_features,))
outputs = keras.layers.Dense(
hp.Choice('units', [8, 16, 32]),
activation='relu')(inputs)
outputs = keras.layers.Dense(1, activation='relu')(outputs)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(loss='mse',
metrics=['mae'])
# Model training
history = model.fit(
x,
y,
epochs=100,
validation_data=validation_data,
# Tune whether to use shuffle.
shuffle=hp.Boolean("shuffle"),
# Tune whether to use sample_weights.
sample_weight=sample_weight if hp.Boolean("sample_weight") else None,
# The provided callbacks list contains checkpointing and tensorboard.
callbacks=callbacks)
# Save the model to a unique path with `trial_id`.
model.save(os.path.join(trial.trial_id, 'model'))
# Returning the evaluation results
return np.min(history.history["val_mae"])
# When Tuner.run_trial is overridden,
# `hypermodel` and `objective` are optional.
tuner = MyTuner(
max_trials=3,
executions_per_trial=2,
overwrite=True,
directory="my_dir",
project_name="helloworld",
)
# Anything passed to `search()` will
# go to `**kwargs` for `Tuner.run_trial()`.
tuner.search(x, y)
# Get the best model.
best_model = tuner.get_best_models()[0]
```
There are several important features in this workflow:
* Tune the arguments of `model.fit()`, like `shuffle` and `sample_weight`.
* Share local variables across the workflow. For example, the model building
process can access the `num_features`, which is a variable in data
preprocessing. It solves the problem of joint tuning.
* Use built-in callbacks for convenience. The callbacks argument contains
callback functions for checkpointing and TensorBoard setup.
* The return value is flexible. It can be a single value, or a list of values,
  or a dictionary of metrics, or even a `History` object returned by
  `model.fit()`, as sketched right after this list.
* The `hypermodel` and `objective` can be optional. The user doesn't need to
define a `HyperModel`. If the return value is a single value, it will be
minimized by default. Therefore, objective is also optional.
* The user can build a unique path to save each model with `trial.trial_id`.
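As a minimal sketch of the flexible return value (the `build_model()` helper and
the metric names here are only illustrative), `run_trial()` can report a
dictionary of metrics and let the tuner's `objective` pick the value to optimize:
```py
class MyDictReturnTuner(kt.Tuner):
    def run_trial(self, trial, x, y, validation_data=None, **kwargs):
        hp = trial.hyperparameters
        # A hypothetical helper that builds and compiles a Keras model from hp.
        model = build_model(hp)
        history = model.fit(x, y, validation_data=validation_data, **kwargs)
        # Return a dictionary of metrics. The tuner's `objective`,
        # e.g. kt.Objective("val_mae", "min"), selects which value to optimize.
        return {
            "val_mae": np.min(history.history["val_mae"]),
            "val_loss": np.min(history.history["val_loss"]),
        }
```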
For the use case of reusing existing Keras code, the user can use the following
workflow, which calls a function with all the hyperparameters. The user only
needs to write a function to call the existing Keras code and return the
evaluation results.
```py
class MyTuner(kt.Tuner):
def run_trial(self, trial, **kwargs):
hp = trial.hyperparameters
return build_and_evaluate_model(
hp.Int("num_features", 10, 15),
hp.Choice('units', [8, 16, 32]),
...
trial.trial_id,
        )
# Save model can be handled by the user.
# `trial_id` is unique for each trial.
tuner = MyTuner(...)
tuner.search()
# Retraining the model
build_and_evaluate_model(**tuner.get_best_hyperparameters()[0].values)
```
In this workflow, the user can easily retrain the model by calling the function again with the best hyperparameters.
### The HyperModel workflow
Users who prefer the original style can also implement the same logic as a `HyperModel` by overriding the `build()` and `fit()` methods. The `build()` method builds and returns the model. The `fit()` method does the data preprocessing and model training.
Following is a code example implementing the same functionality as the code example above.
```py
import numpy as np
import keras_tuner as kt
from tensorflow import keras
class MyHyperModel(kt.HyperModel):
def build(self, hp):
# Model building
# Input shape depends on a hyperparameter used by data preprocessing.
inputs = keras.Input(shape=(hp.Int("num_features", 10, 15),))
x = keras.layers.Dense(
hp.Choice('units', [8, 16, 32]),
activation='relu')(inputs)
outputs = keras.layers.Dense(1, activation='relu')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(loss='mse',
metrics=['mae'])
return model
    def fit(self, hp, model, x, y, validation_data, sample_weight=None,
            callbacks=None, **kwargs):
# Data preprocessing
# Get the hyperparameter value used in `build()`.
        x, y = feature_selection(x, y, num_features=hp.get("num_features"))
# Model training
# Returning the training history
# or a similar dictionary if using custom training loop.
return model.fit(
x,
y,
epochs=100,
validation_data=validation_data,
# Tune whether to use shuffle.
shuffle=hp.Boolean("shuffle"),
# Tune whether to use sample_weights.
sample_weight=sample_weight if hp.Boolean("sample_weight") else None,
# The provided callbacks list contains checkpointing and tensorboard.
callbacks=callbacks)
tuner = kt.RandomSearch(
hypermodel=MyHyperModel(),
objective=kt.Objective('val_mae', 'min'),
directory='dir',
max_trials=3,
executions_per_trial=2,
overwrite=True,
directory="my_dir",
project_name="helloworld",
)
# Any arg passed to `search()` would be passed to `fit()`.
tuner.search(x, y)
# Exporting the best models.
models = tuner.get_best_models(num_models=2)
# Retraining the model with the second best hyperparameters.
second_best_hp = tuner.get_best_hyperparameters(num_trials=2)[1]
hypermodel = MyHyperModel()
model = hypermodel.build(second_best_hp)
hypermodel.fit(
hp=second_best_hp,
model=model,
x=new_x,
y=new_y,
validation_data=new_validation_data,
# Save the model at its best epoch to a custom path
callbacks=[tf.keras.callbacks.ModelCheckpoint(
filepath="path_to_checkpoint",
monitor='val_loss',
save_best_only=True)])
# Save the final model.
model.save("path_to_saved_model")
```
Please take note of the following four points:
* Similar to `Tuner.run_trial()`, the return value of the fit function supports
all different formats.
* The user can use built-in callbacks just like in `Tuner.run_trial()`.
* `build()` and `fit()` can share hyperparameters. In this example,
`num_features` is shared between the two functions. In `fit()`, we can use
`hp.get()` to obtain the value of a hyperparameter used in `build()`.
* We can easily retrain the model with any hyperparameter value set with
`hypermodel.build()` and `hypermodel.fit()`.
With these proposed workflows, the user now has the maximum flexibility. Any
step in an end-to-end machine learning workflow can be tuned. Moreover, the
changes needed to tune existing Keras code are minimized.
Here we present HyperModel code examples of three important use cases:
* Text tokenization.
* Custom training loop.
* Fine tuning with pretrained weights.
#### Text tokenization
```py
import json
# Save the vocabulary to disk before search.
text_vectorizer = layers.TextVectorization()
text_vectorizer.adapt(dataset.map(lambda x, y: x))
with open('vocab.json', 'w') as f:
json.dump(text_vectorizer.get_vocabulary(), f)
class MyHyperModel(kt.HyperModel):
def build(self, hp):
inputs = keras.Input(shape=(10,))
outputs = layers.Embedding(
# max_token is a hyperparameter also used in text vectorization.
input_dim=hp.Int("max_tokens", 100, 500, step=100),
output_dim=64)(inputs)
outputs = layers.LSTM(hp.Int("units", 32, 128, step=32))(outputs)
outputs = layers.Dense(1, activation='sigmoid')(outputs)
model = keras.Model(inputs, outputs)
model.compile(loss='mse')
return model
def fit(self, hp, model, dataset, validation_data, callbacks, **kwargs):
# Load the vocabulary from file.
with open('vocab.json', 'r') as f:
vocab = json.load(f)
# Create and adapt the text vectorizer.
text_vectorizer = layers.TextVectorization(
# The max_tokens is a hyperparameter created in build().
vocabulary=vocab[:hp.get("max_tokens")],
output_mode="int",
output_sequence_length=10)
return model.fit(
# Convert x from strings to integer vectors.
dataset.map(
lambda x, y: (text_vectorizer(x), y),
num_parallel_calls=tf.data.AUTOTUNE),
validation_data=validation_data,
callbacks=callbacks,
)
```
#### Custom training loop
```py
class MyHyperModel(kt.HyperModel):
def build(self, hp):
inputs = keras.Input(shape=(10,))
outputs = layers.Dense(hp.Int("units", 16, 128), activation='relu')(inputs)
        outputs = layers.Dense(1, activation='sigmoid')(outputs)
model = keras.Model(inputs, outputs)
return model
def fit(self, hp, model, dataset, validation_data, **kwargs):
lr = hp.Float("learning_rate", 1e-4, 1e-2, sampling="log", default=1e-3)
optimizer = tf.keras.optimizers.Adam(lr)
loss_tracker = tf.keras.metrics.Mean()
# Track the validation loss
val_loss_tracker = tf.keras.metrics.Mean()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
# Record the minimum validation loss during fit.
min_val_loss = float("inf")
@tf.function
def run_train_step(data):
images = tf.dtypes.cast(data[0], "float32") / 255.0
labels = data[1]
with tf.GradientTape() as tape:
logits = model(images)
loss = loss_fn(labels, logits)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
loss_tracker.update_state(loss)
@tf.function
def run_val_step(data):
images = tf.dtypes.cast(data[0], "float32") / 255.0
labels = data[1]
logits = model(images)
loss = loss_fn(labels, logits)
val_loss_tracker.update_state(loss)
for epoch in range(2):
for batch, data in enumerate(dataset):
run_train_step(data)
print(f"Epoch loss: {loss_tracker.result().numpy()}")
loss_tracker.reset_states()
for batch, data in enumerate(validation_data):
run_val_step(data)
val_loss = val_loss_tracker.result().numpy()
min_val_loss = min(min_val_loss, val_loss)
print(f"Epoch val_loss: {val_loss}")
val_loss_tracker.reset_states()
return min_val_loss
```
You may also subclass `keras.Model` to override `train_step()`.
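For reference, a minimal sketch of that alternative, assuming the standard `tf.keras` `train_step()` override pattern (the hyperparameters would still be consumed in `build()` and `fit()` as above):
```py
import tensorflow as tf
from tensorflow import keras


class CustomModel(keras.Model):
    def train_step(self, data):
        x, y = data
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)
            # Uses the loss configured in `compile()`.
            loss = self.compiled_loss(y, y_pred)
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        self.compiled_metrics.update_state(y, y_pred)
        return {m.name: m.result() for m in self.metrics}
```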
#### Fine tuning with pretrained weights
```py
class MyHyperModel(kt.HyperModel):
def build(self, hp):
return keras.Sequential([
keras.applications.ResNet50(
weights="imagenet",
input_shape=(32, 32, 3),
include_top=False,
),
layers.GlobalAveragePooling2D(),
layers.Dense(hp.Int("units", 32, 128)),
layers.Dense(1),
])
def fit(self, hp, model, dataset, validation_data, callbacks, **kwargs):
        # Fit the model with the `base_model` frozen.
model.layers[0].trainable = False
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
model.fit(dataset, epochs=20)
        # Fit the model again with some layers in the `base_model` frozen.
model.layers[0].trainable = True
        for layer in model.layers[0].layers[:hp.Int("freeze", 0, 20)]:
layer.trainable = False
model.compile(
# Use a smaller learning rate.
optimizer=keras.optimizers.Adam(learning_rate=1e-5),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
return model.fit(
dataset,
epochs=20,
callbacks=callbacks,
validation_data=validation_data)
```
### API documentation
The APIs in the new `HyperModel` class are as follows.
```py
class HyperModel():
def fit(self, hp, model, callbacks, **kwargs):
"""Train the model.
Args:
hp: HyperParameters.
model: `keras.Model` built in the `build()` function.
          callbacks: A list of prebuilt Keras callbacks for model checkpointing
and tensorboard configuration.
**kwargs: Anything the user defines. They are passed from
`Tuner.search()`.
Returns:
A `History` object, a similar dictionary, or a single value.
"""
pass
class Tuner():
def run_trial(self, trial, callbacks, **kwargs):
"""Train the model.
Args:
trial: Trial. The current Trial object.
          callbacks: A list of prebuilt Keras callbacks for model checkpointing
and tensorboard configuration.
**kwargs: Anything the user defines. They are passed from Tuner.search().
Returns:
A `History` object, a similar dictionary, or a single value.
"""
```
## Questions and Discussion Topics
Does the fit function need `trial_id` in the args to do model saving? The user
may need this arg to build unique saving paths for the models.
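For illustration only, a hypothetical variant (neither `trial_id` as a `fit()` argument nor the checkpoint path below is part of this proposal; both are made up here) could look like:
```py
class MyHyperModel(kt.HyperModel):
    def fit(self, hp, model, trial_id, callbacks, **kwargs):
        # Hypothetical: use the trial id to build a unique saving path.
        checkpoint = tf.keras.callbacks.ModelCheckpoint(
            filepath=f"checkpoints/{trial_id}/best_model",
            save_best_only=True)
        return model.fit(callbacks=callbacks + [checkpoint], **kwargs)
```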
| governance/rfcs/20210920-tune-end-to-end-ml-workflows-in-keras-tuner.md/0 | {
"file_path": "governance/rfcs/20210920-tune-end-to-end-ml-workflows-in-keras-tuner.md",
"repo_id": "governance",
"token_count": 6737
} | 11 |
"""ResNet50 model for Keras.
# Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385) (CVPR 2016 Best Paper Award)
Adapted from code contributed by BigMoyan.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
from . import get_submodules_from_kwargs
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
preprocess_input = imagenet_utils.preprocess_input
WEIGHTS_PATH = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.2/'
'resnet50_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.2/'
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
backend = None
layers = None
models = None
keras_utils = None
def identity_block(input_tensor, kernel_size, filters, stage, block):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of
middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(filters1, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2a')(input_tensor)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters2, kernel_size,
padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = layers.Activation('relu')(x)
return x
def conv_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(2, 2)):
"""A block that has a conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of
middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Strides for the first conv layer in the block.
# Returns
Output tensor for the block.
Note that from stage 3,
the first conv layer at main path is with strides=(2, 2)
And the shortcut should have strides=(2, 2) as well
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(filters1, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '2a')(input_tensor)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters2, kernel_size, padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = layers.Conv2D(filters3, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '1')(input_tensor)
shortcut = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = layers.Activation('relu')(x)
return x
def ResNet50(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the ResNet50 architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` with `channels_last` data format
            or `(3, 224, 224)` with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
x = layers.Conv2D(64, (7, 7),
strides=(2, 2),
padding='valid',
kernel_initializer='he_normal',
name='conv1')(x)
x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = layers.Activation('relu')(x)
x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = layers.Dense(classes, activation='softmax', name='fc1000')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
else:
warnings.warn('The output shape of `ResNet50(include_top=False)` '
'has been changed since Keras 2.2.0.')
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='resnet50')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = keras_utils.get_file(
'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
else:
weights_path = keras_utils.get_file(
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
model.load_weights(weights_path)
if backend.backend() == 'theano':
keras_utils.convert_all_kernels_in_model(model)
elif weights is not None:
model.load_weights(weights)
return model
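# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes the Keras submodules are passed explicitly as keyword arguments,
# which is how `get_submodules_from_kwargs` expects to receive them when the
# model is not instantiated through the framework wrappers:
#
#     import keras
#     from keras_applications.resnet50 import ResNet50
#     model = ResNet50(weights='imagenet',
#                      backend=keras.backend, layers=keras.layers,
#                      models=keras.models, utils=keras.utils)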
| keras-applications/keras_applications/resnet50.py/0 | {
"file_path": "keras-applications/keras_applications/resnet50.py",
"repo_id": "keras-applications",
"token_count": 5470
} | 12 |
# Keras-contrib Documentation
The source for Keras-contrib documentation is in this directory under `sources/`.
Our documentation uses extended Markdown, as implemented by [MkDocs](http://mkdocs.org).
## Building the documentation
- install pydoc-markdown: `pip install pydoc-markdown`
- `cd` to the `contrib_docs/` folder and run:
  - `pydocmd serve` # Starts a local webserver: [localhost:8000](http://localhost:8000)
- `pydocmd build` # Builds a static site in "site" directory
| keras-contrib/contrib_docs/README.md/0 | {
"file_path": "keras-contrib/contrib_docs/README.md",
"repo_id": "keras-contrib",
"token_count": 158
} | 13 |
<div class="rst-versions" role="note" aria-label="versions">
<span class="rst-current-version" data-toggle="rst-current-version">
{% if config.repo_name == 'GitHub' %}
<a href="{{ config.repo_url }}" class="fa fa-github" style="float: left; color: #fcfcfc"> GitHub</a>
{% elif config.repo_name == 'Bitbucket' %}
<a href="{{ config.repo_url }}" class="icon icon-bitbucket" style="float: left; color: #fcfcfc"> BitBucket</a>
{% elif config.repo_name == 'GitLab' %}
<a href="{{ config.repo_url }}" class="icon icon-gitlab" style="float: left; color: #fcfcfc"> GitLab</a>
{% endif %}
{% if page.previous_page %}
<span><a href="{{ page.previous_page.url|url }}" style="color: #fcfcfc;">« Previous</a></span>
{% endif %}
{% if page.next_page %}
<span style="margin-left: 15px"><a href="{{ page.next_page.url|url }}" style="color: #fcfcfc">Next »</a></span>
{% endif %}
</span>
</div>
| keras-contrib/contrib_docs/theme/versions.html/0 | {
"file_path": "keras-contrib/contrib_docs/theme/versions.html",
"repo_id": "keras-contrib",
"token_count": 449
} | 14 |
# -*- coding: utf-8 -*-
'''DenseNet and DenseNet-FCN models for Keras.
DenseNet is a network architecture where each layer is directly connected
to every other layer in a feed-forward fashion (within each dense block).
For each layer, the feature maps of all preceding layers are treated as
separate inputs whereas its own feature maps are passed on as inputs to
all subsequent layers. This connectivity pattern yields state-of-the-art
accuracies on CIFAR10/100 (with or without data augmentation) and SVHN.
On the large scale ILSVRC 2012 (ImageNet) dataset, DenseNet achieves a
similar accuracy as ResNet, but using less than half the amount of
parameters and roughly half the number of FLOPs.
DenseNets support any input image size of 32x32 or greater, and are thus
suited for CIFAR-10 or CIFAR-100 datasets. There are two types of DenseNets,
one suited for smaller images (DenseNet) and one suited for ImageNet,
called DenseNetImageNet. They are differentiated by the strided convolution
and pooling operations prior to the initial dense block.
The following table describes the model size and single-crop ImageNet
error rates (lower is better) of the DenseNetImageNet models for which
weights are provided:
------------------------------------------------------------------------------------
|    Model type     | ImageNet error (Top 1)| ImageNet error (Top 5)|  Params (M)  |
------------------------------------------------------------------------------------
|   DenseNet-121    |        25.02 %        |        7.71 %         |     8.0      |
|   DenseNet-169    |        23.80 %        |        6.85 %         |     14.3     |
|   DenseNet-201    |        22.58 %        |        6.34 %         |     20.2     |
|   DenseNet-161    |        22.20 %        |         - %           |     28.9     |
------------------------------------------------------------------------------------
DenseNets can be extended to image segmentation tasks as described in the
paper "The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for
Semantic Segmentation". Here, the dense blocks are arranged and concatenated
with long skip connections for state of the art performance on the CamVid dataset.
# Reference
- [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993.pdf)
- [The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for Semantic
Segmentation](https://arxiv.org/pdf/1611.09326.pdf)
This implementation is based on the following reference code:
- https://github.com/gpleiss/efficient_densenet_pytorch
- https://github.com/liuzhuang13/DenseNet
'''
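# Minimal usage sketch (illustrative only, not part of the original module):
#
#     from keras_contrib.applications.densenet import (DenseNet,
#                                                       DenseNetImageNet121)
#     # ImageNet variant with pre-trained weights.
#     model = DenseNetImageNet121(input_shape=(224, 224, 3), weights='imagenet')
#     # Small CIFAR-style DenseNet trained from scratch.
#     model = DenseNet(input_shape=(32, 32, 3), depth=40, weights=None, classes=10)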
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import warnings
from keras.models import Model
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Activation
from keras.layers import Reshape
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import UpSampling2D
from keras.layers import MaxPooling2D
from keras.layers import AveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import Input
from keras.layers import concatenate
from keras.layers import BatchNormalization
from keras.regularizers import l2
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras.engine.topology import get_source_inputs
from keras_applications.imagenet_utils import _obtain_input_shape
from keras.applications.imagenet_utils import preprocess_input as _preprocess_input
import keras.backend as K
from keras_contrib.layers import SubPixelUpscaling
DENSENET_121_WEIGHTS_PATH = (r'https://github.com/titu1994/DenseNet/releases/download'
r'/v3.0/DenseNet-BC-121-32.h5')
DENSENET_161_WEIGHTS_PATH = (r'https://github.com/titu1994/DenseNet/releases/download'
r'/v3.0/DenseNet-BC-161-48.h5')
DENSENET_169_WEIGHTS_PATH = (r'https://github.com/titu1994/DenseNet/releases/download'
r'/v3.0/DenseNet-BC-169-32.h5')
DENSENET_121_WEIGHTS_PATH_NO_TOP = (r'https://github.com/titu1994/DenseNet/releases/'
r'download/v3.0/DenseNet-BC-121-32-no-top.h5')
DENSENET_161_WEIGHTS_PATH_NO_TOP = (r'https://github.com/titu1994/DenseNet/releases/'
r'download/v3.0/DenseNet-BC-161-48-no-top.h5')
DENSENET_169_WEIGHTS_PATH_NO_TOP = (r'https://github.com/titu1994/DenseNet/releases/'
r'download/v3.0/DenseNet-BC-169-32-no-top.h5')
def preprocess_input(x, data_format=None):
"""Preprocesses a tensor encoding a batch of images.
# Arguments
x: input Numpy tensor, 4D.
data_format: data format of the image tensor.
# Returns
Preprocessed tensor.
"""
x = _preprocess_input(x, data_format=data_format)
x *= 0.017 # scale values
return x
def DenseNet(input_shape=None,
depth=40,
nb_dense_block=3,
growth_rate=12,
nb_filter=-1,
nb_layers_per_block=-1,
bottleneck=False,
reduction=0.0,
dropout_rate=0.0,
weight_decay=1e-4,
subsample_initial_block=False,
include_top=True,
weights=None,
input_tensor=None,
pooling=None,
classes=10,
activation='softmax',
transition_pooling='avg'):
'''Instantiate the DenseNet architecture.
The model and the weights are compatible with both
TensorFlow and Theano. The dimension ordering
convention used by the model is the one
specified in your Keras config file.
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` dim ordering)
or `(3, 224, 224)` (with `channels_first` dim ordering).
            It should have exactly 3 input channels,
and width and height should be no smaller than 8.
E.g. `(224, 224, 3)` would be one valid value.
depth: number or layers in the DenseNet
nb_dense_block: number of dense blocks to add to end
growth_rate: number of filters to add per dense block
nb_filter: initial number of filters. -1 indicates initial
number of filters will default to 2 * growth_rate
nb_layers_per_block: number of layers in each dense block.
Can be a -1, positive integer or a list.
If -1, calculates nb_layer_per_block from the network depth.
If positive integer, a set number of layers per dense block.
If list, nb_layer is used as provided. Note that list size must
be nb_dense_block
bottleneck: flag to add bottleneck blocks in between dense blocks
reduction: reduction factor of transition blocks.
Note : reduction value is inverted to compute compression.
dropout_rate: dropout rate
weight_decay: weight decay rate
subsample_initial_block: Changes model type to suit different datasets.
Should be set to True for ImageNet, and False for CIFAR datasets.
When set to True, the initial convolution will be strided and
adds a MaxPooling2D before the initial dense block.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization) or
'imagenet' (pre-training on ImageNet)..
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
activation: Type of activation at the top layer. Can be one of
'softmax' or 'sigmoid'. Note that if sigmoid is used,
classes must be 1.
transition_pooling: `avg` for avg pooling (default), `max` for max pooling,
None for no pooling during scale transition blocks. Please note that this
default differs from the DenseNetFCN paper in accordance with the DenseNet
paper.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
'''
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as ImageNet with `include_top` '
'as true, `classes` should be 1000')
if activation not in ['softmax', 'sigmoid']:
raise ValueError('activation must be one of "softmax" or "sigmoid"')
if activation == 'sigmoid' and classes != 1:
raise ValueError('sigmoid activation can only be used when classes = 1')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=32,
min_size=8,
data_format=K.image_data_format(),
require_flatten=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = __create_dense_net(classes, img_input, include_top, depth, nb_dense_block,
growth_rate, nb_filter, nb_layers_per_block, bottleneck,
reduction, dropout_rate, weight_decay,
subsample_initial_block, pooling, activation,
transition_pooling)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='densenet')
# load weights
if weights == 'imagenet':
weights_loaded = False
if ((depth == 121) and (nb_dense_block == 4) and (growth_rate == 32) and
(nb_filter == 64) and (bottleneck is True) and (reduction == 0.5) and
subsample_initial_block):
if include_top:
weights_path = get_file('DenseNet-BC-121-32.h5',
DENSENET_121_WEIGHTS_PATH,
cache_subdir='models',
md5_hash='a439dd41aa672aef6daba4ee1fd54abd')
else:
weights_path = get_file('DenseNet-BC-121-32-no-top.h5',
DENSENET_121_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='55e62a6358af8a0af0eedf399b5aea99')
model.load_weights(weights_path, by_name=True)
weights_loaded = True
if ((depth == 161) and (nb_dense_block == 4) and (growth_rate == 48) and
(nb_filter == 96) and (bottleneck is True) and (reduction == 0.5) and
subsample_initial_block):
if include_top:
weights_path = get_file('DenseNet-BC-161-48.h5',
DENSENET_161_WEIGHTS_PATH,
cache_subdir='models',
md5_hash='6c326cf4fbdb57d31eff04333a23fcca')
else:
weights_path = get_file('DenseNet-BC-161-48-no-top.h5',
DENSENET_161_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='1a9476b79f6b7673acaa2769e6427b92')
model.load_weights(weights_path, by_name=True)
weights_loaded = True
if ((depth == 169) and (nb_dense_block == 4) and (growth_rate == 32) and
(nb_filter == 64) and (bottleneck is True) and (reduction == 0.5) and
subsample_initial_block):
if include_top:
weights_path = get_file('DenseNet-BC-169-32.h5',
DENSENET_169_WEIGHTS_PATH,
cache_subdir='models',
md5_hash='914869c361303d2e39dec640b4e606a6')
else:
weights_path = get_file('DenseNet-BC-169-32-no-top.h5',
DENSENET_169_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='89c19e8276cfd10585d5fadc1df6859e')
model.load_weights(weights_path, by_name=True)
weights_loaded = True
if weights_loaded:
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
if ((K.image_data_format() == 'channels_first') and
(K.backend() == 'tensorflow')):
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
print("Weights for the model were loaded successfully")
return model
def DenseNetFCN(input_shape, nb_dense_block=5, growth_rate=16, nb_layers_per_block=4,
reduction=0.0, dropout_rate=0.0, weight_decay=1E-4,
init_conv_filters=48, include_top=True, weights=None, input_tensor=None,
classes=1, activation='softmax', upsampling_conv=128,
upsampling_type='deconv', early_transition=False,
transition_pooling='max', initial_kernel_size=(3, 3)):
'''Instantiate the DenseNet FCN architecture.
Note that when using TensorFlow,
for best performance you should set
`image_data_format='channels_last'` in your Keras config
at ~/.keras/keras.json.
# Arguments
nb_dense_block: number of dense blocks to add to end (generally = 3)
growth_rate: number of filters to add per dense block
nb_layers_per_block: number of layers in each dense block.
Can be a positive integer or a list.
If positive integer, a set number of layers per dense block.
If list, nb_layer is used as provided. Note that list size must
be (nb_dense_block + 1)
reduction: reduction factor of transition blocks.
Note : reduction value is inverted to compute compression.
dropout_rate: dropout rate
weight_decay: weight decay factor
init_conv_filters: number of layers in the initial convolution layer
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization) or
'cifar10' (pre-training on CIFAR-10)..
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(32, 32, 3)` (with `channels_last` dim ordering)
or `(3, 32, 32)` (with `channels_first` dim ordering).
            It should have exactly 3 input channels,
and width and height should be no smaller than 8.
E.g. `(200, 200, 3)` would be one valid value.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
activation: Type of activation at the top layer. Can be one of 'softmax'
or 'sigmoid'. Note that if sigmoid is used, classes must be 1.
upsampling_conv: number of convolutional layers in upsampling via subpixel
convolution
upsampling_type: Can be one of 'deconv', 'upsampling' and
'subpixel'. Defines type of upsampling algorithm used.
early_transition: Start with an extra initial transition down and end with
an extra transition up to reduce the network size.
initial_kernel_size: The first Conv2D kernel might vary in size based on the
application, this parameter makes it configurable.
# Returns
A Keras model instance.
'''
if weights not in {None}:
raise ValueError('The `weights` argument should be '
'`None` (random initialization) as no '
'model weights are provided.')
upsampling_type = upsampling_type.lower()
if upsampling_type not in ['upsampling', 'deconv', 'subpixel']:
raise ValueError('Parameter "upsampling_type" must be one of "upsampling", '
'"deconv" or "subpixel".')
if input_shape is None:
raise ValueError('For fully convolutional models, '
'input shape must be supplied.')
if type(nb_layers_per_block) is not list and nb_dense_block < 1:
raise ValueError('Number of dense layers per block must be greater than 1. '
'Argument value was %d.' % nb_layers_per_block)
if activation not in ['softmax', 'sigmoid']:
raise ValueError('activation must be one of "softmax" or "sigmoid"')
if activation == 'sigmoid' and classes != 1:
raise ValueError('sigmoid activation can only be used when classes = 1')
# Determine proper input shape
min_size = 2 ** nb_dense_block
if K.image_data_format() == 'channels_first':
if input_shape is not None:
if ((input_shape[1] is not None and input_shape[1] < min_size) or
(input_shape[2] is not None and input_shape[2] < min_size)):
raise ValueError('Input size must be at least ' +
str(min_size) + 'x' + str(min_size) +
', got `input_shape=' + str(input_shape) + '`')
else:
input_shape = (classes, None, None)
else:
if input_shape is not None:
if ((input_shape[0] is not None and input_shape[0] < min_size) or
(input_shape[1] is not None and input_shape[1] < min_size)):
raise ValueError('Input size must be at least ' +
str(min_size) + 'x' + str(min_size) +
', got `input_shape=' + str(input_shape) + '`')
else:
input_shape = (None, None, classes)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = __create_fcn_dense_net(classes, img_input, include_top, nb_dense_block,
growth_rate, reduction, dropout_rate, weight_decay,
nb_layers_per_block, upsampling_conv, upsampling_type,
init_conv_filters, input_shape, activation,
early_transition, transition_pooling,
initial_kernel_size)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='fcn-densenet')
return model
def DenseNetImageNet121(input_shape=None,
bottleneck=True,
reduction=0.5,
dropout_rate=0.0,
weight_decay=1e-4,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
activation='softmax'):
return DenseNet(input_shape, depth=121, nb_dense_block=4, growth_rate=32,
nb_filter=64, nb_layers_per_block=[6, 12, 24, 16],
bottleneck=bottleneck, reduction=reduction,
dropout_rate=dropout_rate, weight_decay=weight_decay,
subsample_initial_block=True, include_top=include_top,
weights=weights, input_tensor=input_tensor,
pooling=pooling, classes=classes, activation=activation)
def DenseNetImageNet169(input_shape=None,
bottleneck=True,
reduction=0.5,
dropout_rate=0.0,
weight_decay=1e-4,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
activation='softmax'):
return DenseNet(input_shape, depth=169, nb_dense_block=4, growth_rate=32,
nb_filter=64, nb_layers_per_block=[6, 12, 32, 32],
bottleneck=bottleneck, reduction=reduction,
dropout_rate=dropout_rate, weight_decay=weight_decay,
subsample_initial_block=True, include_top=include_top,
weights=weights, input_tensor=input_tensor,
pooling=pooling, classes=classes, activation=activation)
def DenseNetImageNet201(input_shape=None,
bottleneck=True,
reduction=0.5,
dropout_rate=0.0,
weight_decay=1e-4,
include_top=True,
weights=None,
input_tensor=None,
pooling=None,
classes=1000,
activation='softmax'):
return DenseNet(input_shape, depth=201, nb_dense_block=4, growth_rate=32,
nb_filter=64, nb_layers_per_block=[6, 12, 48, 32],
bottleneck=bottleneck, reduction=reduction,
dropout_rate=dropout_rate, weight_decay=weight_decay,
subsample_initial_block=True, include_top=include_top,
weights=weights, input_tensor=input_tensor,
pooling=pooling, classes=classes, activation=activation)
def DenseNetImageNet264(input_shape=None,
bottleneck=True,
reduction=0.5,
dropout_rate=0.0,
weight_decay=1e-4,
include_top=True,
weights=None,
input_tensor=None,
pooling=None,
classes=1000,
activation='softmax'):
return DenseNet(input_shape, depth=264, nb_dense_block=4, growth_rate=32,
nb_filter=64, nb_layers_per_block=[6, 12, 64, 48],
bottleneck=bottleneck, reduction=reduction,
dropout_rate=dropout_rate, weight_decay=weight_decay,
subsample_initial_block=True, include_top=include_top,
weights=weights, input_tensor=input_tensor,
pooling=pooling, classes=classes, activation=activation)
def DenseNetImageNet161(input_shape=None,
bottleneck=True,
reduction=0.5,
dropout_rate=0.0,
weight_decay=1e-4,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
activation='softmax'):
return DenseNet(input_shape, depth=161, nb_dense_block=4, growth_rate=48,
nb_filter=96, nb_layers_per_block=[6, 12, 36, 24],
bottleneck=bottleneck, reduction=reduction,
dropout_rate=dropout_rate, weight_decay=weight_decay,
subsample_initial_block=True, include_top=include_top,
weights=weights, input_tensor=input_tensor,
pooling=pooling, classes=classes, activation=activation)
def name_or_none(prefix, name):
return prefix + name if (prefix is not None and name is not None) else None
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None,
weight_decay=1e-4, block_prefix=None):
'''
Adds a convolution layer (with batch normalization and relu),
and optionally a bottleneck layer.
# Arguments
ip: Input tensor
nb_filter: integer, the dimensionality of the output space
(i.e. the number output of filters in the convolution)
bottleneck: if True, adds a bottleneck convolution block
dropout_rate: dropout rate
weight_decay: weight decay factor
block_prefix: str, for unique layer naming
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
output tensor of block
'''
with K.name_scope('ConvBlock'):
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
name=name_or_none(block_prefix, '_bn'))(ip)
x = Activation('relu')(x)
if bottleneck:
inter_channel = nb_filter * 4
x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal',
padding='same', use_bias=False,
kernel_regularizer=l2(weight_decay),
name=name_or_none(block_prefix, '_bottleneck_conv2D'))(x)
x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
name=name_or_none(block_prefix, '_bottleneck_bn'))(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same',
use_bias=False, name=name_or_none(block_prefix, '_conv2D'))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False,
dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True,
return_concat_list=False, block_prefix=None):
'''
Build a dense_block where the output of each conv_block is fed
to subsequent ones
# Arguments
x: input keras tensor
nb_layers: the number of conv_blocks to append to the model
nb_filter: integer, the dimensionality of the output space
(i.e. the number output of filters in the convolution)
growth_rate: growth rate of the dense block
bottleneck: if True, adds a bottleneck convolution block to
each conv_block
dropout_rate: dropout rate
weight_decay: weight decay factor
grow_nb_filters: if True, allows number of filters to grow
return_concat_list: set to True to return the list of
feature maps along with the actual output
block_prefix: str, for block unique naming
# Return
If return_concat_list is True, returns a list of the output
keras tensor, the number of filters and a list of all the
dense blocks added to the keras tensor
If return_concat_list is False, returns a list of the output
keras tensor and the number of filters
'''
with K.name_scope('DenseBlock'):
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
x_list = [x]
for i in range(nb_layers):
cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay,
block_prefix=name_or_none(block_prefix, '_%i' % i))
x_list.append(cb)
x = concatenate([x, cb], axis=concat_axis)
if grow_nb_filters:
nb_filter += growth_rate
if return_concat_list:
return x, nb_filter, x_list
else:
return x, nb_filter
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4,
block_prefix=None, transition_pooling='max'):
'''
Adds a pointwise convolution layer (with batch normalization and relu),
and an average pooling layer. The number of output convolution filters
can be reduced by appropriately reducing the compression parameter.
# Arguments
ip: input keras tensor
nb_filter: integer, the dimensionality of the output space
(i.e. the number output of filters in the convolution)
compression: calculated as 1 - reduction. Reduces the number
of feature maps in the transition block.
weight_decay: weight decay factor
block_prefix: str, for block unique naming
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, nb_filter * compression, rows / 2, cols / 2)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows / 2, cols / 2, nb_filter * compression)`
if data_format='channels_last'.
# Returns
a keras tensor
'''
with K.name_scope('Transition'):
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
name=name_or_none(block_prefix, '_bn'))(ip)
x = Activation('relu')(x)
x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal',
padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
name=name_or_none(block_prefix, '_conv2D'))(x)
if transition_pooling == 'avg':
x = AveragePooling2D((2, 2), strides=(2, 2))(x)
elif transition_pooling == 'max':
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
return x
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4,
block_prefix=None):
    '''Adds an upsampling block. The upsampling operation relies on the `type` parameter.
# Arguments
ip: input keras tensor
nb_filters: integer, the dimensionality of the output space
(i.e. the number output of filters in the convolution)
type: can be 'upsampling', 'subpixel', 'deconv'. Determines
type of upsampling performed
weight_decay: weight decay factor
block_prefix: str, for block unique naming
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, nb_filter, rows * 2, cols * 2)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows * 2, cols * 2, nb_filter)` if data_format='channels_last'.
# Returns
a keras tensor
'''
with K.name_scope('TransitionUp'):
if type == 'upsampling':
x = UpSampling2D(name=name_or_none(block_prefix, '_upsampling'))(ip)
elif type == 'subpixel':
x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), use_bias=False,
kernel_initializer='he_normal',
name=name_or_none(block_prefix, '_conv2D'))(ip)
x = SubPixelUpscaling(scale_factor=2,
name=name_or_none(block_prefix, '_subpixel'))(x)
x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), use_bias=False,
kernel_initializer='he_normal',
name=name_or_none(block_prefix, '_conv2D'))(x)
else:
x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same',
strides=(2, 2), kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay),
name=name_or_none(block_prefix, '_conv2DT'))(ip)
return x
def __create_dense_net(nb_classes, img_input, include_top, depth=40, nb_dense_block=3,
growth_rate=12, nb_filter=-1, nb_layers_per_block=-1,
bottleneck=False, reduction=0.0, dropout_rate=None,
weight_decay=1e-4, subsample_initial_block=False, pooling=None,
activation='softmax', transition_pooling='avg'):
''' Build the DenseNet model
# Arguments
nb_classes: number of classes
img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
include_top: flag to include the final Dense layer
depth: number or layers
nb_dense_block: number of dense blocks to add to end (generally = 3)
growth_rate: number of filters to add per dense block
nb_filter: initial number of filters. Default -1 indicates initial number
of filters is 2 * growth_rate
nb_layers_per_block: number of layers in each dense block.
Can be a -1, positive integer or a list.
If -1, calculates nb_layer_per_block from the depth of the network.
If positive integer, a set number of layers per dense block.
If list, nb_layer is used as provided. Note that list size must
be (nb_dense_block + 1)
bottleneck: add bottleneck blocks
reduction: reduction factor of transition blocks. Note : reduction value is
inverted to compute compression
dropout_rate: dropout rate
weight_decay: weight decay rate
subsample_initial_block: Changes model type to suit different datasets.
Should be set to True for ImageNet, and False for CIFAR datasets.
When set to True, the initial convolution will be strided and
adds a MaxPooling2D before the initial dense block.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
activation: Type of activation at the top layer. Can be one of 'softmax' or
'sigmoid'. Note that if sigmoid is used, classes must be 1.
transition_pooling: `avg` for avg pooling (default), `max` for max pooling,
None for no pooling during scale transition blocks. Please note that this
default differs from the DenseNetFCN paper in accordance with the DenseNet
paper.
# Returns
a keras tensor
# Raises
ValueError: in case of invalid argument for `reduction`
or `nb_dense_block`
'''
with K.name_scope('DenseNet'):
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
if reduction != 0.0:
if not (reduction <= 1.0 and reduction > 0.0):
raise ValueError('`reduction` value must lie between 0.0 and 1.0')
# layers in each dense block
if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
nb_layers = list(nb_layers_per_block) # Convert tuple to list
if len(nb_layers) != nb_dense_block:
                raise ValueError('If `nb_layers_per_block` is a list, its length '
                                 'must match `nb_dense_block`.')
final_nb_layer = nb_layers[-1]
nb_layers = nb_layers[:-1]
else:
if nb_layers_per_block == -1:
assert (depth - 4) % 3 == 0, ('Depth must be 3 N + 4 '
'if nb_layers_per_block == -1')
count = int((depth - 4) / 3)
if bottleneck:
count = count // 2
nb_layers = [count for _ in range(nb_dense_block)]
final_nb_layer = count
else:
final_nb_layer = nb_layers_per_block
nb_layers = [nb_layers_per_block] * nb_dense_block
# compute initial nb_filter if -1, else accept users initial nb_filter
if nb_filter <= 0:
nb_filter = 2 * growth_rate
# compute compression factor
compression = 1.0 - reduction
# Initial convolution
if subsample_initial_block:
initial_kernel = (7, 7)
initial_strides = (2, 2)
else:
initial_kernel = (3, 3)
initial_strides = (1, 1)
x = Conv2D(nb_filter, initial_kernel, kernel_initializer='he_normal',
padding='same', name='initial_conv2D', strides=initial_strides,
use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)
if subsample_initial_block:
x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
name='initial_bn')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
# Add dense blocks
for block_idx in range(nb_dense_block - 1):
x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter,
growth_rate, bottleneck=bottleneck,
dropout_rate=dropout_rate,
weight_decay=weight_decay,
block_prefix='dense_%i' % block_idx)
# add transition_block
x = __transition_block(x, nb_filter, compression=compression,
weight_decay=weight_decay,
block_prefix='tr_%i' % block_idx,
transition_pooling=transition_pooling)
nb_filter = int(nb_filter * compression)
# The last dense_block does not have a transition_block
x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate,
bottleneck=bottleneck, dropout_rate=dropout_rate,
weight_decay=weight_decay,
block_prefix='dense_%i' % (nb_dense_block - 1))
x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5, name='final_bn')(x)
x = Activation('relu')(x)
if include_top:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
x = Dense(nb_classes, activation=activation)(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
return x
def __create_fcn_dense_net(nb_classes, img_input, include_top, nb_dense_block=5,
growth_rate=12, reduction=0.0, dropout_rate=None,
weight_decay=1e-4, nb_layers_per_block=4,
nb_upsampling_conv=128, upsampling_type='deconv',
init_conv_filters=48, input_shape=None, activation='softmax',
early_transition=False, transition_pooling='max',
initial_kernel_size=(3, 3)):
''' Build the DenseNet-FCN model
# Arguments
nb_classes: number of classes
img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
include_top: flag to include the final Dense layer
nb_dense_block: number of dense blocks to add to end (generally = 3)
growth_rate: number of filters to add per dense block
reduction: reduction factor of transition blocks. Note : reduction value
is inverted to compute compression
dropout_rate: dropout rate
weight_decay: weight decay
nb_layers_per_block: number of layers in each dense block.
Can be a positive integer or a list.
If positive integer, a set number of layers per dense block.
If list, nb_layer is used as provided. Note that list size must
be (nb_dense_block + 1)
nb_upsampling_conv: number of convolutional layers in upsampling via subpixel
convolution
upsampling_type: Can be one of 'upsampling', 'deconv' and 'subpixel'. Defines
type of upsampling algorithm used.
input_shape: Only used for shape inference in fully convolutional networks.
activation: Type of activation at the top layer. Can be one of 'softmax' or
'sigmoid'. Note that if sigmoid is used, classes must be 1.
early_transition: Start with an extra initial transition down and end with an
extra transition up to reduce the network size.
transition_pooling: 'max' for max pooling (default), 'avg' for average pooling,
None for no pooling. Please note that this default differs from the DenseNet
paper in accordance with the DenseNetFCN paper.
initial_kernel_size: The first Conv2D kernel might vary in size based on the
application, this parameter makes it configurable.
# Returns
a keras tensor
# Raises
ValueError: in case of invalid argument for `reduction`,
`nb_dense_block` or `nb_upsampling_conv`.
'''
with K.name_scope('DenseNetFCN'):
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
if concat_axis == 1: # channels_first dim ordering
_, rows, cols = input_shape
else:
rows, cols, _ = input_shape
if reduction != 0.0:
if not (reduction <= 1.0 and reduction > 0.0):
raise ValueError('`reduction` value must lie between 0.0 and 1.0')
        # check that upsampling_conv has the minimum number of filters; the minimum
        # is set to 12, as at least 3 color channels are needed for correct upsampling
if not (nb_upsampling_conv > 12 and nb_upsampling_conv % 4 == 0):
raise ValueError('Parameter `nb_upsampling_conv` number of channels must '
'be a positive number divisible by 4 and greater than 12')
# layers in each dense block
if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
nb_layers = list(nb_layers_per_block) # Convert tuple to list
if len(nb_layers) != (nb_dense_block + 1):
                raise ValueError('If `nb_layers_per_block` is a list, its length '
                                 'must be (`nb_dense_block` + 1)')
bottleneck_nb_layers = nb_layers[-1]
rev_layers = nb_layers[::-1]
nb_layers.extend(rev_layers[1:])
else:
bottleneck_nb_layers = nb_layers_per_block
nb_layers = [nb_layers_per_block] * (2 * nb_dense_block + 1)
# compute compression factor
compression = 1.0 - reduction
# Initial convolution
x = Conv2D(init_conv_filters, initial_kernel_size,
kernel_initializer='he_normal', padding='same',
name='initial_conv2D', use_bias=False,
kernel_regularizer=l2(weight_decay))(img_input)
x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5, name='initial_bn')(x)
x = Activation('relu')(x)
nb_filter = init_conv_filters
skip_list = []
if early_transition:
x = __transition_block(x, nb_filter, compression=compression,
weight_decay=weight_decay, block_prefix='tr_early',
transition_pooling=transition_pooling)
# Add dense blocks and transition down block
for block_idx in range(nb_dense_block):
x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter,
growth_rate, dropout_rate=dropout_rate,
weight_decay=weight_decay,
block_prefix='dense_%i' % block_idx)
# Skip connection
skip_list.append(x)
# add transition_block
x = __transition_block(x, nb_filter, compression=compression,
weight_decay=weight_decay,
block_prefix='tr_%i' % block_idx,
transition_pooling=transition_pooling)
# this is calculated inside transition_down_block
nb_filter = int(nb_filter * compression)
# The last dense_block does not have a transition_down_block
# return the concatenated feature maps without the concatenation of the input
block_prefix = 'dense_%i' % nb_dense_block
_, nb_filter, concat_list = __dense_block(x, bottleneck_nb_layers, nb_filter,
growth_rate,
dropout_rate=dropout_rate,
weight_decay=weight_decay,
return_concat_list=True,
block_prefix=block_prefix)
skip_list = skip_list[::-1] # reverse the skip list
# Add dense blocks and transition up block
for block_idx in range(nb_dense_block):
n_filters_keep = growth_rate * nb_layers[nb_dense_block + block_idx]
# upsampling block must upsample only the feature maps (concat_list[1:]),
            # not the concatenation of the input with the feature maps (concat_list[0]).
l = concatenate(concat_list[1:], axis=concat_axis)
t = __transition_up_block(l, nb_filters=n_filters_keep,
type=upsampling_type, weight_decay=weight_decay,
block_prefix='tr_up_%i' % block_idx)
# concatenate the skip connection with the transition block
x = concatenate([t, skip_list[block_idx]], axis=concat_axis)
            # Don't allow the feature map size to grow in upsampling dense blocks
block_layer_index = nb_dense_block + 1 + block_idx
block_prefix = 'dense_%i' % (block_layer_index)
x_up, nb_filter, concat_list = __dense_block(x,
nb_layers[block_layer_index],
nb_filter=growth_rate,
growth_rate=growth_rate,
dropout_rate=dropout_rate,
weight_decay=weight_decay,
return_concat_list=True,
grow_nb_filters=False,
block_prefix=block_prefix)
if early_transition:
x_up = __transition_up_block(x_up, nb_filters=nb_filter,
type=upsampling_type,
weight_decay=weight_decay,
block_prefix='tr_up_early')
if include_top:
x = Conv2D(nb_classes, (1, 1), activation='linear', padding='same',
use_bias=False)(x_up)
if K.image_data_format() == 'channels_first':
channel, row, col = input_shape
else:
row, col, channel = input_shape
x = Reshape((row * col, nb_classes))(x)
x = Activation(activation)(x)
x = Reshape((row, col, nb_classes))(x)
else:
x = x_up
return x
| keras-contrib/keras_contrib/applications/densenet.py/0 | {
"file_path": "keras-contrib/keras_contrib/applications/densenet.py",
"repo_id": "keras-contrib",
"token_count": 25010
} | 15 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from keras import backend as K
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.layers import InputSpec
from keras.layers import Layer
from keras_contrib.utils.test_utils import to_tuple
class CosineDense(Layer):
"""A cosine normalized densely-connected NN layer
# Example
```python
# as first layer in a sequential model:
model = Sequential()
model.add(CosineDense(32, input_dim=16))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# this is equivalent to the above:
model = Sequential()
model.add(CosineDense(32, input_shape=(16,)))
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(CosineDense(32))
# Note that a regular Dense layer may work better as the final layer
```
# Arguments
units: Positive integer, dimensionality of the output space.
init: name of initialization function for the weights of the layer
(see [initializers](https://keras.io/initializers)),
or alternatively, Theano function to use for weights
initialization. This parameter is only relevant
if you don't pass a `weights` argument.
activation: name of activation function to use
(see [activations](https://keras.io/activations)),
or alternatively, elementwise Python function.
If you don't specify anything, no activation is applied
(ie. "linear" activation: a(x) = x).
weights: list of Numpy arrays to set as initial weights.
The list should have 2 elements, of shape `(input_dim, units)`
            and `(units,)` for weights and biases respectively.
kernel_regularizer: instance of [WeightRegularizer](
https://keras.io/regularizers)
(eg. L1 or L2 regularization), applied to the main weights matrix.
bias_regularizer: instance of [WeightRegularizer](
https://keras.io/regularizers), applied to the bias.
activity_regularizer: instance of [ActivityRegularizer](
https://keras.io/regularizers), applied to the network output.
kernel_constraint: instance of the [constraints](
https://keras.io/constraints/) module
(eg. maxnorm, nonneg), applied to the main weights matrix.
bias_constraint: instance of the [constraints](
https://keras.io/constraints/) module, applied to the bias.
use_bias: whether to include a bias
(i.e. make the layer affine rather than linear).
input_dim: dimensionality of the input (integer). This argument
(or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
# Input shape
nD tensor with shape: `(nb_samples, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(nb_samples, input_dim)`.
# Output shape
nD tensor with shape: `(nb_samples, ..., units)`.
For instance, for a 2D input with shape `(nb_samples, input_dim)`,
the output would have shape `(nb_samples, units)`.
# References
- [Cosine Normalization: Using Cosine Similarity Instead
of Dot Product in Neural Networks](https://arxiv.org/pdf/1702.05870.pdf)
"""
def __init__(self, units, kernel_initializer='glorot_uniform',
activation=None, weights=None,
kernel_regularizer=None, bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None, bias_constraint=None,
use_bias=True, **kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
self.kernel_initializer = initializers.get(kernel_initializer)
self.activation = activations.get(activation)
self.units = units
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.use_bias = use_bias
self.initial_weights = weights
super(CosineDense, self).__init__(**kwargs)
def build(self, input_shape):
input_shape = to_tuple(input_shape)
ndim = len(input_shape)
assert ndim >= 2
input_dim = input_shape[-1]
self.input_dim = input_dim
self.input_spec = [InputSpec(dtype=K.floatx(),
ndim=ndim)]
self.kernel = self.add_weight(shape=(input_dim, self.units),
initializer=self.kernel_initializer,
name='{}_W'.format(self.name),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units,),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def call(self, x, mask=None):
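        # Cosine normalization: divide the dot product of the input and the
        # kernel by the product of their L2 norms, so each output is the
        # cosine similarity between `x` and the corresponding kernel column.
        # When `use_bias` is True, the bias acts as the weight of an extra
        # constant input of 1, so it is folded into both norms below.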
if self.use_bias:
b, xb = self.bias, 1.
else:
b, xb = 0., 0.
xnorm = K.sqrt(K.sum(K.square(x), axis=-1, keepdims=True)
+ xb
+ K.epsilon())
Wnorm = K.sqrt(K.sum(K.square(self.kernel), axis=0)
+ K.square(b)
+ K.epsilon())
xWnorm = (xnorm * Wnorm)
output = K.dot(x, self.kernel) / xWnorm
if self.use_bias:
output += (self.bias / xWnorm)
return self.activation(output)
def compute_output_shape(self, input_shape):
assert input_shape
assert len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.units
return tuple(output_shape)
def get_config(self):
config = {
'units': self.units,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'activation': activations.serialize(self.activation),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'use_bias': self.use_bias
}
base_config = super(CosineDense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-contrib/keras_contrib/layers/core.py/0 | {
"file_path": "keras-contrib/keras_contrib/layers/core.py",
"repo_id": "keras-contrib",
"token_count": 3398
} | 16 |
from keras_contrib import activations
import keras.backend as K
import numpy as np
from numpy.testing import assert_allclose
def get_standard_values():
"""A set of floats used for testing squash.
"""
return np.array([[0, 0.1, 0.5, 0.9, 1.0]], dtype=K.floatx())
def test_squash_valid():
"""Test using a reference implementation of squash.
"""
def squash(x, axis=-1):
s_squared_norm = np.sum(np.square(x), axis) + 1e-7
scale = np.sqrt(s_squared_norm) / (0.5 + s_squared_norm)
return scale * x
x = K.placeholder(ndim=2)
f = K.function([x], [activations.squash(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = squash(test_values)
assert_allclose(result, expected, rtol=1e-05)
test_squash_valid()
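def test_squash_norm_bounded():
    """An extra sanity check (not from the reference implementation): the
    norm of the squashed vector stays strictly below 1 for these inputs.
    """
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.squash(x)])
    result = f([get_standard_values()])[0]
    norms = np.sqrt(np.sum(np.square(result), axis=-1))
    assert np.all(norms < 1.0)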
| keras-contrib/tests/keras_contrib/activations/test_squash.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/activations/test_squash.py",
"repo_id": "keras-contrib",
"token_count": 331
} | 17 |
import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras_contrib.utils.test_utils import layer_test
from keras_contrib.utils.test_utils import is_tf_keras
from keras import backend as K
from keras_contrib.layers import capsule
from keras.models import Sequential
@pytest.mark.parametrize('num_capsule', [10, 20])
@pytest.mark.parametrize('dim_capsule', [10, 20])
@pytest.mark.parametrize('routings', [3, 4])
@pytest.mark.parametrize('share_weights', [True, False])
@pytest.mark.parametrize('activation', ['sigmoid', 'relu'])
def test_capsule(num_capsule,
dim_capsule,
routings,
share_weights,
activation):
    # TODO: remove this once issue #25546 in the TensorFlow repo is fixed.
if is_tf_keras and not share_weights:
return
num_samples = 100
num_rows = 256
num_cols = 256
kwargs = {'num_capsule': num_capsule,
'dim_capsule': dim_capsule,
'routings': routings,
'share_weights': share_weights,
'activation': activation}
layer_test(capsule.Capsule,
kwargs=kwargs,
input_shape=(num_samples, num_rows, num_cols))
def test_capsule_correctness():
X = np.random.random((1, 1, 1))
model = Sequential()
model.add(capsule.Capsule(1, 1, 1, True, activation='sigmoid'))
model.compile(loss='mse', optimizer='rmsprop')
init_out = model.predict(X) # mock predict call to initialize weights
model.set_weights([np.zeros((1, 1, 1))])
out = model.predict(X)
assert_allclose(out, np.zeros((1, 1, 1), dtype=K.floatx()) + 0.5, atol=1e-5)
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/layers/test_capsule.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/layers/test_capsule.py",
"repo_id": "keras-contrib",
"token_count": 767
} | 18 |
import importlib
import inspect
import re
import sys
from itertools import compress
import pytest
modules = ['keras_contrib.layers',
'keras_contrib',
'keras_contrib.backend.tensorflow_backend',
'keras_contrib.wrappers',
'keras_contrib.utils',
'keras_contrib.callbacks',
'keras_contrib.activations',
'keras_contrib.losses',
'keras_contrib.optimizers']
accepted_name = ['from_config']
accepted_module = []
# Functions or classes with fewer than 'MIN_CODE_SIZE' lines can be ignored
MIN_CODE_SIZE = 10
def handle_class(name, member):
if is_accepted(name, member):
return
if member.__doc__ is None and not member_too_small(member):
raise ValueError("{} class doesn't have any documentation".format(name),
member.__module__, inspect.getmodule(member).__file__)
for n, met in inspect.getmembers(member):
if inspect.ismethod(met):
handle_method(n, met)
def handle_function(name, member):
if is_accepted(name, member) or member_too_small(member):
# We don't need to check this one.
return
doc = member.__doc__
if doc is None:
raise ValueError("{} function doesn't have any documentation".format(name),
member.__module__, inspect.getmodule(member).__file__)
args = list(inspect.signature(member).parameters.keys())
assert_args_presence(args, doc, member, name)
assert_function_style(name, member, doc, args)
assert_doc_style(name, member, doc)
def assert_doc_style(name, member, doc):
lines = doc.split("\n")
first_line = lines[0]
if len(first_line.strip()) == 0:
raise ValueError(
"{} the documentation should be on the first line.".format(name),
member.__module__)
if first_line.strip()[-1] != '.':
raise ValueError("{} first line should end with a '.'".format(name),
member.__module__)
def assert_function_style(name, member, doc, args):
code = inspect.getsource(member)
has_return = re.findall(r"\s*return \S+", code, re.MULTILINE)
if has_return and "# Returns" not in doc:
innerfunction = [inspect.getsource(x) for x in member.__code__.co_consts if
inspect.iscode(x)]
return_in_sub = [ret for code_inner in innerfunction for ret in
re.findall(r"\s*return \S+", code_inner, re.MULTILINE)]
if len(return_in_sub) < len(has_return):
raise ValueError("{} needs a '# Returns' section".format(name),
member.__module__)
has_raise = re.findall(r"^\s*raise \S+", code, re.MULTILINE)
if has_raise and "# Raises" not in doc:
innerfunction = [inspect.getsource(x) for x in member.__code__.co_consts if
inspect.iscode(x)]
raise_in_sub = [ret for code_inner in innerfunction for ret in
re.findall(r"\s*raise \S+", code_inner, re.MULTILINE)]
if len(raise_in_sub) < len(has_raise):
raise ValueError("{} needs a '# Raises' section".format(name),
member.__module__)
if len(args) > 0 and "# Arguments" not in doc:
raise ValueError("{} needs a '# Arguments' section".format(name),
member.__module__)
assert_blank_before(name, member, doc, ['# Arguments', '# Raises', '# Returns'])
def assert_blank_before(name, member, doc, keywords):
doc_lines = [x.strip() for x in doc.split('\n')]
for keyword in keywords:
if keyword in doc_lines:
index = doc_lines.index(keyword)
if doc_lines[index - 1] != '':
raise ValueError(
"{} '{}' should have a blank line above.".format(name, keyword),
member.__module__)
def is_accepted(name, member):
if 'keras' not in str(member.__module__):
return True
return name in accepted_name or member.__module__ in accepted_module
def member_too_small(member):
code = inspect.getsource(member).split('\n')
return len(code) < MIN_CODE_SIZE
def assert_args_presence(args, doc, member, name):
args_not_in_doc = [arg not in doc for arg in args]
if any(args_not_in_doc):
raise ValueError(
"{} {} arguments are not present in documentation ".format(name, list(
compress(args, args_not_in_doc))), member.__module__)
words = doc.replace('*', '').split()
# Check arguments styling
styles = [arg + ":" not in words for arg in args]
if any(styles):
raise ValueError(
"{} {} are not style properly 'argument': documentation".format(
name,
list(compress(args, styles))),
member.__module__)
# Check arguments order
indexes = [words.index(arg + ":") for arg in args]
if indexes != sorted(indexes):
raise ValueError(
"{} arguments order is different from the documentation".format(name),
member.__module__)
def handle_method(name, member):
if name in accepted_name or member.__module__ in accepted_module:
return
handle_function(name, member)
def handle_module(mod):
for name, mem in inspect.getmembers(mod):
if inspect.isclass(mem):
handle_class(name, mem)
elif inspect.isfunction(mem):
handle_function(name, mem)
elif 'keras' in name and inspect.ismodule(mem):
# Only test keras' modules
handle_module(mem)
@pytest.mark.skipif(sys.version_info < (3, 3), reason="requires python3.3")
@pytest.mark.parametrize('module', modules)
def test_doc(module):
mod = importlib.import_module(module)
handle_module(mod)
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/tooling/test_documentation.py/0 | {
"file_path": "keras-contrib/tests/tooling/test_documentation.py",
"repo_id": "keras-contrib",
"token_count": 2548
} | 19 |
"""Benchmark BERT model on GLUE/MRPC task.
To run the script, make sure you are in the benchmarks/ directory, and run the
command below:
```
python3 -m model_benchmark.bert_benchmark \
--epochs 2 \
--batch_size 32
```
"""
import time
import keras_nlp
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from absl import app
from absl import flags
from absl import logging
from model_benchmark.benchmark_utils import BenchmarkMetricsCallback
import keras_core as keras
flags.DEFINE_string("model_size", "small", "The size of model to benchmark.")
flags.DEFINE_string(
"mixed_precision_policy",
"mixed_float16",
"The global precision policy to use, e.g., 'mixed_float16' or 'float32'.",
)
flags.DEFINE_integer("epochs", 2, "The number of epochs.")
flags.DEFINE_integer("batch_size", 8, "Batch Size.")
FLAGS = flags.FLAGS
MODEL_SIZE_MAP = {
"tiny": "bert_tiny_en_uncased",
"small": "bert_small_en_uncased",
"base": "bert_base_en_uncased",
"large": "bert_large_en_uncased",
}
def load_data():
"""Load data.
Load GLUE/MRPC dataset, and convert the dictionary format to
(features, label), where `features` is a tuple of all input sentences.
"""
feature_names = ("sentence1", "sentence2")
def split_features(x):
        # GLUE comes with dictionary data; we convert it to a uniform format
# (features, label), where features is a tuple consisting of all
# features. This format is necessary for using KerasNLP preprocessors.
features = tuple([x[name] for name in feature_names])
label = x["label"]
return (features, label)
train_ds, test_ds, validation_ds = tfds.load(
"glue/mrpc",
split=["train", "test", "validation"],
)
train_ds = (
train_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE)
.batch(FLAGS.batch_size)
.prefetch(tf.data.AUTOTUNE)
)
test_ds = (
test_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE)
.batch(FLAGS.batch_size)
.prefetch(tf.data.AUTOTUNE)
)
validation_ds = (
validation_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE)
.batch(FLAGS.batch_size)
.prefetch(tf.data.AUTOTUNE)
)
return train_ds, test_ds, validation_ds
def load_model():
if FLAGS.model_size not in MODEL_SIZE_MAP.keys():
raise KeyError(
f"`model_size` must be one of {MODEL_SIZE_MAP.keys()}, but "
f"received {FLAGS.model_size}."
)
return keras_nlp.models.BertClassifier.from_preset(
MODEL_SIZE_MAP[FLAGS.model_size], num_classes=2
)
def main(_):
keras.mixed_precision.set_dtype_policy(FLAGS.mixed_precision_policy)
logging.info(
"Benchmarking configs...\n"
"=========================\n"
f"MODEL: BERT {FLAGS.model_size}\n"
f"TASK: glue/mrpc \n"
f"BATCH_SIZE: {FLAGS.batch_size}\n"
f"EPOCHS: {FLAGS.epochs}\n"
"=========================\n"
)
# Load datasets.
train_ds, test_ds, validation_ds = load_data()
# Load the model.
model = load_model()
# Set loss and metrics.
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metrics = [keras.metrics.SparseCategoricalAccuracy()]
# Configure optimizer.
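    # PolynomialDecay with its default power of 1.0 decays the learning rate
    # linearly from 5e-4 to 0 over the total number of training steps
    # (steps per epoch * number of epochs).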
lr = keras.optimizers.schedules.PolynomialDecay(
5e-4,
decay_steps=train_ds.cardinality() * FLAGS.epochs,
end_learning_rate=0.0,
)
optimizer = keras.optimizers.AdamW(lr, weight_decay=0.01)
optimizer.exclude_from_weight_decay(
var_names=["LayerNorm", "layer_norm", "bias"]
)
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
benchmark_metrics_callback = BenchmarkMetricsCallback(
start_batch=1,
stop_batch=train_ds.cardinality().numpy() - 1,
)
# Start training.
logging.info("Starting Training...")
st = time.time()
history = model.fit(
train_ds,
validation_data=validation_ds,
epochs=FLAGS.epochs,
callbacks=[benchmark_metrics_callback],
)
wall_time = time.time() - st
validation_accuracy = history.history["val_sparse_categorical_accuracy"][-1]
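    # examples_per_second scales the mean per-batch throughput recorded by
    # the callback by the batch size.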
examples_per_second = (
np.mean(np.array(benchmark_metrics_callback.state["throughput"]))
* FLAGS.batch_size
)
logging.info("Training Finished!")
logging.info(f"Wall Time: {wall_time:.4f} seconds.")
logging.info(f"Validation Accuracy: {validation_accuracy:.4f}")
logging.info(f"examples_per_second: {examples_per_second:.4f}")
if __name__ == "__main__":
app.run(main)
| keras-core/benchmarks/model_benchmark/bert_benchmark.py/0 | {
"file_path": "keras-core/benchmarks/model_benchmark/bert_benchmark.py",
"repo_id": "keras-core",
"token_count": 1984
} | 20 |
import numpy as np
from keras_core import Model
from keras_core import layers
from keras_core import losses
from keras_core import metrics
from keras_core import optimizers
class MyModel(Model):
def __init__(self, hidden_dim, output_dim):
super().__init__()
self.dense1 = layers.Dense(hidden_dim, activation="relu")
self.dense2 = layers.Dense(hidden_dim, activation="relu")
self.dense3 = layers.Dense(output_dim)
def call(self, x):
x = self.dense1(x)
x = self.dense2(x)
return self.dense3(x)
model = MyModel(hidden_dim=256, output_dim=16)
x = np.random.random((50000, 128))
y = np.random.random((50000, 16))
batch_size = 32
epochs = 6
model.compile(
optimizer=optimizers.SGD(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
history = model.fit(
x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2
)
print("History:")
print(history.history)
model.summary()
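# Quick sanity check (added for illustration, not part of the original demo):
# predictions for a few samples have shape (num_samples, output_dim).
preds = model.predict(x[:3])
print("Sample predictions shape:", preds.shape)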
| keras-core/examples/demo_subclass.py/0 | {
"file_path": "keras-core/examples/demo_subclass.py",
"repo_id": "keras-core",
"token_count": 412
} | 21 |
"""
Title: CycleGAN
Author: [A_K_Nain](https://twitter.com/A_K_Nain)
Date created: 2020/08/12
Last modified: 2020/08/12
Description: Implementation of CycleGAN.
Accelerator: GPU
"""
"""
## CycleGAN
CycleGAN is a model that aims to solve the image-to-image translation
problem. The goal of the image-to-image translation problem is to learn the
mapping between an input image and an output image using a training set of
aligned image pairs. However, obtaining paired examples isn't always feasible.
CycleGAN tries to learn this mapping without requiring paired input-output images,
using cycle-consistent adversarial networks.
- [Paper](https://arxiv.org/pdf/1703.10593.pdf)
- [Original implementation](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix)
"""
"""
## Setup
"""
import numpy as np
import matplotlib.pyplot as plt
import keras_core as keras
from keras_core import layers
from keras_core import ops
import tensorflow as tf
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
autotune = tf.data.AUTOTUNE
"""
## Prepare the dataset
In this example, we will be using the
[horse to zebra](https://www.tensorflow.org/datasets/catalog/cycle_gan#cycle_ganhorse2zebra)
dataset.
"""
# Load the horse-zebra dataset using tensorflow-datasets.
dataset, _ = tfds.load(
"cycle_gan/horse2zebra", with_info=True, as_supervised=True
)
train_horses, train_zebras = dataset["trainA"], dataset["trainB"]
test_horses, test_zebras = dataset["testA"], dataset["testB"]
# Define the standard image size.
orig_img_size = (286, 286)
# Size of the random crops to be used during training.
input_img_size = (256, 256, 3)
# Weights initializer for the layers.
kernel_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
# Gamma initializer for instance normalization.
gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
buffer_size = 256
batch_size = 1
def normalize_img(img):
img = ops.cast(img, dtype="float32")
# Map values in the range [-1, 1]
return (img / 127.5) - 1.0
def preprocess_train_image(img, label):
# Random flip
img = tf.image.random_flip_left_right(img)
# Resize to the original size first
img = tf.image.resize(img, [*orig_img_size])
    # Random crop to 256x256
img = tf.image.random_crop(img, size=[*input_img_size])
# Normalize the pixel values in the range [-1, 1]
img = normalize_img(img)
return img
def preprocess_test_image(img, label):
# Only resizing and normalization for the test images.
img = tf.image.resize(img, [input_img_size[0], input_img_size[1]])
img = normalize_img(img)
return img
"""
## Create `Dataset` objects
"""
# Apply the preprocessing operations to the training data
train_horses = (
train_horses.map(preprocess_train_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
train_zebras = (
train_zebras.map(preprocess_train_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
# Apply the preprocessing operations to the test data
test_horses = (
test_horses.map(preprocess_test_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
test_zebras = (
test_zebras.map(preprocess_test_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
"""
## Visualize some samples
"""
_, ax = plt.subplots(4, 2, figsize=(10, 15))
for i, samples in enumerate(zip(train_horses.take(4), train_zebras.take(4))):
horse = (((samples[0][0] * 127.5) + 127.5).numpy()).astype(np.uint8)
zebra = (((samples[1][0] * 127.5) + 127.5).numpy()).astype(np.uint8)
ax[i, 0].imshow(horse)
ax[i, 1].imshow(zebra)
plt.show()
"""
## Building blocks used in the CycleGAN generators and discriminators
"""
class ReflectionPadding2D(layers.Layer):
"""Implements Reflection Padding as a layer.
Args:
        padding (tuple): Amount of padding for the
spatial dimensions.
Returns:
A padded tensor with the same type as the input tensor.
"""
def __init__(self, padding=(1, 1), **kwargs):
self.padding = tuple(padding)
super().__init__(**kwargs)
def call(self, input_tensor, mask=None):
padding_width, padding_height = self.padding
padding_tensor = [
[0, 0],
[padding_height, padding_height],
[padding_width, padding_width],
[0, 0],
]
return ops.pad(
input_tensor, ops.convert_to_tensor(padding_tensor), mode="reflect"
)
def residual_block(
x,
activation,
kernel_initializer=kernel_init,
kernel_size=(3, 3),
strides=(1, 1),
padding="valid",
gamma_initializer=gamma_init,
use_bias=False,
):
dim = x.shape[-1]
input_tensor = x
x = ReflectionPadding2D()(input_tensor)
x = layers.Conv2D(
dim,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = layers.GroupNormalization(
groups=x.shape[-1], gamma_initializer=gamma_initializer
)(x)
x = activation(x)
x = ReflectionPadding2D()(x)
x = layers.Conv2D(
dim,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = layers.GroupNormalization(
groups=x.shape[-1], gamma_initializer=gamma_initializer
)(x)
x = layers.add([input_tensor, x])
return x
def downsample(
x,
filters,
activation,
kernel_initializer=kernel_init,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
gamma_initializer=gamma_init,
use_bias=False,
):
x = layers.Conv2D(
filters,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = layers.GroupNormalization(
groups=x.shape[-1], gamma_initializer=gamma_initializer
)(x)
if activation:
x = activation(x)
return x
def upsample(
x,
filters,
activation,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
kernel_initializer=kernel_init,
gamma_initializer=gamma_init,
use_bias=False,
):
x = layers.Conv2DTranspose(
filters,
kernel_size,
strides=strides,
padding=padding,
kernel_initializer=kernel_initializer,
use_bias=use_bias,
)(x)
x = layers.GroupNormalization(
groups=x.shape[-1], gamma_initializer=gamma_initializer
)(x)
if activation:
x = activation(x)
return x
"""
## Build the generators
The generator consists of downsampling blocks, nine residual blocks,
and upsampling blocks. The structure of the generator is the following:
```
c7s1-64 ==> Conv block with `relu` activation, filter size of 7
d128 ====|
|-> 2 downsampling blocks
d256 ====|
R256 ====|
R256 |
R256 |
R256 |
R256 |-> 9 residual blocks
R256 |
R256 |
R256 |
R256 ====|
u128 ====|
|-> 2 upsampling blocks
u64 ====|
c7s1-3 => Last conv block with `tanh` activation, filter size of 7.
```
"""
def get_resnet_generator(
filters=64,
num_downsampling_blocks=2,
num_residual_blocks=9,
num_upsample_blocks=2,
gamma_initializer=gamma_init,
name=None,
):
img_input = layers.Input(shape=input_img_size, name=name + "_img_input")
x = ReflectionPadding2D(padding=(3, 3))(img_input)
x = layers.Conv2D(
filters, (7, 7), kernel_initializer=kernel_init, use_bias=False
)(x)
x = layers.GroupNormalization(
groups=x.shape[-1], gamma_initializer=gamma_initializer
)(x)
x = layers.Activation("relu")(x)
# Downsampling
for _ in range(num_downsampling_blocks):
filters *= 2
x = downsample(x, filters=filters, activation=layers.Activation("relu"))
# Residual blocks
for _ in range(num_residual_blocks):
x = residual_block(x, activation=layers.Activation("relu"))
# Upsampling
for _ in range(num_upsample_blocks):
filters //= 2
x = upsample(x, filters, activation=layers.Activation("relu"))
# Final block
x = ReflectionPadding2D(padding=(3, 3))(x)
x = layers.Conv2D(3, (7, 7), padding="valid")(x)
x = layers.Activation("tanh")(x)
model = keras.models.Model(img_input, x, name=name)
return model
"""
## Build the discriminators
The discriminators implement the following architecture:
`C64->C128->C256->C512`
"""
def get_discriminator(
filters=64, kernel_initializer=kernel_init, num_downsampling=3, name=None
):
img_input = layers.Input(shape=input_img_size, name=name + "_img_input")
x = layers.Conv2D(
filters,
(4, 4),
strides=(2, 2),
padding="same",
kernel_initializer=kernel_initializer,
)(img_input)
x = layers.LeakyReLU(0.2)(x)
num_filters = filters
for num_downsample_block in range(3):
num_filters *= 2
if num_downsample_block < 2:
x = downsample(
x,
filters=num_filters,
activation=layers.LeakyReLU(0.2),
kernel_size=(4, 4),
strides=(2, 2),
)
else:
x = downsample(
x,
filters=num_filters,
activation=layers.LeakyReLU(0.2),
kernel_size=(4, 4),
strides=(1, 1),
)
x = layers.Conv2D(
1,
(4, 4),
strides=(1, 1),
padding="same",
kernel_initializer=kernel_initializer,
)(x)
model = keras.models.Model(inputs=img_input, outputs=x, name=name)
return model
# Get the generators
gen_G = get_resnet_generator(name="generator_G")
gen_F = get_resnet_generator(name="generator_F")
# Get the discriminators
disc_X = get_discriminator(name="discriminator_X")
disc_Y = get_discriminator(name="discriminator_Y")
"""
## Build the CycleGAN model
We will override the `train_step()` method of the `Model` class
for training via `fit()`.
"""
class CycleGan(keras.Model):
def __init__(
self,
generator_G,
generator_F,
discriminator_X,
discriminator_Y,
lambda_cycle=10.0,
lambda_identity=0.5,
):
super().__init__()
self.gen_G = generator_G
self.gen_F = generator_F
self.disc_X = discriminator_X
self.disc_Y = discriminator_Y
self.lambda_cycle = lambda_cycle
self.lambda_identity = lambda_identity
def call(self, inputs):
return (
self.disc_X(inputs),
self.disc_Y(inputs),
self.gen_G(inputs),
self.gen_F(inputs),
)
def compile(
self,
gen_G_optimizer,
gen_F_optimizer,
disc_X_optimizer,
disc_Y_optimizer,
gen_loss_fn,
disc_loss_fn,
):
super().compile()
self.gen_G_optimizer = gen_G_optimizer
self.gen_F_optimizer = gen_F_optimizer
self.disc_X_optimizer = disc_X_optimizer
self.disc_Y_optimizer = disc_Y_optimizer
self.generator_loss_fn = gen_loss_fn
self.discriminator_loss_fn = disc_loss_fn
self.cycle_loss_fn = keras.losses.MeanAbsoluteError()
self.identity_loss_fn = keras.losses.MeanAbsoluteError()
def train_step(self, batch_data):
# x is Horse and y is zebra
real_x, real_y = batch_data
# For CycleGAN, we need to calculate different
# kinds of losses for the generators and discriminators.
# We will perform the following steps here:
#
# 1. Pass real images through the generators and get the generated images
# 2. Pass the generated images back to the generators to check if we
# can predict the original image from the generated image.
# 3. Do an identity mapping of the real images using the generators.
# 4. Pass the generated images in 1) to the corresponding discriminators.
# 5. Calculate the generators total loss (adversarial + cycle + identity)
# 6. Calculate the discriminators loss
# 7. Update the weights of the generators
# 8. Update the weights of the discriminators
# 9. Return the losses in a dictionary
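        # `persistent=True` is required because we call `tape.gradient()`
        # several times below (once per generator and once per discriminator).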
with tf.GradientTape(persistent=True) as tape:
# Horse to fake zebra
fake_y = self.gen_G(real_x, training=True)
# Zebra to fake horse -> y2x
fake_x = self.gen_F(real_y, training=True)
# Cycle (Horse to fake zebra to fake horse): x -> y -> x
cycled_x = self.gen_F(fake_y, training=True)
# Cycle (Zebra to fake horse to fake zebra) y -> x -> y
cycled_y = self.gen_G(fake_x, training=True)
# Identity mapping
same_x = self.gen_F(real_x, training=True)
same_y = self.gen_G(real_y, training=True)
# Discriminator output
disc_real_x = self.disc_X(real_x, training=True)
disc_fake_x = self.disc_X(fake_x, training=True)
disc_real_y = self.disc_Y(real_y, training=True)
disc_fake_y = self.disc_Y(fake_y, training=True)
# Generator adversarial loss
gen_G_loss = self.generator_loss_fn(disc_fake_y)
gen_F_loss = self.generator_loss_fn(disc_fake_x)
# Generator cycle loss
cycle_loss_G = (
self.cycle_loss_fn(real_y, cycled_y) * self.lambda_cycle
)
cycle_loss_F = (
self.cycle_loss_fn(real_x, cycled_x) * self.lambda_cycle
)
# Generator identity loss
id_loss_G = (
self.identity_loss_fn(real_y, same_y)
* self.lambda_cycle
* self.lambda_identity
)
id_loss_F = (
self.identity_loss_fn(real_x, same_x)
* self.lambda_cycle
* self.lambda_identity
)
# Total generator loss
total_loss_G = gen_G_loss + cycle_loss_G + id_loss_G
total_loss_F = gen_F_loss + cycle_loss_F + id_loss_F
# Discriminator loss
disc_X_loss = self.discriminator_loss_fn(disc_real_x, disc_fake_x)
disc_Y_loss = self.discriminator_loss_fn(disc_real_y, disc_fake_y)
# Get the gradients for the generators
grads_G = tape.gradient(total_loss_G, self.gen_G.trainable_variables)
grads_F = tape.gradient(total_loss_F, self.gen_F.trainable_variables)
# Get the gradients for the discriminators
disc_X_grads = tape.gradient(
disc_X_loss, self.disc_X.trainable_variables
)
disc_Y_grads = tape.gradient(
disc_Y_loss, self.disc_Y.trainable_variables
)
# Update the weights of the generators
self.gen_G_optimizer.apply_gradients(
zip(grads_G, self.gen_G.trainable_variables)
)
self.gen_F_optimizer.apply_gradients(
zip(grads_F, self.gen_F.trainable_variables)
)
# Update the weights of the discriminators
self.disc_X_optimizer.apply_gradients(
zip(disc_X_grads, self.disc_X.trainable_variables)
)
self.disc_Y_optimizer.apply_gradients(
zip(disc_Y_grads, self.disc_Y.trainable_variables)
)
return {
"G_loss": total_loss_G,
"F_loss": total_loss_F,
"D_X_loss": disc_X_loss,
"D_Y_loss": disc_Y_loss,
}
"""
## Create a callback that periodically saves generated images
"""
class GANMonitor(keras.callbacks.Callback):
"""A callback to generate and save images after each epoch"""
def __init__(self, num_img=4):
self.num_img = num_img
def on_epoch_end(self, epoch, logs=None):
_, ax = plt.subplots(4, 2, figsize=(12, 12))
for i, img in enumerate(test_horses.take(self.num_img)):
prediction = self.model.gen_G(img)[0].numpy()
prediction = (prediction * 127.5 + 127.5).astype(np.uint8)
img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8)
ax[i, 0].imshow(img)
ax[i, 1].imshow(prediction)
ax[i, 0].set_title("Input image")
ax[i, 1].set_title("Translated image")
ax[i, 0].axis("off")
ax[i, 1].axis("off")
prediction = keras.utils.array_to_img(prediction)
prediction.save(
"generated_img_{i}_{epoch}.png".format(i=i, epoch=epoch + 1)
)
plt.show()
plt.close()
"""
## Train the end-to-end model
"""
# Loss function for evaluating adversarial loss
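# (Least-squares GAN objective, as in the CycleGAN paper: mean squared error
# between the discriminator outputs and the real/fake labels.)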
adv_loss_fn = keras.losses.MeanSquaredError()
# Define the loss function for the generators
def generator_loss_fn(fake):
fake_loss = adv_loss_fn(ops.ones_like(fake), fake)
return fake_loss
# Define the loss function for the discriminators
def discriminator_loss_fn(real, fake):
real_loss = adv_loss_fn(ops.ones_like(real), real)
fake_loss = adv_loss_fn(ops.zeros_like(fake), fake)
return (real_loss + fake_loss) * 0.5
# Create cycle gan model
cycle_gan_model = CycleGan(
generator_G=gen_G,
generator_F=gen_F,
discriminator_X=disc_X,
discriminator_Y=disc_Y,
)
# Compile the model
cycle_gan_model.compile(
gen_G_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
gen_F_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
disc_X_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
disc_Y_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
gen_loss_fn=generator_loss_fn,
disc_loss_fn=discriminator_loss_fn,
)
# Callbacks
plotter = GANMonitor()
checkpoint_filepath = (
"./model_checkpoints/cyclegan_checkpoints.{epoch:03d}.weights.h5"
)
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath, save_weights_only=True
)
# Here we will train the model for just one epoch as each epoch takes around
# 7 minutes on a single P100 backed machine.
cycle_gan_model.fit(
tf.data.Dataset.zip((train_horses, train_zebras)),
epochs=1,
callbacks=[plotter, model_checkpoint_callback],
)
"""
Test the performance of the model.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/CycleGAN)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/CycleGAN).
"""
# This model was trained for 90 epochs. We will be loading those weights
# here. Once the weights are loaded, we will take a few samples from the test
# data and check the model's performance.
"""shell
curl -LO https://github.com/freedomtan/cyclegan-keras-core/archive/refs/tags/2.0.zip
unzip -qq 2.0.zip
"""
# Load the checkpoints
weight_file = "./cyclegan-keras-core-2.0/model_checkpoints/cyclegan_checkpoints.090.weights.h5"
cycle_gan_model.load_weights(weight_file)
print("Weights loaded successfully")
_, ax = plt.subplots(4, 2, figsize=(10, 15))
for i, img in enumerate(test_horses.take(4)):
prediction = cycle_gan_model.gen_G(img, training=False)[0].numpy()
prediction = (prediction * 127.5 + 127.5).astype(np.uint8)
img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8)
ax[i, 0].imshow(img)
ax[i, 1].imshow(prediction)
ax[i, 0].set_title("Input image")
ax[i, 0].set_title("Input image")
ax[i, 1].set_title("Translated image")
ax[i, 0].axis("off")
ax[i, 1].axis("off")
prediction = keras.utils.array_to_img(prediction)
prediction.save("predicted_img_{i}.png".format(i=i))
plt.tight_layout()
plt.show()
| keras-core/examples/keras_io/tensorflow/generative/cyclegan.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/generative/cyclegan.py",
"repo_id": "keras-core",
"token_count": 8834
} | 22 |
"""
Title: Character-level recurrent sequence-to-sequence model
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2017/09/29
Last modified: 2020/04/26
Description: Character-level recurrent sequence-to-sequence model.
Accelerator: GPU
"""
"""
## Introduction
This example demonstrates how to implement a basic character-level
recurrent sequence-to-sequence model. We apply it to translating
short English sentences into short French sentences,
character-by-character. Note that it is fairly unusual to
do character-level machine translation, as word-level
models are more common in this domain.
**Summary of the algorithm**
- We start with input sequences from a domain (e.g. English sentences)
and corresponding target sequences from another domain
(e.g. French sentences).
- An encoder LSTM turns input sequences to 2 state vectors
(we keep the last LSTM state and discard the outputs).
- A decoder LSTM is trained to turn the target sequences into
the same sequence but offset by one timestep in the future,
a training process called "teacher forcing" in this context.
It uses as initial state the state vectors from the encoder.
Effectively, the decoder learns to generate `targets[t+1...]`
given `targets[...t]`, conditioned on the input sequence.
- In inference mode, when we want to decode unknown input sequences, we:
- Encode the input sequence into state vectors
- Start with a target sequence of size 1
(just the start-of-sequence character)
- Feed the state vectors and 1-char target sequence
to the decoder to produce predictions for the next character
- Sample the next character using these predictions
(we simply use argmax).
- Append the sampled character to the target sequence
- Repeat until we generate the end-of-sequence character or we
hit the character limit.
"""
"""
## Setup
"""
import numpy as np
import keras_core as keras
import os
from pathlib import Path
"""
## Download the data
"""
fpath = keras.utils.get_file(
origin="http://www.manythings.org/anki/fra-eng.zip"
)
dirpath = Path(fpath).parent.absolute()
os.system(f"unzip -q {fpath} -d {dirpath}")
"""
## Configuration
"""
batch_size = 64 # Batch size for training.
epochs = 100 # Number of epochs to train for.
latent_dim = 256 # Latent dimensionality of the encoding space.
num_samples = 10000 # Number of samples to train on.
# Path to the data txt file on disk.
data_path = os.path.join(dirpath, "fra.txt")
"""
## Prepare the data
"""
# Vectorize the data.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, "r", encoding="utf-8") as f:
lines = f.read().split("\n")
for line in lines[: min(num_samples, len(lines) - 1)]:
input_text, target_text, _ = line.split("\t")
# We use "tab" as the "start sequence" character
# for the targets, and "\n" as "end sequence" character.
target_text = "\t" + target_text + "\n"
input_texts.append(input_text)
target_texts.append(target_text)
for char in input_text:
if char not in input_characters:
input_characters.add(char)
for char in target_text:
if char not in target_characters:
target_characters.add(char)
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])
print("Number of samples:", len(input_texts))
print("Number of unique input tokens:", num_encoder_tokens)
print("Number of unique output tokens:", num_decoder_tokens)
print("Max sequence length for inputs:", max_encoder_seq_length)
print("Max sequence length for outputs:", max_decoder_seq_length)
input_token_index = dict([(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
[(char, i) for i, char in enumerate(target_characters)]
)
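# Each sentence is one-hot encoded into an array of shape
# (max_seq_length, num_tokens); timesteps beyond the end of a sentence are
# filled with the one-hot index of the space character.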
encoder_input_data = np.zeros(
(len(input_texts), max_encoder_seq_length, num_encoder_tokens),
dtype="float32",
)
decoder_input_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype="float32",
)
decoder_target_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype="float32",
)
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
for t, char in enumerate(input_text):
encoder_input_data[i, t, input_token_index[char]] = 1.0
encoder_input_data[i, t + 1 :, input_token_index[" "]] = 1.0
for t, char in enumerate(target_text):
# decoder_target_data is ahead of decoder_input_data by one timestep
decoder_input_data[i, t, target_token_index[char]] = 1.0
if t > 0:
# decoder_target_data will be ahead by one timestep
# and will not include the start character.
decoder_target_data[i, t - 1, target_token_index[char]] = 1.0
decoder_input_data[i, t + 1 :, target_token_index[" "]] = 1.0
decoder_target_data[i, t:, target_token_index[" "]] = 1.0
"""
## Build the model
"""
# Define an input sequence and process it.
encoder_inputs = keras.Input(shape=(None, num_encoder_tokens))
encoder = keras.layers.LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = keras.Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = keras.layers.LSTM(
latent_dim, return_sequences=True, return_state=True
)
decoder_outputs, _, _ = decoder_lstm(
decoder_inputs, initial_state=encoder_states
)
decoder_dense = keras.layers.Dense(num_decoder_tokens, activation="softmax")
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)
"""
## Train the model
"""
model.compile(
optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"]
)
model.fit(
[encoder_input_data, decoder_input_data],
decoder_target_data,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2,
)
# Save model
model.save("s2s_model.keras")
"""
## Run inference (sampling)
1. encode input and retrieve initial decoder state
2. run one step of decoder with this initial state
and a "start of sequence" token as target.
Output will be the next target token.
3. Repeat with the current target token and current states
"""
# Define sampling models
# Restore the model and construct the encoder and decoder.
model = keras.models.load_model("s2s_model.keras")
encoder_inputs = model.input[0] # input_1
encoder_outputs, state_h_enc, state_c_enc = model.layers[2].output # lstm_1
encoder_states = [state_h_enc, state_c_enc]
encoder_model = keras.Model(encoder_inputs, encoder_states)
decoder_inputs = model.input[1] # input_2
decoder_state_input_h = keras.Input(shape=(latent_dim,))
decoder_state_input_c = keras.Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_lstm = model.layers[3]
decoder_outputs, state_h_dec, state_c_dec = decoder_lstm(
decoder_inputs, initial_state=decoder_states_inputs
)
decoder_states = [state_h_dec, state_c_dec]
decoder_dense = model.layers[4]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = keras.Model(
[decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states
)
# Reverse-lookup token index to decode sequences back to
# something readable.
reverse_input_char_index = dict(
(i, char) for char, i in input_token_index.items()
)
reverse_target_char_index = dict(
(i, char) for char, i in target_token_index.items()
)
def decode_sequence(input_seq):
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seq, verbose=0)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1, 1, num_decoder_tokens))
# Populate the first character of target sequence with the start character.
target_seq[0, 0, target_token_index["\t"]] = 1.0
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = ""
while not stop_condition:
output_tokens, h, c = decoder_model.predict(
[target_seq] + states_value, verbose=0
)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_char = reverse_target_char_index[sampled_token_index]
decoded_sentence += sampled_char
# Exit condition: either hit max length
# or find stop character.
if (
sampled_char == "\n"
or len(decoded_sentence) > max_decoder_seq_length
):
stop_condition = True
# Update the target sequence (of length 1).
target_seq = np.zeros((1, 1, num_decoder_tokens))
target_seq[0, 0, sampled_token_index] = 1.0
# Update states
states_value = [h, c]
return decoded_sentence
"""
You can now generate decoded sentences as such:
"""
for seq_index in range(20):
# Take one sequence (part of the training set)
# for trying out decoding.
input_seq = encoder_input_data[seq_index : seq_index + 1]
decoded_sentence = decode_sequence(input_seq)
print("-")
print("Input sentence:", input_texts[seq_index])
print("Decoded sentence:", decoded_sentence)
| keras-core/examples/keras_io/tensorflow/nlp/lstm_seq2seq.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/nlp/lstm_seq2seq.py",
"repo_id": "keras-core",
"token_count": 3631
} | 23 |
"""
Title: Timeseries anomaly detection using an Autoencoder
Author: [pavithrasv](https://github.com/pavithrasv)
Date created: 2020/05/31
Last modified: 2020/05/31
Description: Detect anomalies in a timeseries using an Autoencoder.
Accelerator: GPU
"""
"""
## Introduction
This script demonstrates how you can use a reconstruction convolutional
autoencoder model to detect anomalies in timeseries data.
"""
"""
## Setup
"""
import numpy as np
import pandas as pd
import keras_core as keras
from keras_core import layers
from matplotlib import pyplot as plt
"""
## Load the data
We will use the [Numenta Anomaly Benchmark(NAB)](
https://www.kaggle.com/boltzmannbrain/nab) dataset. It provides artificial
timeseries data containing labeled anomalous periods of behavior. Data are
ordered, timestamped, single-valued metrics.
We will use the `art_daily_small_noise.csv` file for training and the
`art_daily_jumpsup.csv` file for testing. The simplicity of this dataset
allows us to demonstrate anomaly detection effectively.
"""
master_url_root = "https://raw.githubusercontent.com/numenta/NAB/master/data/"
df_small_noise_url_suffix = "artificialNoAnomaly/art_daily_small_noise.csv"
df_small_noise_url = master_url_root + df_small_noise_url_suffix
df_small_noise = pd.read_csv(
df_small_noise_url, parse_dates=True, index_col="timestamp"
)
df_daily_jumpsup_url_suffix = "artificialWithAnomaly/art_daily_jumpsup.csv"
df_daily_jumpsup_url = master_url_root + df_daily_jumpsup_url_suffix
df_daily_jumpsup = pd.read_csv(
df_daily_jumpsup_url, parse_dates=True, index_col="timestamp"
)
"""
## Quick look at the data
"""
print(df_small_noise.head())
print(df_daily_jumpsup.head())
"""
## Visualize the data
### Timeseries data without anomalies
We will use the following data for training.
"""
fig, ax = plt.subplots()
df_small_noise.plot(legend=False, ax=ax)
plt.show()
"""
### Timeseries data with anomalies
We will use the following data for testing and see if the sudden jump up in the
data is detected as an anomaly.
"""
fig, ax = plt.subplots()
df_daily_jumpsup.plot(legend=False, ax=ax)
plt.show()
"""
## Prepare training data
Get data values from the training timeseries data file and normalize the
`value` data. We have a `value` for every 5 mins for 14 days.
- 24 * 60 / 5 = **288 timesteps per day**
- 288 * 14 = **4032 data points** in total
"""
# Normalize and save the mean and std we get,
# for normalizing test data.
training_mean = df_small_noise.mean()
training_std = df_small_noise.std()
df_training_value = (df_small_noise - training_mean) / training_std
print("Number of training samples:", len(df_training_value))
"""
### Create sequences
Create sequences combining `TIME_STEPS` contiguous data values from the
training data.
"""
TIME_STEPS = 288
# Generated training sequences for use in the model.
def create_sequences(values, time_steps=TIME_STEPS):
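    # Sliding windows over the series: returns an array of shape
    # (len(values) - time_steps + 1, time_steps, num_features).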
output = []
for i in range(len(values) - time_steps + 1):
output.append(values[i : (i + time_steps)])
return np.stack(output)
x_train = create_sequences(df_training_value.values)
print("Training input shape: ", x_train.shape)
"""
## Build a model
We will build a convolutional reconstruction autoencoder model. The model will
take input of shape `(batch_size, sequence_length, num_features)` and return
output of the same shape. In this case, `sequence_length` is 288 and
`num_features` is 1.
"""
model = keras.Sequential(
[
layers.Input(shape=(x_train.shape[1], x_train.shape[2])),
layers.Conv1D(
filters=32,
kernel_size=7,
padding="same",
strides=2,
activation="relu",
),
layers.Dropout(rate=0.2),
layers.Conv1D(
filters=16,
kernel_size=7,
padding="same",
strides=2,
activation="relu",
),
layers.Conv1DTranspose(
filters=16,
kernel_size=7,
padding="same",
strides=2,
activation="relu",
),
layers.Dropout(rate=0.2),
layers.Conv1DTranspose(
filters=32,
kernel_size=7,
padding="same",
strides=2,
activation="relu",
),
layers.Conv1DTranspose(filters=1, kernel_size=7, padding="same"),
]
)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss="mse")
model.summary()
"""
## Train the model
Please note that we are using `x_train` as both the input and the target
since this is a reconstruction model.
"""
history = model.fit(
x_train,
x_train,
epochs=50,
batch_size=128,
validation_split=0.1,
callbacks=[
keras.callbacks.EarlyStopping(
monitor="val_loss", patience=5, mode="min"
)
],
)
"""
Let's plot training and validation loss to see how the training went.
"""
plt.plot(history.history["loss"], label="Training Loss")
plt.plot(history.history["val_loss"], label="Validation Loss")
plt.legend()
plt.show()
"""
## Detecting anomalies
We will detect anomalies by determining how well our model can reconstruct
the input data.
1. Find MAE loss on training samples.
2. Find max MAE loss value. This is the worst our model has performed trying
to reconstruct a sample. We will make this the `threshold` for anomaly
detection.
3. If the reconstruction loss for a sample is greater than this `threshold`
value then we can infer that the model is seeing a pattern that it isn't
familiar with. We will label this sample as an `anomaly`.
"""
# Get train MAE loss.
x_train_pred = model.predict(x_train)
train_mae_loss = np.mean(np.abs(x_train_pred - x_train), axis=1)
plt.hist(train_mae_loss, bins=50)
plt.xlabel("Train MAE loss")
plt.ylabel("No of samples")
plt.show()
# Get reconstruction loss threshold.
threshold = np.max(train_mae_loss)
print("Reconstruction error threshold: ", threshold)
"""
### Compare reconstruction
Just for fun, let's see how our model has reconstructed the first sample.
This is the 288 timesteps from day 1 of our training dataset.
"""
# Checking how the first sequence is learnt
plt.plot(x_train[0])
plt.plot(x_train_pred[0])
plt.show()
"""
### Prepare test data
"""
df_test_value = (df_daily_jumpsup - training_mean) / training_std
fig, ax = plt.subplots()
df_test_value.plot(legend=False, ax=ax)
plt.show()
# Create sequences from test values.
x_test = create_sequences(df_test_value.values)
print("Test input shape: ", x_test.shape)
# Get test MAE loss.
x_test_pred = model.predict(x_test)
test_mae_loss = np.mean(np.abs(x_test_pred - x_test), axis=1)
test_mae_loss = test_mae_loss.reshape((-1))
plt.hist(test_mae_loss, bins=50)
plt.xlabel("test MAE loss")
plt.ylabel("No of samples")
plt.show()
# Detect all the samples which are anomalies.
anomalies = test_mae_loss > threshold
print("Number of anomaly samples: ", np.sum(anomalies))
print("Indices of anomaly samples: ", np.where(anomalies))
"""
## Plot anomalies
We now know the samples of the data which are anomalies. With this, we will
find the corresponding `timestamps` from the original test data. We will be
using the following method to do that:
Let's say time_steps = 3 and we have 10 training values. Our `x_train` will
look like this:
- 0, 1, 2
- 1, 2, 3
- 2, 3, 4
- 3, 4, 5
- 4, 5, 6
- 5, 6, 7
- 6, 7, 8
- 7, 8, 9
All except the initial and the final time_steps-1 data values, will appear in
`time_steps` number of samples. So, if we know that the samples
[(3, 4, 5), (4, 5, 6), (5, 6, 7)] are anomalies, we can say that the data point
5 is an anomaly.
"""
# data i is an anomaly if samples [(i - timesteps + 1) to (i)] are anomalies
anomalous_data_indices = []
for data_idx in range(TIME_STEPS - 1, len(df_test_value) - TIME_STEPS + 1):
if np.all(anomalies[data_idx - TIME_STEPS + 1 : data_idx]):
anomalous_data_indices.append(data_idx)
"""
Let's overlay the anomalies on the original test data plot.
"""
df_subset = df_daily_jumpsup.iloc[anomalous_data_indices]
fig, ax = plt.subplots()
df_daily_jumpsup.plot(legend=False, ax=ax)
df_subset.plot(legend=False, ax=ax, color="r")
plt.show()
| keras-core/examples/keras_io/timeseries/timeseries_anomaly_detection.py/0 | {
"file_path": "keras-core/examples/keras_io/timeseries/timeseries_anomaly_detection.py",
"repo_id": "keras-core",
"token_count": 3054
} | 24 |
"""
Title: Simple MNIST convnet
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2015/06/19
Last modified: 2020/04/21
Description: A simple convnet that achieves ~99% test accuracy on MNIST.
Accelerator: GPU
"""
"""
## Setup
"""
import numpy as np
import keras_core as keras
from keras_core import layers
"""
## Prepare the data
"""
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
"""
## Build the model
"""
model = keras.Sequential(
[
keras.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
"""
## Train the model
"""
batch_size = 128
epochs = 15
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
model.fit(
x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1
)
"""
## Evaluate the trained model
"""
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
| keras-core/examples/keras_io/vision/mnist_convnet.py/0 | {
"file_path": "keras-core/examples/keras_io/vision/mnist_convnet.py",
"repo_id": "keras-core",
"token_count": 766
} | 25 |
"""
Title: Transfer learning & fine-tuning
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/04/15
Last modified: 2023/06/25
Description: Complete guide to transfer learning & fine-tuning in Keras.
Accelerator: GPU
"""
"""
## Setup
"""
import numpy as np
import keras_core as keras
from keras_core import layers
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
"""
## Introduction
**Transfer learning** consists of taking features learned on one problem, and
leveraging them on a new, similar problem. For instance, features from a model that has
learned to identify raccoons may be useful to kick-start a model meant to identify
tanukis.
Transfer learning is usually done for tasks where your dataset has too little data to
train a full-scale model from scratch.
The most common incarnation of transfer learning in the context of deep learning is the
following workflow:
1. Take layers from a previously trained model.
2. Freeze them, so as to avoid destroying any of the information they contain during
future training rounds.
3. Add some new, trainable layers on top of the frozen layers. They will learn to turn
the old features into predictions on a new dataset.
4. Train the new layers on your dataset.
A last, optional step, is **fine-tuning**, which consists of unfreezing the entire
model you obtained above (or part of it), and re-training it on the new data with a
very low learning rate. This can potentially achieve meaningful improvements, by
incrementally adapting the pretrained features to the new data.
First, we will go over the Keras `trainable` API in detail, which underlies most
transfer learning & fine-tuning workflows.
Then, we'll demonstrate the typical workflow by taking a model pretrained on the
ImageNet dataset, and retraining it on the Kaggle "cats vs dogs" classification
dataset.
This is adapted from
[Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python)
and the 2016 blog post
["building powerful image classification models using very little data"](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html).
"""
"""
## Freezing layers: understanding the `trainable` attribute
Layers & models have three weight attributes:
- `weights` is the list of all weights variables of the layer.
- `trainable_weights` is the list of those that are meant to be updated (via gradient
descent) to minimize the loss during training.
- `non_trainable_weights` is the list of those that aren't meant to be trained.
Typically they are updated by the model during the forward pass.
**Example: the `Dense` layer has 2 trainable weights (kernel & bias)**
"""
layer = keras.layers.Dense(3)
layer.build((None, 4)) # Create the weights
print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
"""
In general, all weights are trainable weights. The only built-in layer that has
non-trainable weights is the `BatchNormalization` layer. It uses non-trainable weights
to keep track of the mean and variance of its inputs during training.
To learn how to use non-trainable weights in your own custom layers, see the
[guide to writing new layers from scratch](https://keras.io/guides/making_new_layers_and_models_via_subclassing/).
**Example: the `BatchNormalization` layer has 2 trainable weights and 2 non-trainable
weights**
"""
layer = keras.layers.BatchNormalization()
layer.build((None, 4)) # Create the weights
print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
"""
Layers & models also feature a boolean attribute `trainable`. Its value can be changed.
Setting `layer.trainable` to `False` moves all the layer's weights from trainable to
non-trainable. This is called "freezing" the layer: the state of a frozen layer won't
be updated during training (either when training with `fit()` or when training with
any custom loop that relies on `trainable_weights` to apply gradient updates).
**Example: setting `trainable` to `False`**
"""
layer = keras.layers.Dense(3)
layer.build((None, 4)) # Create the weights
layer.trainable = False # Freeze the layer
print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
"""
When a trainable weight becomes non-trainable, its value is no longer updated during
training.
"""
# Make a model with 2 layers
layer1 = keras.layers.Dense(3, activation="relu")
layer2 = keras.layers.Dense(3, activation="sigmoid")
model = keras.Sequential([keras.Input(shape=(3,)), layer1, layer2])
# Freeze the first layer
layer1.trainable = False
# Keep a copy of the weights of layer1 for later reference
initial_layer1_weights_values = layer1.get_weights()
# Train the model
model.compile(optimizer="adam", loss="mse")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
# Check that the weights of layer1 have not changed during training
final_layer1_weights_values = layer1.get_weights()
np.testing.assert_allclose(
initial_layer1_weights_values[0], final_layer1_weights_values[0]
)
np.testing.assert_allclose(
initial_layer1_weights_values[1], final_layer1_weights_values[1]
)
"""
Do not confuse the `layer.trainable` attribute with the argument `training` in
`layer.__call__()` (which controls whether the layer should run its forward pass in
inference mode or training mode). For more information, see the
[Keras FAQ](
https://keras.io/getting_started/faq/#whats-the-difference-between-the-training-argument-in-call-and-the-trainable-attribute).
"""
"""
## Recursive setting of the `trainable` attribute
If you set `trainable = False` on a model or on any layer that has sublayers,
all children layers become non-trainable as well.
**Example:**
"""
inner_model = keras.Sequential(
[
keras.Input(shape=(3,)),
keras.layers.Dense(3, activation="relu"),
keras.layers.Dense(3, activation="relu"),
]
)
model = keras.Sequential(
[
keras.Input(shape=(3,)),
inner_model,
keras.layers.Dense(3, activation="sigmoid"),
]
)
model.trainable = False # Freeze the outer model
assert inner_model.trainable == False # All layers in `model` are now frozen
assert (
inner_model.layers[0].trainable == False
) # `trainable` is propagated recursively
"""
## The typical transfer-learning workflow
This leads us to how a typical transfer learning workflow can be implemented in Keras:
1. Instantiate a base model and load pre-trained weights into it.
2. Freeze all layers in the base model by setting `trainable = False`.
3. Create a new model on top of the output of one (or several) layers from the base
model.
4. Train your new model on your new dataset.
Note that an alternative, more lightweight workflow could also be:
1. Instantiate a base model and load pre-trained weights into it.
2. Run your new dataset through it and record the output of one (or several) layers
from the base model. This is called **feature extraction**.
3. Use that output as input data for a new, smaller model.
A key advantage of that second workflow is that you only run the base model once on
your data, rather than once per epoch of training. So it's a lot faster & cheaper.
An issue with that second workflow, though, is that it doesn't allow you to dynamically
modify the input data of your new model during training, which is required when doing
data augmentation, for instance. Transfer learning is typically used for tasks when
your new dataset has too little data to train a full-scale model from scratch, and in
such scenarios data augmentation is very important. So in what follows, we will focus
on the first workflow.
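For reference, here is a minimal sketch of that second, feature-extraction workflow
(the `dataset` name below is a placeholder for any iterable of `(images, labels)`
batches of the right image size):

```python
base_model = keras.applications.Xception(
    weights="imagenet", input_shape=(150, 150, 3), include_top=False, pooling="avg"
)
base_model.trainable = False

# Run the frozen base model once over the data and cache the outputs.
features, labels = [], []
for batch_images, batch_labels in dataset:
    features.append(base_model(batch_images, training=False))
    labels.append(batch_labels)
features = np.concatenate(features)
labels = np.concatenate(labels)

# Train a small classifier on the cached features.
classifier = keras.Sequential(
    [keras.Input(shape=features.shape[1:]), layers.Dense(1)]
)
classifier.compile(
    optimizer="adam", loss=keras.losses.BinaryCrossentropy(from_logits=True)
)
classifier.fit(features, labels, epochs=5)
```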
Here's what the first workflow looks like in Keras:
First, instantiate a base model with pre-trained weights.
```python
base_model = keras.applications.Xception(
weights='imagenet', # Load weights pre-trained on ImageNet.
input_shape=(150, 150, 3),
include_top=False) # Do not include the ImageNet classifier at the top.
```
Then, freeze the base model.
```python
base_model.trainable = False
```
Create a new model on top.
```python
inputs = keras.Input(shape=(150, 150, 3))
# We make sure that the base_model is running in inference mode here,
# by passing `training=False`. This is important for fine-tuning, as you will
# learn in a few paragraphs.
x = base_model(inputs, training=False)
# Convert features of shape `base_model.output_shape[1:]` to vectors
x = keras.layers.GlobalAveragePooling2D()(x)
# A Dense classifier with a single unit (binary classification)
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
```
Train the model on new data.
```python
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()])
model.fit(new_dataset, epochs=20, callbacks=..., validation_data=...)
```
"""
"""
## Fine-tuning
Once your model has converged on the new data, you can try to unfreeze all or part of
the base model and retrain the whole model end-to-end with a very low learning rate.
This is an optional last step that can potentially give you incremental improvements.
It could also potentially lead to quick overfitting -- keep that in mind.
It is critical to only do this step *after* the model with frozen layers has been
trained to convergence. If you mix randomly-initialized trainable layers with
trainable layers that hold pre-trained features, the randomly-initialized layers will
cause very large gradient updates during training, which will destroy your pre-trained
features.
It's also critical to use a very low learning rate at this stage, because
you are training a much larger model than in the first round of training, on a dataset
that is typically very small.
As a result, you are at risk of overfitting very quickly if you apply large weight
updates. Here, you only want to readapt the pretrained weights in an incremental way.
This is how to implement fine-tuning of the whole base model:
```python
# Unfreeze the base model
base_model.trainable = True
# It's important to recompile your model after you make any changes
# to the `trainable` attribute of any inner layer, so that your changes
# are taken into account
model.compile(optimizer=keras.optimizers.Adam(1e-5), # Very low learning rate
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()])
# Train end-to-end. Be careful to stop before you overfit!
model.fit(new_dataset, epochs=10, callbacks=..., validation_data=...)
```
**Important note about `compile()` and `trainable`**
Calling `compile()` on a model is meant to "freeze" the behavior of that model. This
implies that the `trainable`
attribute values at the time the model is compiled should be preserved throughout the
lifetime of that model,
until `compile` is called again. Hence, if you change any `trainable` value, make sure
to call `compile()` again on your
model for your changes to be taken into account.
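For example (schematic -- assuming `model` is an already-compiled model; the layer
name below is hypothetical):

```python
model.get_layer("block1_conv1").trainable = True  # hypothetical layer name
model.compile(  # recompile so the change is taken into account by `fit()`
    optimizer=keras.optimizers.Adam(1e-5),
    loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
```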
**Important notes about `BatchNormalization` layer**
Many image models contain `BatchNormalization` layers. That layer is a special case on
every imaginable count. Here are a few things to keep in mind.
- `BatchNormalization` contains 2 non-trainable weights that get updated during
training. These are the variables tracking the mean and variance of the inputs.
- When you set `bn_layer.trainable = False`, the `BatchNormalization` layer will
run in inference mode, and will not update its mean & variance statistics. This is not
the case for other layers in general, as
[weight trainability & inference/training modes are two orthogonal concepts](
https://keras.io/getting_started/faq/#whats-the-difference-between-the-training-argument-in-call-and-the-trainable-attribute).
But the two are tied in the case of the `BatchNormalization` layer.
- When you unfreeze a model that contains `BatchNormalization` layers in order to do
fine-tuning, you should keep the `BatchNormalization` layers in inference mode by
passing `training=False` when calling the base model.
Otherwise the updates applied to the non-trainable weights will suddenly destroy
what the model has learned.
You'll see this pattern in action in the end-to-end example at the end of this guide.
"""
"""
## An end-to-end example: fine-tuning an image classification model on a cats vs. dogs dataset
To solidify these concepts, let's walk you through a concrete end-to-end transfer
learning & fine-tuning example. We will load the Xception model, pre-trained on
ImageNet, and use it on the Kaggle "cats vs. dogs" classification dataset.
"""
"""
### Getting the data
First, let's fetch the cats vs. dogs dataset using TFDS. If you have your own dataset,
you'll probably want to use the utility
`keras.utils.image_dataset_from_directory` to generate similar labeled
dataset objects from a set of images on disk filed into class-specific folders.
Transfer learning is most useful when working with very small datasets. To keep our
dataset small, we will use 40% of the original training data (25,000 images) for
training, 10% for validation, and 10% for testing.
"""
tfds.disable_progress_bar()
train_ds, validation_ds, test_ds = tfds.load(
"cats_vs_dogs",
# Reserve 10% for validation and 10% for test
split=["train[:40%]", "train[40%:50%]", "train[50%:60%]"],
as_supervised=True, # Include labels
)
print(f"Number of training samples: {train_ds.cardinality()}")
print(f"Number of validation samples: {validation_ds.cardinality()}")
print(f"Number of test samples: {test_ds.cardinality()}")
"""
These are the first 9 images in the training dataset -- as you can see, they're all
different sizes.
"""
plt.figure(figsize=(10, 10))
for i, (image, label) in enumerate(train_ds.take(9)):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image)
plt.title(int(label))
plt.axis("off")
"""
We can also see that label 1 is "dog" and label 0 is "cat".
"""
"""
### Standardizing the data
Our raw images have a variety of sizes. In addition, each pixel consists of 3 integer
values between 0 and 255 (RGB level values). This isn't a great fit for feeding a
neural network. We need to do 2 things:
- Standardize to a fixed image size. We pick 150x150.
- Normalize pixel values between -1 and 1. We'll do this using a `Normalization` layer as
part of the model itself.
In general, it's a good practice to develop models that take raw data as input, as
opposed to models that take already-preprocessed data. The reason being that, if your
model expects preprocessed data, any time you export your model to use it elsewhere
(in a web browser, in a mobile app), you'll need to reimplement the exact same
preprocessing pipeline. This gets very tricky very quickly. So we should do the least
possible amount of preprocessing before hitting the model.
Here, we'll do image resizing in the data pipeline (because a deep neural network can
only process contiguous batches of data), and we'll do the input value scaling as part
of the model, when we create it.
Let's resize images to 150x150:
"""
resize_fn = keras.layers.Resizing(150, 150)
train_ds = train_ds.map(lambda x, y: (resize_fn(x), y))
validation_ds = validation_ds.map(lambda x, y: (resize_fn(x), y))
test_ds = test_ds.map(lambda x, y: (resize_fn(x), y))
"""
### Using random data augmentation
When you don't have a large image dataset, it's a good practice to artificially
introduce sample diversity by applying random yet realistic transformations to
the training images, such as random horizontal flipping or small random rotations. This
helps expose the model to different aspects of the training data while slowing down
overfitting.
"""
augmentation_layers = [
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.1),
]
def data_augmentation(x):
for layer in augmentation_layers:
x = layer(x)
return x
train_ds = train_ds.map(lambda x, y: (data_augmentation(x), y))
"""
Let's batch the data and use prefetching to optimize loading speed.
"""
from tensorflow import data as tf_data
batch_size = 64
train_ds = train_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
validation_ds = (
validation_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
)
test_ds = test_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
"""
Let's visualize what the first image of the first batch looks like after various random
transformations:
"""
for images, labels in train_ds.take(1):
plt.figure(figsize=(10, 10))
first_image = images[0]
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
augmented_image = data_augmentation(np.expand_dims(first_image, 0))
plt.imshow(np.array(augmented_image[0]).astype("int32"))
plt.title(int(labels[0]))
plt.axis("off")
"""
## Build a model
Now let's build a model that follows the blueprint we've explained earlier.
Note that:
- We add a `Rescaling` layer to scale input values (initially in the `[0, 255]`
range) to the `[-1, 1]` range.
- We add a `Dropout` layer before the classification layer, for regularization.
- We make sure to pass `training=False` when calling the base model, so that
it runs in inference mode, so that batchnorm statistics don't get updated
even after we unfreeze the base model for fine-tuning.
"""
base_model = keras.applications.Xception(
weights="imagenet", # Load weights pre-trained on ImageNet.
input_shape=(150, 150, 3),
include_top=False,
) # Do not include the ImageNet classifier at the top.
# Freeze the base_model
base_model.trainable = False
# Create new model on top
inputs = keras.Input(shape=(150, 150, 3))
# Pre-trained Xception weights requires that input be scaled
# from (0, 255) to a range of (-1., +1.), the rescaling layer
# outputs: `(inputs * scale) + offset`
scale_layer = keras.layers.Rescaling(scale=1 / 127.5, offset=-1)
x = scale_layer(inputs)
# The base model contains batchnorm layers. We want to keep them in inference mode
# when we unfreeze the base model for fine-tuning, so we make sure that the
# base_model is running in inference mode here.
x = base_model(x, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.2)(x) # Regularize with dropout
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.summary(show_trainable=True)
"""
## Train the top layer
"""
model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()],
)
epochs = 2
print("Fitting the top layer of the model")
model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
"""
## Do a round of fine-tuning of the entire model
Finally, let's unfreeze the base model and train the entire model end-to-end with a low
learning rate.
Importantly, although the base model becomes trainable, it is still running in
inference mode since we passed `training=False` when we called it while building
the model. This means that the batch normalization layers inside won't update
their batch statistics. If they did, they would wreak havoc on the representations
learned by the model so far.
"""
# Unfreeze the base_model. Note that it keeps running in inference mode
# since we passed `training=False` when calling it. This means that
# the batchnorm layers will not update their batch statistics.
# This prevents the batchnorm layers from undoing all the training
# we've done so far.
base_model.trainable = True
model.summary(show_trainable=True)
model.compile(
optimizer=keras.optimizers.Adam(1e-5), # Low learning rate
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()],
)
epochs = 1
print("Fitting the end-to-end model")
model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
"""
Even after a single epoch here, fine-tuning gains us a nice improvement.
Let's evaluate the model on the test dataset:
"""
print("Test dataset evaluation")
model.evaluate(test_ds)
| keras-core/guides/transfer_learning.py/0 | {
"file_path": "keras-core/guides/transfer_learning.py",
"repo_id": "keras-core",
"token_count": 6061
} | 26 |
from keras_core import backend
from keras_core import layers
from keras_core.api_export import keras_core_export
from keras_core.applications import imagenet_utils
from keras_core.models import Functional
from keras_core.ops import operation_utils
from keras_core.utils import file_utils
WEIGHTS_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/"
"vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5"
)
WEIGHTS_PATH_NO_TOP = (
"https://storage.googleapis.com/tensorflow/"
"keras-applications/vgg16/"
"vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
@keras_core_export(
["keras_core.applications.vgg16.VGG16", "keras_core.applications.VGG16"]
)
def VGG16(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the VGG16 model.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
The default input size for this model is 224x224.
Note: each Keras Application expects a specific kind of input preprocessing.
For VGG16, call `keras_core.applications.vgg16.preprocess_input` on your
inputs before passing them to the model.
`vgg16.preprocess_input` will convert the input images from RGB to BGR,
then will zero-center each color channel with respect to the ImageNet
dataset, without scaling.
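
    Example (illustrative; the image file is hypothetical):

    ```python
    import numpy as np
    from keras_core.applications import vgg16
    from keras_core.utils import img_to_array, load_img

    model = vgg16.VGG16(weights="imagenet")
    image = load_img("elephant.jpg", target_size=(224, 224))
    x = np.expand_dims(img_to_array(image), axis=0)
    x = vgg16.preprocess_input(x)
    preds = model.predict(x)
    print(vgg16.decode_predictions(preds, top=3))
    ```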
Args:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is `False` (otherwise the input shape
has to be `(224, 224, 3)`
(with `channels_last` data format) or
            `(3, 224, 224)` (with `"channels_first"` data format)).
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer. When loading pretrained weights, `classifier_activation`
can only be `None` or `"softmax"`.
Returns:
A model instance.
"""
if not (weights in {"imagenet", None} or file_utils.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), 'imagenet' "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded. Received: "
f"weights={weights}"
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
"If using `weights='imagenet'` with `include_top=True`, "
"`classes` should be 1000. "
f"Received classes={classes}"
)
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = layers.Conv2D(
64, (3, 3), activation="relu", padding="same", name="block1_conv1"
)(img_input)
x = layers.Conv2D(
64, (3, 3), activation="relu", padding="same", name="block1_conv2"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block1_pool")(x)
# Block 2
x = layers.Conv2D(
128, (3, 3), activation="relu", padding="same", name="block2_conv1"
)(x)
x = layers.Conv2D(
128, (3, 3), activation="relu", padding="same", name="block2_conv2"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block2_pool")(x)
# Block 3
x = layers.Conv2D(
256, (3, 3), activation="relu", padding="same", name="block3_conv1"
)(x)
x = layers.Conv2D(
256, (3, 3), activation="relu", padding="same", name="block3_conv2"
)(x)
x = layers.Conv2D(
256, (3, 3), activation="relu", padding="same", name="block3_conv3"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block3_pool")(x)
# Block 4
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block4_conv1"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block4_conv2"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block4_conv3"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block4_pool")(x)
# Block 5
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block5_conv1"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block5_conv2"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block5_conv3"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block5_pool")(x)
if include_top:
# Classification block
x = layers.Flatten(name="flatten")(x)
x = layers.Dense(4096, activation="relu", name="fc1")(x)
x = layers.Dense(4096, activation="relu", name="fc2")(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes, activation=classifier_activation, name="predictions"
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = operation_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Functional(inputs, x, name="vgg16")
# Load weights.
if weights == "imagenet":
if include_top:
weights_path = file_utils.get_file(
"vgg16_weights_tf_dim_ordering_tf_kernels.h5",
WEIGHTS_PATH,
cache_subdir="models",
file_hash="64373286793e3c8b2b4e3219cbf3544b",
)
else:
weights_path = file_utils.get_file(
"vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5",
WEIGHTS_PATH_NO_TOP,
cache_subdir="models",
file_hash="6d6bbae143d832006294945121d1f1fc",
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_core_export("keras_core.applications.vgg16.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="caffe"
)
@keras_core_export("keras_core.applications.vgg16.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| keras-core/keras_core/applications/vgg16.py/0 | {
"file_path": "keras-core/keras_core/applications/vgg16.py",
"repo_id": "keras-core",
"token_count": 4022
} | 27 |
import numpy as np
from keras_core.api_export import keras_core_export
from keras_core.backend import config
from keras_core.backend.common import global_state
from keras_core.backend.common.name_scope import current_path
from keras_core.backend.common.stateless_scope import get_stateless_scope
from keras_core.backend.common.stateless_scope import in_stateless_scope
from keras_core.utils.naming import auto_name
class KerasVariable:
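    """Base class for backend-agnostic variables.

    Concrete backends subclass this and implement `_initialize`,
    `_direct_assign` and `_convert_to_tensor`.
    """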
def __init__(
self, initializer, shape=None, dtype=None, trainable=True, name=None
):
name = name or auto_name(self.__class__.__name__)
if not isinstance(name, str) or "/" in name:
raise ValueError(
"Argument `name` must be a string and "
"cannot contain character `/`. "
f"Received: name={name}"
)
self.name = name
parent_path = current_path()
if parent_path:
self.path = current_path() + "/" + self.name
else:
self.path = self.name
dtype = standardize_dtype(dtype)
self._dtype = dtype
self._shape = None
self._initializer = None
self.trainable = trainable
if callable(initializer):
if shape is None:
raise ValueError(
"When creating a Variable from an initializer, "
"the `shape` argument should be specified. "
f"Received: initializer={initializer} "
f"and shape={shape}"
)
if in_stateless_scope():
if callable(initializer):
self._value = None
self._initializer = initializer
self._shape = standardize_shape(shape)
register_uninitialized_variable(self)
else:
raise ValueError(
"You are attempting to create a variable "
"while in a stateless scope. This is disallowed. "
"Make sure that all variables are created "
"before you start using your layer/model objects.\n\n"
"In some cases, you might be seeing this error "
"because you need to "
"implement a `def build(self, input_shape)` method "
"on your layer/model, which will "
"create its variables.\n\n"
"In some other cases, you might be seeing this error "
"because you are instantiating a `Variable` and "
"assigning it to a layer without going through "
"self.add_variable()/self.add_weight(). Always prefer "
"using these methods "
"(with a `shape` and `initializer` argument)."
)
else:
if callable(initializer):
value = initializer(shape, dtype=dtype)
else:
value = initializer
self._initialize(value)
self._shape = tuple(self._value.shape)
self._ndim = len(self._shape)
def _deferred_initialize(self):
if self._value is not None:
raise ValueError(f"Variable {self.path} is already initialized.")
if in_stateless_scope():
raise ValueError(
"You are attempting to initialize a variable "
"while in a stateless scope. This is disallowed. "
"Make sure that all variables are initialized "
"before you start using your layer/model objects."
)
value = self._initializer(self._shape, dtype=self._dtype)
self._initialize(value)
def _maybe_autocast(self, value):
autocast_scope = get_autocast_scope()
if autocast_scope is not None:
return autocast_scope.maybe_cast(value)
return value
def numpy(self):
return np.array(self)
@property
def value(self):
if in_stateless_scope():
scope = get_stateless_scope()
value = scope.get_current_value(self)
if value is not None:
return self._maybe_autocast(value)
if self._value is None:
            # Uninitialized variable. Return a placeholder.
            # This is fine because it's only ever used
            # during shape inference / graph tracing
            # (anything else would be a bug, to be fixed).
return self._maybe_autocast(
self._initializer(self._shape, dtype=self._dtype)
)
return self._maybe_autocast(self._value)
def assign(self, value):
value = self._convert_to_tensor(value, dtype=self.dtype)
if not shape_equal(value.shape, self.shape):
raise ValueError(
"The shape of the target variable and "
"the shape of the target value in "
"`variable.assign(value)` must match. "
f"variable.shape={self.value.shape}, "
f"Received: value.shape={value.shape}. "
f"Target variable: {self}"
)
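        # Within a stateless scope, defer the update by recording the
        # (variable, value) pair on the scope instead of mutating state
        # in place.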
if in_stateless_scope():
scope = get_stateless_scope()
scope.add_update((self, value))
else:
self._direct_assign(value)
def assign_add(self, value):
self.assign(self + value)
def assign_sub(self, value):
self.assign(self - value)
@property
def dtype(self):
autocast_scope = get_autocast_scope()
if autocast_scope is not None and is_float_dtype(self._dtype):
return autocast_scope.dtype
return self._dtype
@property
def shape(self):
return self._shape
@property
def ndim(self):
return self._ndim
def __repr__(self):
return (
f"<KerasVariable shape={self.shape}, dtype={self.dtype}, "
f"path={self.path}>"
)
def _initialize(self, value):
raise NotImplementedError
def _convert_to_tensor(self, value, dtype=None):
raise NotImplementedError
def __getitem__(self, idx):
return self.value.__getitem__(idx)
def __array__(self, dtype=None):
return self.value.__array__(dtype)
def __bool__(self):
raise TypeError("A Keras Variable cannot be used as a boolean.")
def __neg__(self):
return self.value.__neg__()
def __pos__(self):
return self.value.__pos__()
def __abs__(self):
return self.value.__abs__()
def __invert__(self):
return self.value.__invert__()
def __eq__(self, other):
value = self.value
return value.__eq__(self._convert_to_tensor(other, dtype=value.dtype))
def __ne__(self, other):
value = self.value
return value.__ne__(self._convert_to_tensor(other, dtype=value.dtype))
def __lt__(self, other):
value = self.value
return value.__lt__(self._convert_to_tensor(other, dtype=value.dtype))
def __le__(self, other):
value = self.value
return value.__le__(self._convert_to_tensor(other, dtype=value.dtype))
def __gt__(self, other):
value = self.value
return value.__gt__(self._convert_to_tensor(other, dtype=value.dtype))
def __ge__(self, other):
value = self.value
return value.__ge__(self._convert_to_tensor(other, dtype=value.dtype))
def __add__(self, other):
value = self.value
return value.__add__(self._convert_to_tensor(other, dtype=value.dtype))
def __radd__(self, other):
value = self.value
return value.__radd__(self._convert_to_tensor(other, dtype=value.dtype))
def __sub__(self, other):
value = self.value
return value.__sub__(self._convert_to_tensor(other, dtype=value.dtype))
def __rsub__(self, other):
value = self.value
return value.__rsub__(self._convert_to_tensor(other, dtype=value.dtype))
def __mul__(self, other):
value = self.value
return value.__mul__(self._convert_to_tensor(other, dtype=value.dtype))
def __rmul__(self, other):
value = self.value
return value.__rmul__(self._convert_to_tensor(other, dtype=value.dtype))
def __div__(self, other):
value = self.value
return value.__div__(self._convert_to_tensor(other, dtype=value.dtype))
def __rdiv__(self, other):
value = self.value
return value.__rdiv__(self._convert_to_tensor(other, dtype=value.dtype))
def __truediv__(self, other):
value = self.value
return value.__truediv__(
self._convert_to_tensor(other, dtype=value.dtype)
)
def __rtruediv__(self, other):
value = self.value
return value.__rtruediv__(
self._convert_to_tensor(other, dtype=value.dtype)
)
def __floordiv__(self, other):
value = self.value
return value.__floordiv__(
self._convert_to_tensor(other, dtype=value.dtype)
)
def __rfloordiv__(self, other):
value = self.value
return value.__rfloordiv__(
self._convert_to_tensor(other, dtype=value.dtype)
)
def __divmod__(self, other):
value = self.value
return value.__divmod__(
self._convert_to_tensor(other, dtype=value.dtype)
)
def __rdivmod__(self, other):
value = self.value
return value.__rdivmod__(
self._convert_to_tensor(other, dtype=value.dtype)
)
def __mod__(self, other):
value = self.value
return value.__mod__(self._convert_to_tensor(other, dtype=value.dtype))
def __rmod__(self, other):
value = self.value
return value.__rmod__(self._convert_to_tensor(other, dtype=value.dtype))
def __pow__(self, other):
value = self.value
return value.__pow__(self._convert_to_tensor(other, dtype=value.dtype))
def __rpow__(self, other):
value = self.value
return value.__rpow__(self._convert_to_tensor(other, dtype=value.dtype))
def __matmul__(self, other):
value = self.value
return value.__matmul__(
self._convert_to_tensor(other, dtype=value.dtype)
)
def __rmatmul__(self, other):
value = self.value
return value.__rmatmul__(
self._convert_to_tensor(other, dtype=value.dtype)
)
def __and__(self, other):
value = self.value
return value.__and__(self._convert_to_tensor(other, dtype=value.dtype))
def __rand__(self, other):
value = self.value
return value.__rand__(self._convert_to_tensor(other, dtype=value.dtype))
def __or__(self, other):
value = self.value
return value.__or__(self._convert_to_tensor(other, dtype=value.dtype))
def __ror__(self, other):
value = self.value
return value.__ror__(self._convert_to_tensor(other, dtype=value.dtype))
def __xor__(self, other):
value = self.value
return value.__xor__(self._convert_to_tensor(other, dtype=value.dtype))
def __rxor__(self, other):
value = self.value
return value.__rxor__(self._convert_to_tensor(other, dtype=value.dtype))
def __lshift__(self, other):
value = self.value
return value.__lshift__(
self._convert_to_tensor(other, dtype=value.dtype)
)
def __rlshift__(self, other):
value = self.value
return value.__rlshift__(
self._convert_to_tensor(other, dtype=self.dtype)
)
def __rshift__(self, other):
value = self.value
return value.__rshift__(
self._convert_to_tensor(other, dtype=value.dtype)
)
def __rrshift__(self, other):
value = self.value
return value.__rrshift__(
self._convert_to_tensor(other, dtype=self.dtype)
)
def __round__(self, ndigits=None):
value = self.value
return value.__round__(ndigits)
def register_uninitialized_variable(variable):
uninitialized_variables = global_state.get_global_attribute(
"uninitialized_variables", [], set_to_default=True
)
uninitialized_variables.append(variable)
def initialize_all_variables():
collection = global_state.get_global_attribute("uninitialized_variables")
if collection:
for v in collection:
v._deferred_initialize()
global_state.set_global_attribute("uninitialized_variables", [])
ALLOWED_DTYPES = {
"float16",
"float32",
"float64",
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
"bfloat16",
"bool",
"string",
}
PYTHON_DTYPES_MAP = {
bool: "bool",
int: "int64" if config.backend() == "tensorflow" else "int32",
float: "float32",
str: "string",
    # special case: the dtype may also be passed as the string "int"
"int": "int64" if config.backend() == "tensorflow" else "int32",
}
@keras_core_export("keras_core.backend.standardize_dtype")
def standardize_dtype(dtype):
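    """Convert a dtype specification to its canonical string name.

    Accepts `None` (mapped to `keras_core.backend.floatx()`), Python types,
    NumPy/torch dtype objects, and dtype strings.
    """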
if dtype is None:
return config.floatx()
dtype = PYTHON_DTYPES_MAP.get(dtype, dtype)
if hasattr(dtype, "name"):
dtype = dtype.name
elif hasattr(dtype, "__str__") and "torch" in str(dtype):
dtype = str(dtype).split(".")[-1]
if dtype not in ALLOWED_DTYPES:
raise ValueError(f"Invalid dtype: {dtype}")
return dtype
def standardize_shape(shape):
if not isinstance(shape, tuple):
if shape is None:
raise ValueError("Undefined shapes are not supported.")
if not hasattr(shape, "__iter__"):
raise ValueError(f"Cannot convert '{shape}' to a shape.")
shape = tuple(shape)
if config.backend() == "torch":
# `shape` might be `torch.Size`. We need to convert the items in it to
# either int or `None`
shape = tuple(map(lambda x: int(x) if x is not None else None, shape))
for e in shape:
if e is None:
continue
if config.backend() == "jax" and str(e) == "b":
# JAX2TF tracing represents `None` dimensions as `b`
continue
if not isinstance(e, int):
raise ValueError(
f"Cannot convert '{shape}' to a shape. "
f"Found invalid entry '{e}'. "
)
if e < 0:
raise ValueError(
f"Cannot convert '{shape}' to a shape. "
"Negative dimensions are not allowed."
)
return shape
def shape_equal(a_shape, b_shape):
"""Return whether a_shape == b_shape (allows None entries)."""
if len(a_shape) != len(b_shape):
return False
for e1, e2 in zip(a_shape, b_shape):
if e1 is not None and e2 is not None and e1 != e2:
return False
return True
@keras_core_export("keras_core.backend.is_float_dtype")
def is_float_dtype(dtype):
dtype = standardize_dtype(dtype)
return dtype.startswith("float") or dtype.startswith("bfloat")
@keras_core_export("keras_core.backend.is_int_dtype")
def is_int_dtype(dtype):
dtype = standardize_dtype(dtype)
return dtype.startswith("int") or dtype.startswith("uint")
def get_autocast_scope():
return global_state.get_global_attribute("autocast_scope")
class AutocastScope:
"""Context manager that enables the autocasting of float variables.
    Under this context manager, float `KerasVariable`s will be cast to `dtype`
(note that `dtype` must also be float).
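
    Example (illustrative sketch):

    ```python
    from keras_core import backend, initializers

    v = backend.Variable(initializers.Ones(), shape=(2,), dtype="float32")
    with AutocastScope("float16"):
        print(v.value.dtype)  # float16 while the scope is active
    print(v.value.dtype)  # float32 again outside the scope
    ```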
"""
def __init__(self, dtype):
if dtype is not None:
dtype = standardize_dtype(dtype)
if not is_float_dtype(dtype):
raise ValueError(
"`AutocastScope` can only be used with "
"a floating-point target dtype, such as 'float16'. "
f"Received: dtype={dtype}"
)
self.dtype = dtype
self.original_scope = None
def maybe_cast(self, value):
from keras_core import backend
if self.dtype is not None and is_float_dtype(value.dtype):
return backend.cast(value, dtype=self.dtype)
return value
def __enter__(self):
self.original_scope = get_autocast_scope()
global_state.set_global_attribute("autocast_scope", self)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute("autocast_scope", self.original_scope)
| keras-core/keras_core/backend/common/variables.py/0 | {
"file_path": "keras-core/keras_core/backend/common/variables.py",
"repo_id": "keras-core",
"token_count": 7664
} | 28 |
import numpy as np
import tree
from keras_core.backend.common import KerasVariable
from keras_core.backend.common import standardize_dtype
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.backend.common.stateless_scope import StatelessScope
from keras_core.utils.nest import pack_sequence_as
SUPPORTS_SPARSE_TENSORS = False
class Variable(KerasVariable):
def _initialize(self, value):
self._value = np.array(value, dtype=self._dtype)
def _direct_assign(self, value):
self._value = np.array(value, dtype=self._dtype)
def _convert_to_tensor(self, value, dtype=None):
return convert_to_tensor(value, dtype=dtype)
# Overload native accessor.
def __array__(self):
return self.value
def convert_to_tensor(x, dtype=None, sparse=False):
if sparse:
raise ValueError("`sparse=True` is not supported with numpy backend")
if dtype is not None:
dtype = standardize_dtype(dtype)
if isinstance(x, Variable):
if dtype and dtype != x.dtype:
return x.value.astype(dtype)
return x.value
return np.array(x, dtype=dtype)
def convert_to_numpy(x):
return np.array(x)
def is_tensor(x):
if isinstance(x, (np.generic, np.ndarray)):
return True
return False
def shape(x):
return x.shape
def cast(x, dtype):
return convert_to_tensor(x, dtype=dtype)
def cond(pred, true_fn, false_fn):
if pred:
return true_fn()
return false_fn()
def vectorized_map(function, elements):
if len(elements) == 1:
return function(elements)
else:
batch_size = elements[0].shape[0]
output_store = list()
for index in range(batch_size):
output_store.append(function([x[index] for x in elements]))
return np.stack(output_store)
# Shape / dtype inference util
def compute_output_spec(fn, *args, **kwargs):
with StatelessScope():
def has_none_shape(x):
if isinstance(x, KerasTensor):
return None in x.shape
return False
none_in_shape = any(map(has_none_shape, tree.flatten((args, kwargs))))
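        # To infer which output dims depend on unknown (`None`) input dims,
        # we run `fn` on concrete arrays twice, filling the `None` dims with
        # two different sizes (83 and 89 below). Output dims that differ
        # between the two runs are reported as `None` in the resulting spec.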
def convert_keras_tensor_to_numpy(x, fill_value=None):
if isinstance(x, KerasTensor):
shape = list(x.shape)
if fill_value:
for i, e in enumerate(shape):
if e is None:
shape[i] = fill_value
return np.empty(
shape=shape,
dtype=x.dtype,
)
return x
args_1, kwargs_1 = tree.map_structure(
lambda x: convert_keras_tensor_to_numpy(x, fill_value=83),
(args, kwargs),
)
outputs_1 = fn(*args_1, **kwargs_1)
outputs = outputs_1
if none_in_shape:
args_2, kwargs_2 = tree.map_structure(
lambda x: convert_keras_tensor_to_numpy(x, fill_value=89),
(args, kwargs),
)
outputs_2 = fn(*args_2, **kwargs_2)
flat_out_1 = tree.flatten(outputs_1)
flat_out_2 = tree.flatten(outputs_2)
flat_out = []
for x1, x2 in zip(flat_out_1, flat_out_2):
shape = list(x1.shape)
for i, e in enumerate(x2.shape):
if e != shape[i]:
shape[i] = None
flat_out.append(KerasTensor(shape, standardize_dtype(x1.dtype)))
outputs = pack_sequence_as(outputs_1, flat_out)
def convert_numpy_to_keras_tensor(x):
if is_tensor(x):
return KerasTensor(x.shape, standardize_dtype(x.dtype))
return x
output_spec = tree.map_structure(convert_numpy_to_keras_tensor, outputs)
return output_spec
def scatter(indices, values, shape):
indices = convert_to_tensor(indices)
values = convert_to_tensor(values)
zeros = np.zeros(shape, dtype=values.dtype)
index_length = indices.shape[-1]
value_shape = shape[index_length:]
indices = np.reshape(indices, [-1, index_length])
values = np.reshape(values, [-1] + list(value_shape))
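    # Updates at repeated indices accumulate, mirroring the behavior of
    # `tf.scatter_nd`.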
for i in range(indices.shape[0]):
index = indices[i]
zeros[tuple(index)] += values[i]
return zeros
def scatter_update(inputs, indices, updates):
indices = np.array(indices)
indices = np.transpose(indices)
inputs[tuple(indices)] = updates
return inputs
def slice(inputs, start_indices, lengths):
# Validate inputs
assert len(start_indices) == len(lengths)
# Generate list of indices arrays for each dimension
indices = [
np.arange(start, start + length)
for start, length in zip(start_indices, lengths)
]
# Use np.ix_ to create a multidimensional index array
mesh = np.ix_(*indices)
return inputs[mesh]
def slice_update(inputs, start_indices, updates):
# Generate list of indices arrays for each dimension
indices = [
np.arange(start, start + length)
for start, length in zip(start_indices, updates.shape)
]
# Use np.ix_ to create a multidimensional index array
mesh = np.ix_(*indices)
inputs[mesh] = updates
return inputs
def while_loop(
cond,
body,
loop_vars,
maximum_iterations=None,
):
current_iter = 0
iteration_check = (
lambda iter: maximum_iterations is None or iter < maximum_iterations
)
loop_vars = tuple([convert_to_tensor(v) for v in loop_vars])
while cond(*loop_vars) and iteration_check(current_iter):
loop_vars = body(*loop_vars)
if not isinstance(loop_vars, (list, tuple)):
loop_vars = (loop_vars,)
loop_vars = tuple(loop_vars)
current_iter += 1
return loop_vars
def fori_loop(lower, upper, body_fun, init_val):
val = init_val
for i in range(lower, upper):
val = body_fun(i, val)
return val
def stop_gradient(x):
return x
def unstack(x, num=None, axis=0):
x = np.moveaxis(x, axis, 0)
return [x[i] for i in range(x.shape[0])]
| keras-core/keras_core/backend/numpy/core.py/0 | {
"file_path": "keras-core/keras_core/backend/numpy/core.py",
"repo_id": "keras-core",
"token_count": 2827
} | 29 |
import warnings
import tensorflow as tf
from keras_core.backend import standardize_data_format
from keras_core.backend import standardize_dtype
from keras_core.backend.common.backend_utils import (
compute_conv_transpose_output_shape,
)
from keras_core.backend.config import epsilon
from keras_core.backend.tensorflow.core import cast
def relu(x):
return tf.nn.relu(x)
def relu6(x):
return tf.nn.relu6(x)
def sigmoid(x):
logits = x
output = tf.nn.sigmoid(x)
output._keras_logits = logits
return output
def tanh(x):
return tf.nn.tanh(x)
def softplus(x):
return tf.math.softplus(x)
def softsign(x):
return tf.nn.softsign(x)
def silu(x, beta=1.0):
return tf.nn.silu(x, beta=beta)
def log_sigmoid(x):
return tf.math.log_sigmoid(x)
def leaky_relu(x, negative_slope=0.2):
return tf.nn.leaky_relu(x, alpha=negative_slope)
def hard_sigmoid(x):
x = x / 6.0 + 0.5
return tf.clip_by_value(x, 0.0, 1.0)
def elu(x, alpha=1.0):
res = tf.nn.elu(x)
if alpha == 1:
return res
else:
return tf.where(x > 0, res, alpha * res)
def selu(x):
return tf.nn.selu(x)
def gelu(x, approximate=True):
return tf.nn.gelu(x, approximate)
def softmax(x, axis=-1):
logits = x
if axis is None:
# Unlike numpy, tf will handle axis=None as axis=-1.
# We need this workaround for the reduction on every dim.
output = tf.reshape(x, [-1])
output = tf.nn.softmax(output, axis=-1)
output = tf.reshape(output, tf.shape(x))
else:
output = tf.nn.softmax(x, axis=axis)
output._keras_logits = logits
return output
def log_softmax(x, axis=-1):
if axis is None:
# Unlike numpy, tf will handle axis=None as axis=-1.
# We need this workaround for the reduction on every dim.
output = tf.reshape(x, [-1])
output = tf.nn.log_softmax(output, axis=-1)
return tf.reshape(output, tf.shape(x))
return tf.nn.log_softmax(x, axis=axis)
def _transpose_spatial_inputs(inputs):
num_spatial_dims = len(inputs.shape) - 2
# Tensorflow pooling does not support `channels_first` format, so
# we need to transpose to `channels_last` format.
if num_spatial_dims == 1:
inputs = tf.transpose(inputs, (0, 2, 1))
elif num_spatial_dims == 2:
inputs = tf.transpose(inputs, (0, 2, 3, 1))
elif num_spatial_dims == 3:
inputs = tf.transpose(inputs, (0, 2, 3, 4, 1))
else:
raise ValueError(
"Pooling inputs's shape must be 3, 4 or 5, corresponding to 1D, 2D "
f"and 3D inputs. But received shape: {inputs.shape}."
)
return inputs
def _transpose_spatial_outputs(outputs):
    # Undo the transpose in `_transpose_spatial_inputs`.
num_spatial_dims = len(outputs.shape) - 2
if num_spatial_dims == 1:
outputs = tf.transpose(outputs, (0, 2, 1))
elif num_spatial_dims == 2:
outputs = tf.transpose(outputs, (0, 3, 1, 2))
elif num_spatial_dims == 3:
outputs = tf.transpose(outputs, (0, 4, 1, 2, 3))
return outputs
def max_pool(
inputs,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
data_format = standardize_data_format(data_format)
strides = pool_size if strides is None else strides
padding = padding.upper()
tf_data_format = _convert_data_format("channels_last", len(inputs.shape))
if data_format == "channels_first":
# Tensorflow pooling does not support `channels_first` format, so
# we need to transpose to `channels_last` format.
inputs = _transpose_spatial_inputs(inputs)
outputs = tf.nn.max_pool(
inputs,
pool_size,
strides,
padding,
tf_data_format,
)
if data_format == "channels_first":
outputs = _transpose_spatial_outputs(outputs)
return outputs
def average_pool(
inputs,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
data_format = standardize_data_format(data_format)
strides = pool_size if strides is None else strides
padding = padding.upper()
tf_data_format = _convert_data_format("channels_last", len(inputs.shape))
if data_format == "channels_first":
# Tensorflow pooling does not support `channels_first` format, so
# we need to transpose to `channels_last` format.
inputs = _transpose_spatial_inputs(inputs)
outputs = tf.nn.avg_pool(
inputs,
pool_size,
strides,
padding,
tf_data_format,
)
if data_format == "channels_first":
outputs = _transpose_spatial_outputs(outputs)
return outputs
def _convert_data_format(data_format, ndim):
if data_format == "channels_last":
if ndim == 3:
return "NWC"
elif ndim == 4:
return "NHWC"
elif ndim == 5:
return "NDHWC"
else:
raise ValueError(
f"Input rank not supported: {ndim}. "
"Expected values are [3, 4, 5]"
)
elif data_format == "channels_first":
if ndim == 3:
return "NCW"
elif ndim == 4:
return "NCHW"
elif ndim == 5:
return "NCDHW"
else:
raise ValueError(
f"Input rank not supported: {ndim}. "
"Expected values are [3, 4, 5]"
)
else:
raise ValueError(
f"Invalid data_format: {data_format}. "
'Expected values are ["channels_first", "channels_last"]'
)
def conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
def _conv():
tf_data_format = _convert_data_format(data_format, len(inputs.shape))
return tf.nn.convolution(
inputs,
kernel,
strides,
padding.upper(),
data_format=tf_data_format,
dilations=dilation_rate,
)
# Reason for making this function is in Tensorflow, `groups > 1` does not
# work on CPU for `tf.nn.convolution`, but wrapping it by XLA works.
@tf.function(jit_compile=True)
def _conv_xla():
return _conv()
data_format = standardize_data_format(data_format)
if data_format == "channels_last":
channels = inputs.shape[-1]
else:
channels = inputs.shape[1]
if channels != kernel.shape[-2]:
# If kernel's in_channel does not match input's channels, it indicates
# convolution is broken down into groups.
return _conv_xla()
return _conv()
def depthwise_conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = len(inputs.shape) - 2
if num_spatial_dims > 2:
raise ValueError(
"`inputs` rank must be 3 (1D conv) or 4 (2D conv). Received: "
"{inputs.ndim}."
)
# Because we use `tf.nn.depthwise_conv2d` for both 1D and 2D convs, we set
# `tf_data_format` using 2D conv format.
tf_data_format = _convert_data_format(data_format, 4)
padding = padding.upper()
if isinstance(strides, int):
strides = (strides,) * num_spatial_dims
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,) * num_spatial_dims
if num_spatial_dims == 1:
# 1D depthwise conv.
if data_format == "channels_last":
strides = (1,) + strides * 2 + (1,)
spatial_start_dim = 1
else:
strides = (1, 1) + strides * 2
spatial_start_dim = 2
inputs = tf.expand_dims(inputs, spatial_start_dim)
kernel = tf.expand_dims(kernel, axis=0)
dilation_rate = None if dilation_rate is None else (1,) + dilation_rate
outputs = tf.nn.depthwise_conv2d(
inputs,
kernel,
strides,
padding,
data_format=tf_data_format,
dilations=dilation_rate,
)
return tf.squeeze(outputs, [spatial_start_dim])
if data_format == "channels_last":
strides = (1,) + strides + (1,)
spatial_start_dim = 1
else:
strides = (1, 1) + strides
spatial_start_dim = 2
return tf.nn.depthwise_conv2d(
inputs,
kernel,
strides,
padding,
data_format=tf_data_format,
dilations=dilation_rate,
)
def separable_conv(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = len(inputs.shape) - 2
if num_spatial_dims > 2:
raise ValueError(
"`num_spatial_dims` must be 1 or 2. Received: "
f"num_spatial_dims={num_spatial_dims}."
)
# Because we use `tf.nn.separable_conv2d` for both 1D and 2D convs, we set
# `tf_data_format` using 2D conv format.
tf_data_format = _convert_data_format(data_format, 4)
padding = padding.upper()
if isinstance(strides, int):
strides = (strides,) * num_spatial_dims
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,) * num_spatial_dims
if num_spatial_dims == 1:
# 1D depthwise conv.
if data_format == "channels_last":
strides = (1,) + strides * 2 + (1,)
spatial_start_dim = 1
else:
strides = (1, 1) + strides * 2
spatial_start_dim = 2
inputs = tf.expand_dims(inputs, spatial_start_dim)
depthwise_kernel = tf.expand_dims(depthwise_kernel, axis=0)
pointwise_kernel = tf.expand_dims(pointwise_kernel, axis=0)
dilation_rate = None if dilation_rate is None else (1,) + dilation_rate
outputs = tf.nn.separable_conv2d(
inputs,
depthwise_kernel,
pointwise_kernel,
strides,
padding,
data_format=tf_data_format,
dilations=dilation_rate,
)
return tf.squeeze(outputs, [spatial_start_dim])
if data_format == "channels_last":
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
return tf.nn.separable_conv2d(
inputs,
depthwise_kernel,
pointwise_kernel,
strides,
padding,
data_format=tf_data_format,
dilations=dilation_rate,
)
def conv_transpose(
inputs,
kernel,
strides=1,
padding="valid",
output_padding=None,
data_format=None,
dilation_rate=1,
):
data_format = standardize_data_format(data_format)
tf_data_format = _convert_data_format(data_format, len(inputs.shape))
kernel_size = kernel.shape[:-2]
filters = kernel.shape[-2]
input_shape = list(inputs.shape)
symbolic_shape = tf.shape(inputs)
for i, e in enumerate(input_shape):
if e is None:
input_shape[i] = symbolic_shape[i]
output_shape = compute_conv_transpose_output_shape(
input_shape,
kernel_size,
filters,
strides,
padding,
output_padding,
data_format,
dilation_rate,
)
return tf.nn.conv_transpose(
inputs,
kernel,
output_shape,
strides,
padding=padding.upper(),
data_format=tf_data_format,
dilations=dilation_rate,
)
def one_hot(x, num_classes, axis=-1, dtype="float32"):
return tf.one_hot(x, num_classes, axis=axis, dtype=dtype)
def multi_hot(x, num_classes, axis=-1, dtype="float32"):
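    # One-hot encode each index, then collapse the index dimension with an
    # element-wise max to obtain a single multi-hot vector per sample.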
reduction_axis = 1 if len(x.shape) > 1 else 0
outputs = tf.reduce_max(
one_hot(cast(x, "int32"), num_classes, axis=axis, dtype=dtype),
axis=reduction_axis,
)
return outputs
def _get_logits(output, from_logits, op_type, fn_name):
"""Retrieves logits tensor from maybe-softmax or maybe-sigmoid tensor."""
output_ = output
from_logits_ = from_logits
has_keras_logits = hasattr(output, "_keras_logits")
if has_keras_logits:
output_ = output._keras_logits
from_logits_ = True
from_expected_op_type = (
not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable))
and output.op.type == op_type
) and not has_keras_logits
if from_expected_op_type:
        # When a softmax activation was used to produce the output, we use
        # the logits that fed that softmax to compute the loss directly.
        # This avoids numerical issues (probabilities collapsing to zero)
        # during training.
assert len(output.op.inputs) == 1
output_ = output.op.inputs[0]
from_logits_ = True
if from_logits and (has_keras_logits or from_expected_op_type):
warnings.warn(
f'"`{fn_name}` received `from_logits=True`, but '
f"the `output` argument was produced by a {op_type} "
"activation and thus does not represent logits. "
"Was this intended?",
stacklevel=2,
)
return output_, from_logits_
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
Args:
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is `True`, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Example:
>>> a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])
>>> print(a)
tf.Tensor(
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]], shape=(3, 3), dtype=float32)
>>> b = tf.constant([.9, .05, .05, .05, .89, .06, .05, .01, .94],
... shape=[3, 3])
>>> print(b)
tf.Tensor(
[[0.9 0.05 0.05]
[0.05 0.89 0.06]
[0.05 0.01 0.94]], shape=(3, 3), dtype=float32)
>>> loss = categorical_crossentropy(a, b)
>>> print(np.around(loss, 5))
[0.10536 0.11653 0.06188]
>>> loss = categorical_crossentropy(a, a)
>>> print(np.around(loss, 5))
[0. 0. 0.]
"""
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
if len(target.shape) < 1:
raise ValueError(
"Arguments `target` and `output` must be at least rank 1. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if len(target.shape) != len(output.shape):
raise ValueError(
"Arguments `target` and `output` must have the same rank "
"(ndim). Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
for e1, e2 in zip(target.shape, output.shape):
if e1 is not None and e2 is not None and e1 != e2:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
output, from_logits = _get_logits(
output, from_logits, "Softmax", "categorical_crossentropy"
)
if from_logits:
return tf.nn.softmax_cross_entropy_with_logits(
labels=target, logits=output, axis=axis
)
# Adjust the predictions so that the probability of
# each class for every sample adds up to 1
# This is needed to ensure that the cross entropy is
# computed correctly.
output = output / tf.reduce_sum(output, axis, keepdims=True)
# Compute cross entropy from probabilities.
output = tf.clip_by_value(output, epsilon(), 1.0 - epsilon())
return -tf.reduce_sum(target * tf.math.log(output), axis)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
Args:
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
"""
if axis != -1 and axis != len(output.shape) - 1:
raise ValueError(
f"Only axis=-1 is currently supported. Received: axis={axis}"
)
target = tf.convert_to_tensor(target)
target = tf.cast(target, dtype="int64")
output = tf.convert_to_tensor(output)
if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
target = tf.squeeze(target, axis=-1)
if len(output.shape) < 1:
raise ValueError(
"Argument `output` must be at least rank 1. "
"Received: "
f"output.shape={output.shape}"
)
if len(target.shape) != len(output.shape[:-1]):
raise ValueError(
"Argument `output` must have rank (ndim) `target.ndim - 1`. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
for e1, e2 in zip(target.shape, output.shape[:-1]):
if e1 is not None and e2 is not None and e1 != e2:
raise ValueError(
"Arguments `target` and `output` must have the same shape "
"up until the last dimension: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
output, from_logits = _get_logits(
output, from_logits, "Softmax", "sparse_categorical_crossentropy"
)
if not from_logits:
output = tf.clip_by_value(output, epsilon(), 1 - epsilon())
output = tf.math.log(output)
result = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target, logits=output
)
return result
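# Illustrative usage sketch (added for exposition, not part of the original
# keras-core source); it assumes the module-level `tf` import above and reuses
# the probabilities from the `categorical_crossentropy` doctest.
def _example_sparse_categorical_crossentropy():
    target = tf.constant([0, 1, 2])
    output = tf.constant(
        [[0.9, 0.05, 0.05], [0.05, 0.89, 0.06], [0.05, 0.01, 0.94]]
    )
    # Each row of `output` already sums to 1, so the per-sample loss is
    # simply -log(p[true_class]): approximately [0.10536, 0.11653, 0.06188].
    return sparse_categorical_crossentropy(target, output)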
def binary_crossentropy(target, output, from_logits=False):
"""Binary crossentropy between an output tensor and a target tensor.
Args:
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
Returns:
A tensor.
"""
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
if len(target.shape) != len(output.shape):
raise ValueError(
"Arguments `target` and `output` must have the same rank "
"(ndim). Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
for e1, e2 in zip(target.shape, output.shape):
if e1 is not None and e2 is not None and e1 != e2:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
output, from_logits = _get_logits(
output, from_logits, "Sigmoid", "binary_crossentropy"
)
if from_logits:
return tf.nn.sigmoid_cross_entropy_with_logits(
labels=target, logits=output
)
# Compute cross entropy from probabilities.
output = tf.clip_by_value(output, epsilon(), 1.0 - epsilon())
bce = target * tf.math.log(output)
bce += (1 - target) * tf.math.log(1 - output)
return -bce
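# Illustrative usage sketch (added for exposition, not part of the original
# keras-core source); it assumes the module-level `tf` import above.
def _example_binary_crossentropy():
    target = tf.constant([1.0, 0.0, 1.0])
    output = tf.constant([0.9, 0.1, 0.8])
    # Elementwise -[t * log(p) + (1 - t) * log(1 - p)]:
    # approximately [0.10536, 0.10536, 0.22314].
    return binary_crossentropy(target, output)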
def moments(x, axes, keepdims=False):
# The dynamic range of float16 is too limited for statistics. As a
# workaround, we simply perform the operations on float32 and convert back
# to float16
need_cast = False
ori_dtype = standardize_dtype(x.dtype)
if ori_dtype == "float16":
need_cast = True
x = cast(x, "float32")
mean = tf.reduce_mean(x, axes, keepdims=True)
    # The variance is computed using $Var = E[|x|^2] - |E[x]|^2$. This is
    # faster but less numerically stable than the two-pass algorithm.
# Note: stop_gradient does not change the gradient to the mean, because that
# gradient is zero.
variance = tf.reduce_mean(
tf.square(x), axis=axes, keepdims=True
) - tf.square(tf.stop_gradient(mean))
if not keepdims:
mean = tf.squeeze(mean, axes)
variance = tf.squeeze(variance, axes)
if need_cast:
        # Clip to the float16 range to avoid overflow when casting the
        # float32 statistics back to float16.
mean = tf.clip_by_value(mean, tf.float16.min, tf.float16.max)
variance = tf.clip_by_value(variance, tf.float16.min, tf.float16.max)
mean = cast(mean, ori_dtype)
variance = cast(variance, ori_dtype)
return mean, variance
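# Illustrative usage sketch (added for exposition, not part of the original
# keras-core source); it assumes the module-level `tf` import above.
def _example_moments():
    x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    # Mean and variance over axis 0: mean == [2., 3.], variance == [1., 1.]
    return moments(x, axes=[0])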
| keras-core/keras_core/backend/tensorflow/nn.py/0 | {
"file_path": "keras-core/keras_core/backend/tensorflow/nn.py",
"repo_id": "keras-core",
"token_count": 9582
} | 30 |
import numpy as np
import torch
from keras_core.backend import config
from keras_core.backend.torch.core import cast
from keras_core.backend.torch.core import convert_to_tensor
from keras_core.backend.torch.core import get_device
from keras_core.backend.torch.core import is_tensor
from keras_core.backend.torch.core import to_torch_dtype
TORCH_INT_TYPES = (
torch.int8,
torch.int16,
torch.int32,
torch.int64,
)
def add(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.add(x1, x2)
def einsum(subscripts, *operands, **kwargs):
operands = [convert_to_tensor(operand) for operand in operands]
return torch.einsum(subscripts, *operands)
def subtract(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.subtract(x1, x2)
def matmul(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.matmul(x1, x2)
def multiply(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.multiply(x1, x2)
def mean(x, axis=None, keepdims=False):
if isinstance(x, (list, tuple)):
x = stack(x)
x = convert_to_tensor(x)
if axis == () or axis == []:
# Torch handles the empty axis case differently from numpy.
return x
# Conversion to float necessary for `torch.mean`
x = cast(x, "float32") if x.dtype in TORCH_INT_TYPES else x
return torch.mean(x, axis=axis, keepdims=keepdims)
def max(x, axis=None, keepdims=False, initial=None):
x = convert_to_tensor(x)
if 0 in x.shape:
if initial is None:
raise ValueError("Cannot compute the max of an empty tensor.")
elif keepdims:
return torch.full((1,) * len(x.shape), initial)
else:
return torch.tensor(initial)
if axis is None:
result = torch.max(x)
else:
result = amax(x, axis=axis, keepdims=keepdims)
if isinstance(getattr(result, "values", None), torch.Tensor):
result = result.values
if initial is not None:
initial = convert_to_tensor(initial)
return torch.maximum(result, torch.full(result.shape, initial))
return result
def ones(shape, dtype="float32"):
dtype = to_torch_dtype(dtype)
if isinstance(shape, int):
shape = (shape,)
return torch.ones(size=shape, dtype=dtype, device=get_device())
def zeros(shape, dtype="float32"):
dtype = to_torch_dtype(dtype)
if isinstance(shape, int):
shape = (shape,)
return torch.zeros(size=shape, dtype=dtype, device=get_device())
def zeros_like(x, dtype=None):
x = convert_to_tensor(x)
dtype = to_torch_dtype(dtype or x.dtype)
return torch.zeros_like(x, dtype=dtype)
def absolute(x):
return abs(x)
def abs(x):
x = convert_to_tensor(x)
return torch.abs(x)
def all(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
if axis is None:
return torch.all(x)
if not isinstance(axis, (list, tuple)):
axis = (axis,)
for a in axis:
# `torch.all` does not handle multiple axes.
x = torch.all(x, dim=a, keepdim=keepdims)
return x
def any(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
if axis is None:
return torch.any(x)
if not isinstance(axis, (list, tuple)):
axis = (axis,)
for a in axis:
# `torch.any` does not handle multiple axes.
x = torch.any(x, dim=a, keepdim=keepdims)
return x
def amax(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
if axis is None:
return torch.amax(x)
if axis == () or axis == []:
# Torch handles the empty axis case differently from numpy.
return x
return torch.amax(x, dim=axis, keepdim=keepdims)
def amin(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
if axis is None:
return torch.amin(x)
if axis == () or axis == []:
# Torch handles the empty axis case differently from numpy.
return x
return torch.amin(x, dim=axis, keepdim=keepdims)
def append(
x1,
x2,
axis=None,
):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
if axis is None:
return torch.cat((x1.flatten(), x2.flatten()))
return torch.cat((x1, x2), dim=axis)
def arange(start, stop=None, step=1, dtype=None):
if dtype is None:
if hasattr(start, "dtype"):
dtype = start.dtype
elif isinstance(start, int):
dtype = "int32"
else:
dtype = config.floatx()
dtype = to_torch_dtype(dtype)
if stop is None:
return torch.arange(end=start, dtype=dtype, device=get_device())
return torch.arange(
start, stop, step=step, dtype=dtype, device=get_device()
)
def arccos(x):
x = convert_to_tensor(x)
return torch.arccos(x)
def arccosh(x):
x = convert_to_tensor(x)
return torch.arccosh(x)
def arcsin(x):
x = convert_to_tensor(x)
return torch.arcsin(x)
def arcsinh(x):
x = convert_to_tensor(x)
return torch.arcsinh(x)
def arctan(x):
x = convert_to_tensor(x)
return torch.arctan(x)
def arctan2(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.arctan2(x1, x2)
def arctanh(x):
x = convert_to_tensor(x)
return torch.arctanh(x)
def argmax(x, axis=None):
x = convert_to_tensor(x)
return torch.argmax(x, dim=axis)
def argmin(x, axis=None):
x = convert_to_tensor(x)
return torch.argmin(x, dim=axis)
def argsort(x, axis=-1):
x = convert_to_tensor(x)
if axis is None:
axis = -1
x = x.reshape(-1)
return torch.argsort(x, dim=axis, stable=True)
def array(x, dtype=None):
dtype = to_torch_dtype(dtype)
if isinstance(x, torch.Tensor):
return x
return torch.tensor(x, dtype=dtype, device=get_device())
def average(x, axis=None, weights=None):
x = convert_to_tensor(x)
# Conversion to float necessary for `torch.mean`
x = cast(x, "float32") if x.dtype in TORCH_INT_TYPES else x
if axis == () or axis == []:
# Torch handles the empty axis case differently from numpy.
return x
if weights is not None:
weights = convert_to_tensor(weights)
return torch.sum(torch.mul(x, weights), dim=axis) / torch.sum(
weights, dim=-1
)
return torch.mean(x, axis)
def bincount(x, weights=None, minlength=0):
x = convert_to_tensor(x, dtype=int)
if weights is not None:
weights = convert_to_tensor(weights)
if len(x.shape) == 2:
if weights is None:
def bincount_fn(arr):
return torch.bincount(arr, minlength=minlength)
bincounts = list(map(bincount_fn, x))
else:
def bincount_fn(arr_w):
return torch.bincount(
arr_w[0], weights=arr_w[1], minlength=minlength
)
bincounts = list(map(bincount_fn, zip(x, weights)))
return torch.stack(bincounts)
return torch.bincount(x, weights, minlength)
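# Illustrative usage sketch (added for exposition, not part of the original
# keras-core source): for a rank-2 input the wrapper above computes one
# bincount per row and stacks the results.
def _example_bincount_2d():
    x = [[0, 1, 1], [2, 2, 2]]
    # Expected result (shape (2, 3)): [[1, 2, 0], [0, 0, 3]]
    return bincount(x, minlength=3)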
def broadcast_to(x, shape):
x = convert_to_tensor(x)
return torch.broadcast_to(x, shape)
def ceil(x):
x = convert_to_tensor(x)
return torch.ceil(x)
def clip(x, x_min, x_max):
x = convert_to_tensor(x)
x_min, x_max = convert_to_tensor(x_min), convert_to_tensor(x_max)
return torch.clip(x, min=x_min, max=x_max)
def concatenate(xs, axis=0):
xs = [convert_to_tensor(x) for x in xs]
return torch.cat(xs, dim=axis)
def conjugate(x):
if not isinstance(x, torch.Tensor):
x = torch.from_numpy(x) # needed for complex type conversion
return torch.conj(x).resolve_conj()
def conj(x):
if not isinstance(x, torch.Tensor):
x = torch.from_numpy(x) # needed for complex type conversion
return torch.conj(x).resolve_conj()
def copy(x):
x = convert_to_tensor(x)
return torch.clone(x)
def cos(x):
x = convert_to_tensor(x)
return torch.cos(x)
def cosh(x):
x = convert_to_tensor(x)
return torch.cosh(x)
def count_nonzero(x, axis=None):
x = convert_to_tensor(x)
if axis == () or axis == []:
# Torch handles the empty axis case differently from numpy.
return cast(torch.ne(x, 0), "int32")
return torch.count_nonzero(x, dim=axis).T
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=-1):
if axisa != -1 or axisb != -1 or axisc != -1:
raise ValueError(
"Torch backend does not support `axisa`, `axisb`, or `axisc`. "
f"Received: axisa={axisa}, axisb={axisb}, axisc={axisc}. Please "
"use `axis` arg in torch backend."
)
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.cross(x1, x2, dim=axis)
def cumprod(x, axis=None):
x = convert_to_tensor(x)
if axis is None:
x = x.flatten()
axis = 0
return torch.cumprod(x, dim=axis)
def cumsum(x, axis=None):
x = convert_to_tensor(x)
if axis is None:
x = x.flatten()
axis = 0
return torch.cumsum(x, dim=axis)
def diag(x, k=0):
x = convert_to_tensor(x)
return torch.diag(x, diagonal=k)
def diagonal(x, offset=0, axis1=0, axis2=1):
x = convert_to_tensor(x)
return torch.diagonal(
x,
offset=offset,
dim1=axis1,
dim2=axis2,
)
def digitize(x, bins):
x = convert_to_tensor(x)
bins = convert_to_tensor(bins)
return cast(torch.bucketize(x, bins, right=True), "int32")
def dot(x, y):
x, y = convert_to_tensor(x), convert_to_tensor(y)
if x.ndim == 0 or y.ndim == 0:
return torch.multiply(x, y)
return torch.matmul(x, y)
def empty(shape, dtype="float32"):
dtype = to_torch_dtype(dtype)
return torch.empty(size=shape, dtype=dtype, device=get_device())
def equal(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.eq(x1, x2)
def exp(x):
x = convert_to_tensor(x)
return torch.exp(x)
def expand_dims(x, axis):
x = convert_to_tensor(x)
return torch.unsqueeze(x, dim=axis)
def expm1(x):
x = convert_to_tensor(x)
return torch.expm1(x)
def flip(x, axis=None):
x = convert_to_tensor(x)
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, int):
axis = (axis,)
return torch.flip(x, dims=axis)
def floor(x):
x = convert_to_tensor(x)
return torch.floor(x)
def full(shape, fill_value, dtype=None):
dtype = to_torch_dtype(dtype)
fill_value = convert_to_tensor(fill_value, dtype=dtype)
if len(fill_value.shape) > 0:
        # `torch.full` only supports scalar `fill_value`.
expand_size = len(shape) - len(fill_value.shape)
tile_shape = tuple(shape[:expand_size]) + (1,) * len(fill_value.shape)
return torch.tile(fill_value, tile_shape)
return torch.full(
size=shape, fill_value=fill_value, dtype=dtype, device=get_device()
)
def full_like(x, fill_value, dtype=None):
return full(shape=x.shape, fill_value=fill_value, dtype=dtype)
def greater(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.greater(x1, x2)
def greater_equal(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.greater_equal(x1, x2)
def hstack(xs):
xs = [convert_to_tensor(x) for x in xs]
return torch.hstack(xs)
def identity(n, dtype="float32"):
dtype = to_torch_dtype(dtype)
return torch.eye(n, dtype=dtype)
def imag(x):
if not isinstance(x, torch.Tensor):
x = torch.from_numpy(x) # needed for complex type conversion
return torch.imag(x)
def isclose(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
if x1.dtype != x2.dtype:
result_dtype = torch.result_type(x1, x2)
if x1.dtype != result_dtype:
x1 = cast(x1, result_dtype)
else:
x2 = cast(x2, result_dtype)
return torch.isclose(x1, x2)
def isfinite(x):
x = convert_to_tensor(x)
return torch.isfinite(x)
def isinf(x):
x = convert_to_tensor(x)
return torch.isinf(x)
def isnan(x):
x = convert_to_tensor(x)
return torch.isnan(x)
def less(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.less(x1, x2)
def less_equal(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.less_equal(x1, x2)
def linspace(
start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
if axis != 0:
raise ValueError(
"torch.linspace does not support an `axis` argument. "
f"Received axis={axis}"
)
dtype = to_torch_dtype(dtype)
if endpoint is False:
stop = stop - ((stop - start) / num)
if hasattr(start, "__len__") and hasattr(stop, "__len__"):
start, stop = convert_to_tensor(start), convert_to_tensor(stop)
stop = cast(stop, dtype) if endpoint is False and dtype else stop
steps = torch.arange(num, dtype=dtype, device=get_device()) / (num - 1)
# reshape `steps` to allow for broadcasting
for i in range(start.ndim):
steps = steps.unsqueeze(-1)
# increments from `start` to `stop` in each dimension
linspace = start[None] + steps * (stop - start)[None]
else:
linspace = torch.linspace(
start=start,
end=stop,
steps=num,
dtype=dtype,
)
if retstep is True:
return (linspace, num)
return linspace
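# Illustrative usage sketch (added for exposition, not part of the original
# keras-core source): when `start` and `stop` are array-likes, the branch above
# broadcasts the interpolation steps across their shape.
def _example_linspace_broadcast():
    # Expected result (shape (3, 2)): [[0., 10.], [0.5, 15.], [1., 20.]]
    return linspace([0.0, 10.0], [1.0, 20.0], num=3, dtype="float32")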
def log(x):
x = convert_to_tensor(x)
return torch.log(x)
def log10(x):
x = convert_to_tensor(x)
return torch.log10(x)
def log1p(x):
x = convert_to_tensor(x)
return torch.log1p(x)
def log2(x):
x = convert_to_tensor(x)
return torch.log2(x)
def logaddexp(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
x1 = cast(x1, "float32") if x1.dtype in TORCH_INT_TYPES else x1
x2 = cast(x2, "float32") if x2.dtype in TORCH_INT_TYPES else x2
return torch.logaddexp(x1, x2)
def logical_and(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.logical_and(x1, x2)
def logical_not(x):
x = convert_to_tensor(x)
return torch.logical_not(x)
def logical_or(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.logical_or(x1, x2)
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
if axis != 0:
raise ValueError(
"torch.logspace does not support an `axis` argument. "
f"Received axis={axis}"
)
dtype = to_torch_dtype(dtype)
if endpoint is False:
stop = stop - ((stop - start) / num)
if hasattr(start, "__len__") and hasattr(stop, "__len__"):
start, stop = convert_to_tensor(start), convert_to_tensor(stop)
stop = cast(stop, dtype) if endpoint is False and dtype else stop
steps = torch.arange(num, dtype=dtype, device=get_device()) / (num - 1)
# reshape `steps` to allow for broadcasting
for i in range(start.ndim):
steps = steps.unsqueeze(-1)
# increments from `start` to `stop` in each dimension
linspace = start[None] + steps * (stop - start)[None]
logspace = base**linspace
else:
logspace = torch.logspace(
start=start,
end=stop,
steps=num,
base=base,
dtype=dtype,
)
return logspace
def maximum(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.maximum(x1, x2)
def meshgrid(*x, indexing="xy"):
x = [convert_to_tensor(sc_tensor) for sc_tensor in x]
return torch.meshgrid(x, indexing=indexing)
def min(x, axis=None, keepdims=False, initial=None):
x = convert_to_tensor(x)
if 0 in x.shape:
if initial is None:
raise ValueError("Cannot compute the min of an empty tensor.")
elif keepdims:
return torch.full((1,) * len(x.shape), initial)
else:
return torch.tensor(initial)
if axis is None:
result = torch.min(x)
else:
if isinstance(axis, list):
axis = axis[-1]
result = torch.min(x, dim=axis, keepdim=keepdims)
if isinstance(getattr(result, "values", None), torch.Tensor):
result = result.values
if initial is not None:
initial = convert_to_tensor(initial)
return torch.minimum(result, initial)
return result
def minimum(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.minimum(x1, x2)
def mod(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.remainder(x1, x2)
def moveaxis(x, source, destination):
x = convert_to_tensor(x)
return torch.moveaxis(x, source=source, destination=destination)
def nan_to_num(x):
x = convert_to_tensor(x)
return torch.nan_to_num(x)
def ndim(x):
x = convert_to_tensor(x)
return x.ndim
def nonzero(x):
x = convert_to_tensor(x)
return torch.nonzero(x).T
def not_equal(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.not_equal(x1, x2)
def ones_like(x, dtype=None):
x = convert_to_tensor(x)
dtype = to_torch_dtype(dtype)
return torch.ones_like(x, dtype=dtype)
def outer(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.outer(x1.flatten(), x2.flatten())
def pad(x, pad_width, mode="constant"):
x = convert_to_tensor(x)
pad_sum = []
pad_width = list(pad_width)[::-1] # torch uses reverse order
pad_width_sum = 0
for pad in pad_width:
pad_width_sum += pad[0] + pad[1]
for pad in pad_width:
pad_sum += pad
pad_width_sum -= pad[0] + pad[1]
if pad_width_sum == 0: # early break when no padding in higher order
break
if mode == "symmetric":
mode = "replicate"
if mode == "constant":
return torch.nn.functional.pad(x, pad=pad_sum, mode=mode)
# TODO: reflect and symmetric padding are implemented for padding the
# last 3 dimensions of a 4D or 5D input tensor, the last 2 dimensions of a
# 3D or 4D input tensor, or the last dimension of a 2D or 3D input tensor.
# https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
ori_dtype = x.dtype
ori_ndim = x.ndim
need_squeeze = False
if x.ndim < 3:
need_squeeze = True
new_dims = [1] * (3 - x.ndim)
x = x.view(*new_dims, *x.shape)
need_cast = False
if x.dtype not in (torch.float32, torch.float64):
# TODO: reflect and symmetric padding are only supported with float32/64
# https://github.com/pytorch/pytorch/issues/40763
need_cast = True
x = cast(x, torch.float32)
x = torch.nn.functional.pad(x, pad=pad_sum, mode=mode)
if need_cast:
x = cast(x, ori_dtype)
if need_squeeze:
x = torch.squeeze(x, dim=tuple(range(3 - ori_ndim)))
return x
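# Illustrative usage sketch (added for exposition, not part of the original
# keras-core source): `pad_width` follows the numpy convention of one
# (before, after) pair per dimension.
def _example_pad_constant():
    x = ones((2, 2))
    # Pads one row of zeros above and below: result shape is (4, 2).
    return pad(x, ((1, 1), (0, 0)), mode="constant")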
def prod(x, axis=None, keepdims=False, dtype=None):
x = convert_to_tensor(x)
dtype = to_torch_dtype(dtype)
if axis is None:
return torch.prod(x, dtype=dtype)
if not isinstance(axis, (list, tuple)):
axis = (axis,)
for a in axis:
# `torch.prod` does not handle multiple axes.
x = torch.prod(x, dim=a, keepdim=keepdims, dtype=dtype)
return x
def ravel(x):
x = convert_to_tensor(x)
return torch.ravel(x)
def real(x):
if not isinstance(x, torch.Tensor):
x = torch.from_numpy(x) # needed for complex type conversion
return torch.real(x)
def reciprocal(x):
x = convert_to_tensor(x)
return torch.reciprocal(x)
def repeat(x, repeats, axis=None):
x = convert_to_tensor(x)
if get_device() == "meta":
# Import upper level modules locally to avoid circular imports
# TODO: Refactor the upper level modules to avoid these imports.
from keras_core.backend import KerasTensor
from keras_core.backend import standardize_dtype
from keras_core.ops.numpy import repeat
x = KerasTensor(x.shape, standardize_dtype(x.dtype))
outputs = repeat(x, repeats, axis=axis)
return torch.empty(
size=outputs.shape,
dtype=to_torch_dtype(outputs.dtype),
device=get_device(),
)
repeats = convert_to_tensor(repeats, dtype=int)
return torch.repeat_interleave(x, repeats, dim=axis)
def reshape(x, new_shape):
x = convert_to_tensor(x)
return torch.reshape(x, new_shape)
def roll(x, shift, axis=None):
x = convert_to_tensor(x)
return torch.roll(x, shift, dims=axis)
def sign(x):
x = convert_to_tensor(x)
return torch.sign(x)
def sin(x):
x = convert_to_tensor(x)
return torch.sin(x)
def sinh(x):
x = convert_to_tensor(x)
return torch.sinh(x)
def size(x):
x_shape = convert_to_tensor(tuple(x.shape))
return torch.prod(x_shape)
def sort(x, axis=-1):
x = convert_to_tensor(x)
return torch.sort(x, dim=axis).values
def split(x, indices_or_sections, axis=0):
x = convert_to_tensor(x)
dim = x.shape[axis]
if isinstance(indices_or_sections, (list, tuple)):
idxs = convert_to_tensor(indices_or_sections)
start_size = indices_or_sections[0]
end_size = dim - indices_or_sections[-1]
chunk_sizes = (
[start_size]
+ torch.diff(idxs).type(torch.int).tolist()
+ [end_size]
)
else:
if dim % indices_or_sections != 0:
raise ValueError(
f"Received indices_or_sections={indices_or_sections} "
f"(interpreted as a number of sections) and axis={axis}, "
f"but input dimension x.shape[{axis}]={x.shape[axis]} "
f"is not divisible by {indices_or_sections}. "
f"Full input shape: x.shape={x.shape}"
)
chunk_sizes = dim // indices_or_sections
out = torch.split(
tensor=x,
split_size_or_sections=chunk_sizes,
dim=axis,
)
if dim == 0 and isinstance(indices_or_sections, int):
out = tuple(out[0].clone() for _ in range(indices_or_sections))
return out
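# Illustrative usage sketch (added for exposition, not part of the original
# keras-core source): a list of split indices is translated into per-chunk
# sizes for `torch.split`.
def _example_split_at_indices():
    x = arange(6, dtype="float32")
    # Splits at indices 2 and 4: three chunks of sizes [2, 2, 2].
    return split(x, [2, 4])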
def stack(x, axis=0):
x = [convert_to_tensor(elem) for elem in x]
return torch.stack(x, dim=axis)
def std(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
# Conversion to float necessary for `torch.std`
x = cast(x, "float32") if x.dtype in TORCH_INT_TYPES else x
# Remove Bessel correction to align with numpy
return torch.std(x, dim=axis, keepdim=keepdims, unbiased=False)
def swapaxes(x, axis1, axis2):
x = convert_to_tensor(x)
return torch.swapaxes(x, axis0=axis1, axis1=axis2)
def take(x, indices, axis=None):
x = convert_to_tensor(x)
indices = convert_to_tensor(indices).long()
if x.ndim == 2 and (axis is None or axis == 0):
# This case is equivalent to embedding lookup.
return torch.nn.functional.embedding(indices, x)
if axis is not None:
# make sure axis is non-negative
axis = len(x.shape) + axis if axis < 0 else axis
shape = x.shape[:axis] + indices.shape + x.shape[axis + 1 :]
# ravel the `indices` since `index_select` expects `indices`
# to be a vector (1-D tensor).
indices = indices.ravel()
out = torch.index_select(x, dim=axis, index=indices).squeeze(axis)
return out.reshape(shape)
return torch.take(x, index=indices)
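# Illustrative usage sketch (added for exposition, not part of the original
# keras-core source): for a rank-2 input gathered along axis 0, `take` reduces
# to an embedding lookup of whole rows.
def _example_take_rows():
    table = reshape(arange(12, dtype="float32"), (4, 3))
    # Gathers rows 0 and 2: result shape is (2, 3).
    return take(table, [0, 2])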
def take_along_axis(x, indices, axis=None):
x = convert_to_tensor(x)
indices = convert_to_tensor(indices).long()
return torch.take_along_dim(x, indices, dim=axis)
def tan(x):
x = convert_to_tensor(x)
return torch.tan(x)
def tanh(x):
x = convert_to_tensor(x)
return torch.tanh(x)
def tensordot(x1, x2, axes=2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
# Conversion to long necessary for `torch.tensordot`
x1 = cast(x1, "int64") if x1.dtype in TORCH_INT_TYPES else x1
x2 = cast(x2, "int64") if x2.dtype in TORCH_INT_TYPES else x2
return torch.tensordot(x1, x2, dims=axes)
def round(x, decimals=0):
x = convert_to_tensor(x)
return torch.round(x, decimals=decimals)
def tile(x, repeats):
if is_tensor(repeats):
repeats = tuple(repeats.int().numpy())
x = convert_to_tensor(x)
return torch.tile(x, dims=repeats)
def trace(x, offset=None, axis1=None, axis2=None):
x = convert_to_tensor(x)
return torch.sum(torch.diagonal(x, offset, axis1, axis2), dim=-1)
def tri(N, M=None, k=0, dtype="float32"):
dtype = to_torch_dtype(dtype)
M = M or N
x = torch.ones((N, M), dtype=dtype, device=get_device())
return torch.tril(x, diagonal=k)
def tril(x, k=0):
x = convert_to_tensor(x)
return torch.tril(x, diagonal=k)
def triu(x, k=0):
x = convert_to_tensor(x)
return torch.triu(x, diagonal=k)
def vdot(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.vdot(x1, x2)
def vstack(xs):
xs = [convert_to_tensor(x) for x in xs]
return torch.vstack(xs)
def where(condition, x1, x2):
condition = convert_to_tensor(condition, dtype=bool)
if x1 is not None and x2 is not None:
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return torch.where(condition, x1, x2)
else:
return torch.where(condition)
def divide(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.divide(x1, x2)
def true_divide(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.true_divide(x1, x2)
def power(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.pow(x1, x2)
def negative(x):
x = convert_to_tensor(x)
return torch.negative(x)
def square(x):
x = convert_to_tensor(x)
return torch.square(x)
def sqrt(x):
x = convert_to_tensor(x)
return torch.sqrt(x)
def squeeze(x, axis=None):
x = convert_to_tensor(x)
if axis is not None:
return torch.squeeze(x, dim=axis)
return torch.squeeze(x)
def transpose(x, axes=None):
x = convert_to_tensor(x)
if axes is not None:
return torch.permute(x, dims=axes)
return x.T
def var(x, axis=None, keepdims=False):
x = convert_to_tensor(x, dtype="float32")
# Conversion to float necessary for `torch.var`
x = cast(x, "float32") if x.dtype in TORCH_INT_TYPES else x
if axis == [] or axis == ():
# Torch handles the empty axis case differently from numpy.
return zeros_like(x)
# Bessel correction removed for numpy compatibility
return torch.var(x, dim=axis, keepdim=keepdims, correction=0)
def sum(x, axis=None, keepdims=False):
if isinstance(x, (list, tuple)):
x = stack(x)
x = convert_to_tensor(x)
if axis == () or axis == []:
# Torch handles the empty axis case differently from numpy.
return x
if axis is not None:
return torch.sum(x, axis=axis, keepdim=keepdims)
return torch.sum(x)
def eye(N, M=None, k=None, dtype="float32"):
dtype = to_torch_dtype(dtype)
M = N if M is None else M
k = 0 if k is None else k
if k == 0:
return torch.eye(N, M, dtype=dtype, device=get_device())
diag_length = np.maximum(N, M)
diag = torch.ones(diag_length, dtype=dtype, device=get_device())
return torch.diag(diag, diagonal=k)[:N, :M]
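# Illustrative usage sketch (added for exposition, not part of the original
# keras-core source): a non-zero `k` places the ones on an off-diagonal, and
# the oversized diagonal matrix is cropped back to (N, M).
def _example_eye_offset():
    # 3x4 matrix with ones at positions (0, 1), (1, 2) and (2, 3).
    return eye(3, 4, k=1)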
def floor_divide(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.floor_divide(x1, x2)
def logical_xor(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.logical_xor(x1, x2)
| keras-core/keras_core/backend/torch/numpy.py/0 | {
"file_path": "keras-core/keras_core/backend/torch/numpy.py",
"repo_id": "keras-core",
"token_count": 12808
} | 31 |
from keras_core.callbacks.backup_and_restore_callback import BackupAndRestore
from keras_core.callbacks.callback import Callback
from keras_core.callbacks.callback_list import CallbackList
from keras_core.callbacks.csv_logger import CSVLogger
from keras_core.callbacks.early_stopping import EarlyStopping
from keras_core.callbacks.history import History
from keras_core.callbacks.lambda_callback import LambdaCallback
from keras_core.callbacks.learning_rate_scheduler import LearningRateScheduler
from keras_core.callbacks.model_checkpoint import ModelCheckpoint
from keras_core.callbacks.progbar_logger import ProgbarLogger
from keras_core.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau
from keras_core.callbacks.remote_monitor import RemoteMonitor
from keras_core.callbacks.tensorboard import TensorBoard
from keras_core.callbacks.terminate_on_nan import TerminateOnNaN
| keras-core/keras_core/callbacks/__init__.py/0 | {
"file_path": "keras-core/keras_core/callbacks/__init__.py",
"repo_id": "keras-core",
"token_count": 265
} | 32 |
"""Utilities common to CIFAR10 and CIFAR100 datasets."""
import _pickle as cPickle
def load_batch(fpath, label_key="labels"):
"""Internal utility for parsing CIFAR data.
Args:
fpath: path the file to parse.
label_key: key for label data in the retrieve
dictionary.
Returns:
A tuple `(data, labels)`.
"""
with open(fpath, "rb") as f:
d = cPickle.load(f, encoding="bytes")
# decode utf8
d_decoded = {}
for k, v in d.items():
d_decoded[k.decode("utf8")] = v
d = d_decoded
data = d["data"]
labels = d[label_key]
data = data.reshape(data.shape[0], 3, 32, 32)
return data, labels
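# Illustrative usage sketch (added for exposition, not part of the original
# keras-core source); the path below is a hypothetical location of an
# extracted CIFAR-10 archive.
def _example_load_batch():
    data, labels = load_batch("/tmp/cifar-10-batches-py/data_batch_1")
    # For a CIFAR-10 batch, `data` has shape (10000, 3, 32, 32) and `labels`
    # holds 10000 integer class ids in [0, 9].
    return data, labels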
| keras-core/keras_core/datasets/cifar.py/0 | {
"file_path": "keras-core/keras_core/datasets/cifar.py",
"repo_id": "keras-core",
"token_count": 322
} | 33 |
from keras_core.api_export import keras_core_export
@keras_core_export(
["keras_core.Initializer", "keras_core.initializers.Initializer"]
)
class Initializer:
"""Initializer base class: all Keras initializers inherit from this class.
Initializers should implement a `__call__()` method with the following
signature:
```python
def __call__(self, shape, dtype=None, **kwargs):
# returns a tensor of shape `shape` and dtype `dtype`
# containing values drawn from a distribution of your choice.
```
Optionally, you an also implement the method `get_config()` and the class
method `from_config` in order to support serialization -- just like with
any Keras object.
Here's a simple example: a random normal initializer.
```python
class ExampleRandomNormal(Initializer):
def __init__(self, mean, stddev):
self.mean = mean
self.stddev = stddev
def __call__(self, shape, dtype=None, **kwargs):
return keras_core.random.normal(
shape, mean=self.mean, stddev=self.stddev, dtype=dtype
)
def get_config(self): # To support serialization
return {"mean": self.mean, "stddev": self.stddev}
```
Note that we don't have to implement `from_config()` in the example above
    since the constructor arguments of the class and the keys in the config returned
by `get_config()` are the same. In this case, the default `from_config()`
works fine.
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor.
"""
raise NotImplementedError(
"Initializer subclasses must implement the `__call__()` method."
)
def get_config(self):
"""Returns the initializer's configuration as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config()`.
Returns:
An `Initializer` instance.
"""
return cls(**config)
| keras-core/keras_core/initializers/initializer.py/0 | {
"file_path": "keras-core/keras_core/initializers/initializer.py",
"repo_id": "keras-core",
"token_count": 1022
} | 34 |
import numpy as np
import pytest
from keras_core import testing
from keras_core.layers.activations import softmax
class SoftmaxTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_softmax(self):
self.run_layer_test(
softmax.Softmax,
init_kwargs={},
input_shape=(2, 3, 4),
supports_masking=True,
)
def test_softmax_correctness(self):
softmax_layer = softmax.Softmax()
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
expected_output = np.array(
[
[0.21194157, 0.5761169, 0.21194157],
[0.21194157, 0.5761169, 0.21194157],
]
)
result = softmax_layer(input)
self.assertAllClose(result, expected_output)
def test_softmax_correctness_with_mask(self):
softmax_layer = softmax.Softmax(axis=(1, 0))
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
mask = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
expected_output = np.array(
[[0.21194154, 0.0, 0.21194154], [0.0, 0.57611686, 0.0]]
)
result = softmax_layer(input, mask=mask)
self.assertAllClose(result, expected_output)
def test_softmax_correctness_with_axis(self):
softmax_layer = softmax.Softmax(axis=(1))
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
expected_output = np.array(
[
[0.21194157, 0.5761169, 0.21194157],
[0.21194157, 0.5761169, 0.21194157],
]
)
result = softmax_layer(input)
self.assertAllClose(result, expected_output)
| keras-core/keras_core/layers/activations/softmax_test.py/0 | {
"file_path": "keras-core/keras_core/layers/activations/softmax_test.py",
"repo_id": "keras-core",
"token_count": 887
} | 35 |
from keras_core.api_export import keras_core_export
from keras_core.layers.convolutional.base_conv_transpose import (
BaseConvTranspose,
)
@keras_core_export(
[
"keras_core.layers.Conv2DTranspose",
"keras_core.layers.Convolution2DTranspose",
]
)
class Conv2DTranspose(BaseConvTranspose):
"""2D transposed convolution layer.
    The need for transposed convolutions generally arises from the desire to use
a transformation going in the opposite direction of a normal convolution,
i.e., from something that has the shape of the output of some convolution
to something that has the shape of its input while maintaining a
connectivity pattern that is compatible with said convolution.
Args:
filters: int, the dimension of the output space (the number of filters
in the transposed convolution).
        kernel_size: int or tuple/list of 2 integers, specifying the size of the
            transposed convolution window.
        strides: int or tuple/list of 2 integers, specifying the stride length
            of the transposed convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
            `(batch_size, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch_size, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
        dilation_rate: int or tuple/list of 2 integers, specifying the dilation
            rate to use for dilated transposed convolution.
activation: Activation function. If `None`, no activation is applied.
use_bias: bool, if `True`, bias will be added to the output.
kernel_initializer: Initializer for the convolution kernel. If `None`,
the default initializer (`"glorot_uniform"`) will be used.
bias_initializer: Initializer for the bias vector. If `None`, the
default initializer (`"zeros"`) will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The
function must take as input the unprojected variable and must return
the projected variable (which must have the same shape). Constraints
are not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
Input shape:
- If `data_format="channels_last"`:
A 4D tensor with shape: `(batch_size, height, width, channels)`
- If `data_format="channels_first"`:
A 4D tensor with shape: `(batch_size, channels, height, width)`
Output shape:
- If `data_format="channels_last"`:
A 4D tensor with shape: `(batch_size, new_height, new_width, filters)`
- If `data_format="channels_first"`:
A 4D tensor with shape: `(batch_size, filters, new_height, new_width)`
Returns:
A 4D tensor representing
`activation(conv2d_transpose(inputs, kernel) + bias)`.
Raises:
ValueError: when both `strides > 1` and `dilation_rate > 1`.
References:
- [A guide to convolution arithmetic for deep learning](
https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional Networks](
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
Examples:
>>> x = np.random.rand(4, 10, 8, 128)
>>> y = keras_core.layers.Conv2DTranspose(32, 2, 2, activation='relu')(x)
>>> print(y.shape)
(4, 20, 16, 32)
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs
):
super().__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs
)
| keras-core/keras_core/layers/convolutional/conv2d_transpose.py/0 | {
"file_path": "keras-core/keras_core/layers/convolutional/conv2d_transpose.py",
"repo_id": "keras-core",
"token_count": 2313
} | 36 |
from keras_core import constraints
from keras_core import initializers
from keras_core import ops
from keras_core import regularizers
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.Embedding")
class Embedding(Layer):
"""Turns positive integers (indexes) into dense vectors of fixed size.
e.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]`
This layer can only be used on positive integer inputs of a fixed range.
Example:
>>> model = keras_core.Sequential()
>>> model.add(keras_core.layers.Embedding(1000, 64, input_length=10))
>>> # The model will take as input an integer matrix of size (batch,
>>> # input_length), and the largest integer (i.e. word index) in the input
>>> # should be no larger than 999 (vocabulary size).
>>> # Now model.output_shape is (None, 10, 64), where `None` is the batch
>>> # dimension.
>>> input_array = np.random.randint(1000, size=(32, 10))
>>> model.compile('rmsprop', 'mse')
>>> output_array = model.predict(input_array)
>>> print(output_array.shape)
(32, 10, 64)
Args:
input_dim: Integer. Size of the vocabulary,
i.e. maximum integer index + 1.
output_dim: Integer. Dimension of the dense embedding.
embeddings_initializer: Initializer for the `embeddings`
matrix (see `keras.initializers`).
embeddings_regularizer: Regularizer function applied to
the `embeddings` matrix (see `keras.regularizers`).
embeddings_constraint: Constraint function applied to
the `embeddings` matrix (see `keras.constraints`).
mask_zero: Boolean, whether or not the input value 0 is a special
"padding" value that should be masked out.
This is useful when using recurrent layers which
may take variable length input. If this is `True`,
then all subsequent layers in the model need
to support masking or an exception will be raised.
If mask_zero is set to True, as a consequence,
index 0 cannot be used in the vocabulary (input_dim should
equal size of vocabulary + 1).
Input shape:
2D tensor with shape: `(batch_size, input_length)`.
Output shape:
3D tensor with shape: `(batch_size, input_length, output_dim)`.
"""
def __init__(
self,
input_dim,
output_dim,
embeddings_initializer="uniform",
embeddings_regularizer=None,
embeddings_constraint=None,
mask_zero=False,
**kwargs,
):
super().__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.embeddings_initializer = initializers.get(embeddings_initializer)
self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
self.embeddings_constraint = constraints.get(embeddings_constraint)
self.mask_zero = mask_zero
self.supports_masking = mask_zero
self.autocast = False
def build(self, input_shape=None):
self.embeddings = self.add_weight(
shape=(self.input_dim, self.output_dim),
initializer=self.embeddings_initializer,
name="embeddings",
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint,
trainable=True,
)
self.built = True
def call(self, inputs):
if inputs.dtype != "int32" and inputs.dtype != "int64":
inputs = ops.cast(inputs, "int32")
outputs = ops.take(self.embeddings, inputs, axis=0)
return ops.cast(outputs, dtype=self.compute_dtype)
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
return ops.not_equal(inputs, 0)
def compute_output_shape(self, input_shape):
return input_shape + (self.output_dim,)
def get_config(self):
base_config = super().get_config()
config = {
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"embeddings_initializer": initializers.serialize(
self.embeddings_initializer
),
"embeddings_regularizer": regularizers.serialize(
self.embeddings_regularizer
),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"embeddings_constraint": constraints.serialize(
self.embeddings_constraint
),
"mask_zero": self.mask_zero,
}
return {**base_config, **config}
| keras-core/keras_core/layers/core/embedding.py/0 | {
"file_path": "keras-core/keras_core/layers/core/embedding.py",
"repo_id": "keras-core",
"token_count": 2010
} | 37 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.merging.base_merge import Merge
@keras_core_export("keras_core.layers.Add")
class Add(Merge):
"""Performs elementwise addition operation.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras_core.layers.Add()([x1, x2])
Usage in a Keras model:
>>> input1 = keras_core.layers.Input(shape=(16,))
>>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras_core.layers.Input(shape=(32,))
>>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `added = keras_core.layers.add([x1, x2])`
>>> added = keras_core.layers.Add()([x1, x2])
>>> out = keras_core.layers.Dense(4)(added)
>>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = ops.add(output, inputs[i])
return output
@keras_core_export("keras_core.layers.add")
def add(inputs, **kwargs):
"""Functional interface to the `keras_core.layers.Add` layer.
Args:
inputs: A list of input tensors with the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor as the sum of the inputs. It has the same shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras_core.layers.add([x1, x2])
Usage in a Keras model:
>>> input1 = keras_core.layers.Input(shape=(16,))
>>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras_core.layers.Input(shape=(32,))
>>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
>>> added = keras_core.layers.add([x1, x2])
>>> out = keras_core.layers.Dense(4)(added)
>>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)
"""
return Add(**kwargs)(inputs)
| keras-core/keras_core/layers/merging/add.py/0 | {
"file_path": "keras-core/keras_core/layers/merging/add.py",
"repo_id": "keras-core",
"token_count": 934
} | 38 |
import numpy as np
import pytest
from keras_core import layers
from keras_core import ops
from keras_core import regularizers
from keras_core import testing
class LayerNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_ln_basics(self):
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={
"gamma_regularizer": regularizers.L2(0.01),
"beta_regularizer": regularizers.L2(0.01),
},
input_shape=(3, 4, 2),
expected_output_shape=(3, 4, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=2,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={
"gamma_initializer": "ones",
"beta_initializer": "ones",
},
input_shape=(3, 4, 2),
expected_output_shape=(3, 4, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"scale": False, "center": False},
input_shape=(3, 3),
expected_output_shape=(3, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"rms_scaling": True},
input_shape=(3, 3),
expected_output_shape=(3, 3),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"axis": (-3, -2, -1)},
input_shape=(2, 8, 8, 3),
expected_output_shape=(2, 8, 8, 3),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={},
input_shape=(1, 0, 10),
expected_output_shape=(1, 0, 10),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
def test_invalid_axis(self):
with self.assertRaisesRegex(
TypeError,
(
"Expected an int or a list/tuple of ints for the argument "
"'axis'"
),
):
layers.LayerNormalization(axis={"axis": -1})
def test_correctness(self):
layer = layers.LayerNormalization(dtype="float32")
layer.build(input_shape=(2, 2, 2))
inputs = np.random.normal(
loc=5.0, scale=10.0, size=(1000, 2, 2, 2)
).astype("float32")
out = layer(inputs)
out -= layer.beta
out /= layer.gamma
self.assertAllClose(ops.mean(out), 0.0, atol=1e-1)
self.assertAllClose(ops.std(out), 1.0, atol=1e-1)
def test_output(self):
layer = layers.LayerNormalization(
dtype="float32",
beta_initializer="ones",
gamma_initializer="ones",
)
inputs = np.arange(5).astype("float32")[None, :]
out = layer(inputs)
self.assertAllClose(out, [[-0.41386, 0.29307, 1.0, 1.70693, 2.41386]])
def test_output_with_rms_scaling(self):
layer = layers.LayerNormalization(
dtype="float32",
rms_scaling=True,
gamma_initializer="ones",
)
inputs = np.arange(5).astype("float32")[None, :]
out = layer(inputs)
self.assertAllClose(out, [[0.0, 0.70693, 1.41386, 2.12079, 2.82772]])
| keras-core/keras_core/layers/normalization/layer_normalization_test.py/0 | {
"file_path": "keras-core/keras_core/layers/normalization/layer_normalization_test.py",
"repo_id": "keras-core",
"token_count": 2315
} | 39 |
import os
import pytest
from tensorflow import data as tf_data
from keras_core import backend
from keras_core import layers
from keras_core import models
from keras_core import ops
from keras_core import testing
from keras_core.layers.preprocessing import feature_space
from keras_core.saving import saving_api
class FeatureSpaceTest(testing.TestCase):
def _get_train_data_dict(
self,
as_dataset=False,
as_tensors=False,
as_labeled_dataset=False,
include_strings=True,
):
data = {
"float_1": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
"float_2": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
"float_3": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
"int_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
"int_2": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
"int_3": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
if include_strings:
data["string_1"] = [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
]
data["string_2"] = [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
]
if as_dataset:
return tf_data.Dataset.from_tensor_slices(data)
elif as_tensors:
return {
key: ops.convert_to_tensor(value) for key, value in data.items()
}
elif as_labeled_dataset:
labels = [0, 1, 0, 1, 0, 0, 1, 0, 1, 1]
return tf_data.Dataset.from_tensor_slices((data, labels))
return data
def test_basic_usage_no_strings(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("int_1", "int_2"), ("int_2", "int_3")],
output_mode="concat",
)
# Test unbatched adapt
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
# Test batched adapt
fs.adapt(
self._get_train_data_dict(
as_dataset=True, include_strings=False
).batch(4)
)
# Test unbatched call on raw data
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
out = fs(data)
out_dim = 152
self.assertEqual(out.shape, (out_dim,))
# Test unbatched call on backend tensors
data = self._get_train_data_dict(as_tensors=True, include_strings=False)
data = {key: value[0] for key, value in data.items()}
out = fs(data)
self.assertEqual(out.shape, (out_dim,))
# Test batched call on raw data
out = fs(self._get_train_data_dict(include_strings=False))
self.assertEqual(out.shape, (10, out_dim))
# Test batched call on backend tensors
out = fs(
self._get_train_data_dict(as_tensors=True, include_strings=False)
)
self.assertEqual(out.shape, (10, out_dim))
def test_output_mode_dict_no_strings(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("int_1", "int_2")],
output_mode="dict",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
# Test unbatched call on raw data
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (32,))
self.assertEqual(out["int_1_X_int_2"].shape, (32,))
# Test batched call on raw data
out = fs(self._get_train_data_dict(include_strings=False))
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (10, 32))
# Test batched call on backend tensors
out = fs(
self._get_train_data_dict(as_tensors=True, include_strings=False)
)
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (10, 32))
def test_output_mode_dict_of_ints_no_strings(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": cls.integer_categorical(output_mode="int"),
"int_2": cls.integer_hashed(num_bins=32, output_mode="int"),
"int_3": cls.integer_categorical(output_mode="int"),
},
crosses=[
cls.cross(
("int_1", "int_2"), output_mode="int", crossing_dim=32
),
],
output_mode="dict",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 7)
self.assertEqual(out["int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["int_2"].dtype).startswith("int")
)
self.assertEqual(out["int_1_X_int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["int_1_X_int_2"].dtype).startswith(
"int"
)
)
def test_basic_usage(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": "string_categorical",
"string_2": "string_hashed",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "string_1"), ("string_2", "int_2")],
output_mode="concat",
)
# Test unbatched adapt
fs.adapt(self._get_train_data_dict(as_dataset=True))
# Test batched adapt
fs.adapt(self._get_train_data_dict(as_dataset=True).batch(4))
# Test unbatched call on raw data
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
out_dim = 195
self.assertEqual(out.shape, (out_dim,))
# Test unbatched call on tensors
if backend.backend() == "tensorflow":
data = self._get_train_data_dict(as_tensors=True)
data = {key: value[0] for key, value in data.items()}
out = fs(data)
self.assertEqual(out.shape, (out_dim,))
# Test batched call on raw data
out = fs(self._get_train_data_dict())
self.assertEqual(out.shape, (10, out_dim))
# Test batched call on tensors
if backend.backend() == "tensorflow":
out = fs(self._get_train_data_dict(as_tensors=True))
self.assertEqual(out.shape, (10, out_dim))
def test_output_mode_dict(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": "string_categorical",
"string_2": "string_hashed",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "string_1"), ("string_2", "int_2")],
output_mode="dict",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
# Test unbatched call on raw data
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (11,))
self.assertEqual(out["int_2"].shape, (32,))
self.assertEqual(out["string_2_X_int_2"].shape, (32,))
# Test batched call on raw data
out = fs(self._get_train_data_dict())
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (10, 11))
self.assertEqual(out["int_2"].shape, (10, 32))
self.assertEqual(out["string_2_X_int_2"].shape, (10, 32))
# Test batched call on tensors
        if backend.backend() == "tensorflow":
out = fs(self._get_train_data_dict(as_tensors=True))
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (10, 11))
self.assertEqual(out["int_2"].shape, (10, 32))
self.assertEqual(out["string_2_X_int_2"].shape, (10, 32))
def test_output_mode_dict_of_ints(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": cls.string_categorical(output_mode="int"),
"string_2": cls.string_hashed(num_bins=32, output_mode="int"),
"int_1": cls.integer_categorical(output_mode="int"),
"int_2": cls.integer_hashed(num_bins=32, output_mode="int"),
"int_3": cls.integer_categorical(output_mode="int"),
},
crosses=[
cls.cross(
("float_3", "string_1"), output_mode="int", crossing_dim=32
),
cls.cross(
("string_2", "int_2"), output_mode="int", crossing_dim=32
),
],
output_mode="dict",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
self.assertIsInstance(out, dict)
self.assertLen(out, 10)
self.assertEqual(out["string_1"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["string_1"].dtype).startswith("int")
)
self.assertEqual(out["int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["int_2"].dtype).startswith("int")
)
self.assertEqual(out["string_2_X_int_2"].shape, (1,))
self.assertTrue(
backend.standardize_dtype(out["string_2_X_int_2"].dtype).startswith(
"int"
)
)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Requires string dtype."
)
def test_functional_api_sync_processing(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"string_1": "string_categorical",
"string_2": "string_hashed",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "string_1"), ("string_2", "int_2")],
output_mode="concat",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
inputs = fs.get_inputs()
features = fs.get_encoded_features()
outputs = layers.Dense(1)(features)
model = models.Model(inputs=inputs, outputs=outputs)
model.compile("adam", "mse")
ds = self._get_train_data_dict(as_labeled_dataset=True)
model.fit(ds.batch(4))
model.evaluate(ds.batch(4))
ds = self._get_train_data_dict(as_dataset=True)
model.predict(ds.batch(4))
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="TODO: debug it"
)
def test_tf_data_async_processing(self):
fs = feature_space.FeatureSpace(
features={
"float_1": "float",
"float_2": "float_normalized",
"float_3": "float_discretized",
"int_1": "integer_categorical",
"int_2": "integer_hashed",
"int_3": "integer_categorical",
},
crosses=[("float_3", "int_1"), ("int_1", "int_2")],
output_mode="concat",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
features = fs.get_encoded_features()
outputs = layers.Dense(1)(features)
model = models.Model(inputs=features, outputs=outputs)
model.compile("adam", "mse")
ds = self._get_train_data_dict(
as_labeled_dataset=True, include_strings=False
)
# Try map before batch
ds = ds.map(lambda x, y: (fs(x), y))
model.fit(ds.batch(4))
# Try map after batch
ds = self._get_train_data_dict(
as_labeled_dataset=True, include_strings=False
)
ds = ds.batch(4)
ds = ds.map(lambda x, y: (fs(x), y))
model.evaluate(ds)
ds = self._get_train_data_dict(as_dataset=True, include_strings=False)
ds = ds.map(fs)
model.predict(ds.batch(4))
def test_advanced_usage(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": cls.float(),
"float_2": cls.float_normalized(),
"float_3": cls.float_discretized(num_bins=3),
"string_1": cls.string_categorical(max_tokens=5),
"string_2": cls.string_hashed(num_bins=32),
"int_1": cls.integer_categorical(
max_tokens=5, num_oov_indices=2
),
"int_2": cls.integer_hashed(num_bins=32),
"int_3": cls.integer_categorical(max_tokens=5),
},
crosses=[
cls.cross(("float_3", "string_1"), crossing_dim=32),
cls.cross(("string_2", "int_2"), crossing_dim=32),
],
output_mode="concat",
)
fs.adapt(self._get_train_data_dict(as_dataset=True))
data = {
key: value[0] for key, value in self._get_train_data_dict().items()
}
out = fs(data)
self.assertEqual(out.shape, (148,))
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="TODO: debug it"
)
def test_manual_kpl(self):
data = {
"text": ["1st string", "2nd string", "3rd string"],
}
cls = feature_space.FeatureSpace
# Test with a tf-idf TextVectorization layer
tv = layers.TextVectorization(output_mode="tf_idf")
fs = feature_space.FeatureSpace(
features={
"text": cls.feature(
preprocessor=tv, dtype="string", output_mode="float"
),
},
output_mode="concat",
)
fs.adapt(tf_data.Dataset.from_tensor_slices(data))
out = fs(data)
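        # The tf-idf output width should be 5: four tokens learned from the
        # data ("string", "1st", "2nd", "3rd") plus one out-of-vocabulary slot.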
self.assertEqual(out.shape, [3, 5])
def test_no_adapt(self):
data = {
"int_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
fs = feature_space.FeatureSpace(
{
"int_1": "integer_hashed",
},
output_mode="concat",
)
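        # Hashing is stateless, so no adapt() step is required before calling
        # the feature space; "integer_hashed" falls back to its default bin
        # count (32, per the assertion below).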
out = fs(data)
self.assertEqual(tuple(out.shape), (10, 32))
@pytest.mark.skipif(backend.backend() == "numpy", reason="TODO: debug it")
def test_saving(self):
cls = feature_space.FeatureSpace
fs = feature_space.FeatureSpace(
features={
"float_1": cls.float(),
"float_2": cls.float_normalized(),
"float_3": cls.float_discretized(num_bins=3),
"int_1": cls.integer_categorical(
max_tokens=5, num_oov_indices=2
),
"int_2": cls.integer_hashed(num_bins=32),
"int_3": cls.integer_categorical(max_tokens=5),
},
crosses=[
cls.cross(("float_3", "int_1"), crossing_dim=32),
cls.cross(("int_1", "int_2"), crossing_dim=32),
],
output_mode="concat",
)
fs.adapt(
self._get_train_data_dict(as_dataset=True, include_strings=False)
)
data = {
key: value[0]
for key, value in self._get_train_data_dict(
include_strings=False
).items()
}
ref_out = fs(data)
temp_filepath = os.path.join(self.get_temp_dir(), "fs.keras")
fs.save(temp_filepath)
fs = saving_api.load_model(temp_filepath)
# Save again immediately after loading to test idempotency
temp_filepath = os.path.join(self.get_temp_dir(), "fs2.keras")
fs.save(temp_filepath)
# Test correctness of the first saved FS
out = fs(data)
self.assertAllClose(out, ref_out)
inputs = fs.get_inputs()
outputs = fs.get_encoded_features()
model = models.Model(inputs=inputs, outputs=outputs)
ds = self._get_train_data_dict(as_dataset=True, include_strings=False)
out = model.predict(ds.batch(4))
self.assertAllClose(out[0], ref_out)
# Test correctness of the re-saved FS
fs = saving_api.load_model(temp_filepath)
out = fs(data)
self.assertAllClose(out, ref_out)
def test_errors(self):
# Test no features
with self.assertRaisesRegex(ValueError, "cannot be None or empty"):
feature_space.FeatureSpace(features={})
# Test no crossing dim
with self.assertRaisesRegex(ValueError, "`crossing_dim`"):
feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
},
crosses=[("f1", "f2")],
crossing_dim=None,
)
# Test wrong cross feature name
with self.assertRaisesRegex(ValueError, "should be present in "):
feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
},
crosses=[("f1", "unknown")],
crossing_dim=32,
)
# Test wrong output mode
with self.assertRaisesRegex(ValueError, "for argument `output_mode`"):
feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
},
output_mode="unknown",
)
# Test call before adapt
with self.assertRaisesRegex(ValueError, "You need to call `.adapt"):
fs = feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
}
)
fs({"f1": [0], "f2": [0]})
# Test get_encoded_features before adapt
with self.assertRaisesRegex(ValueError, "You need to call `.adapt"):
fs = feature_space.FeatureSpace(
features={
"f1": "integer_categorical",
"f2": "integer_categorical",
}
)
fs.get_encoded_features()
| keras-core/keras_core/layers/preprocessing/feature_space_test.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/feature_space_test.py",
"repo_id": "keras-core",
"token_count": 11387
} | 40 |
import numpy as np
from tensorflow import data as tf_data
from keras_core import layers
from keras_core import testing
class RandomCropTest(testing.TestCase):
def test_random_crop(self):
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 1,
"width": 1,
},
input_shape=(2, 3, 4),
supports_masking=False,
run_training_check=False,
)
def test_random_crop_full(self):
np.random.seed(1337)
height, width = 8, 16
inp = np.random.random((12, 8, 16, 3))
layer = layers.RandomCrop(height, width)
actual_output = layer(inp, training=False)
self.assertAllClose(inp, actual_output)
def test_random_crop_partial(self):
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 8,
"width": 8,
},
input_shape=(12, 8, 16, 3),
expected_output_shape=(12, 8, 8, 3),
supports_masking=False,
run_training_check=False,
)
def test_predicting_with_longer_height(self):
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 10,
"width": 8,
},
input_shape=(12, 8, 16, 3),
expected_output_shape=(12, 10, 8, 3),
supports_masking=False,
run_training_check=False,
)
def test_predicting_with_longer_width(self):
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 8,
"width": 18,
},
input_shape=(12, 8, 16, 3),
expected_output_shape=(12, 8, 18, 3),
supports_masking=False,
run_training_check=False,
)
def test_tf_data_compatibility(self):
layer = layers.RandomCrop(8, 9)
input_data = np.random.random((2, 10, 12, 3))
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(list(output.shape), [2, 8, 9, 3])
| keras-core/keras_core/layers/preprocessing/random_crop_test.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/random_crop_test.py",
"repo_id": "keras-core",
"token_count": 1205
} | 41 |
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras_core import backend
from keras_core import layers
from keras_core import models
from keras_core import testing
class TextVectorizationTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.TextVectorization(
output_mode="int",
vocabulary=["one", "two"],
output_sequence_length=5,
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
max_tokens = 5000
max_len = 4
layer = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_len,
)
layer.adapt(["foo bar", "bar baz", "baz bada boom"])
input_data = [["foo qux bar"], ["qux baz"]]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
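        # With the default "int" output mode, index 0 is reserved for padding
        # and index 1 for out-of-vocabulary tokens ("qux" here); adapted
        # tokens start at index 2.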
self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))
def test_fixed_vocabulary(self):
max_tokens = 5000
max_len = 4
layer = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_len,
vocabulary=["baz", "bar", "foo"],
)
input_data = [["foo qux bar"], ["qux baz"]]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))
def test_set_vocabulary(self):
max_tokens = 5000
max_len = 4
layer = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_len,
)
layer.set_vocabulary(["baz", "bar", "foo"])
input_data = [["foo qux bar"], ["qux baz"]]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))
def test_tf_data_compatibility(self):
max_tokens = 5000
max_len = 4
layer = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_len,
vocabulary=["baz", "bar", "foo"],
)
input_data = [["foo qux bar"], ["qux baz"]]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))
# Test adapt flow
layer = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_len,
)
layer.adapt(input_data)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Requires string tensors."
)
def test_tf_as_first_sequential_layer(self):
layer = layers.TextVectorization(
max_tokens=10,
output_mode="int",
output_sequence_length=3,
)
layer.set_vocabulary(["baz", "bar", "foo"])
model = models.Sequential(
[
layer,
layers.Embedding(5, 4),
]
)
model(backend.convert_to_tensor([["foo qux bar"], ["qux baz"]]))
| keras-core/keras_core/layers/preprocessing/text_vectorization_test.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/text_vectorization_test.py",
"repo_id": "keras-core",
"token_count": 1776
} | 42 |
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
from keras_core.utils import argument_validation
@keras_core_export("keras_core.layers.Cropping2D")
class Cropping2D(Layer):
"""Cropping layer for 2D input (e.g. picture).
It crops along spatial dimensions, i.e. height and width.
Examples:
>>> input_shape = (2, 28, 28, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = keras_core.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x)
>>> y.shape
(2, 24, 20, 3)
Args:
cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric cropping is applied to height and
width.
- If tuple of 2 ints: interpreted as two different symmetric
cropping values for height and width:
`(symmetric_height_crop, symmetric_width_crop)`.
- If tuple of 2 tuples of 2 ints: interpreted as
`((top_crop, bottom_crop), (left_crop, right_crop))`.
data_format: A string, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
When unspecified, uses `image_data_format` value found in your Keras
config file at `~/.keras/keras.json` (if exists). Defaults to
`"channels_last"`.
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, height, width, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, height, width)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, cropped_height, cropped_width, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, cropped_height, cropped_width)`
"""
def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping))
elif hasattr(cropping, "__len__"):
if len(cropping) != 2:
raise ValueError(
"`cropping` should have two elements. "
f"Received: cropping={cropping}."
)
height_cropping = argument_validation.standardize_tuple(
cropping[0], 2, "1st entry of cropping", allow_zero=True
)
width_cropping = argument_validation.standardize_tuple(
cropping[1], 2, "2nd entry of cropping", allow_zero=True
)
self.cropping = (height_cropping, width_cropping)
else:
raise ValueError(
"`cropping` should be either an int, a tuple of 2 ints "
"(symmetric_height_crop, symmetric_width_crop), "
"or a tuple of 2 tuples of 2 ints "
"((top_crop, bottom_crop), (left_crop, right_crop)). "
f"Received: cropping={cropping}."
)
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
if self.data_format == "channels_first":
if (
input_shape[2] is not None
and sum(self.cropping[0]) >= input_shape[2]
) or (
input_shape[3] is not None
and sum(self.cropping[1]) >= input_shape[3]
):
raise ValueError(
"Values in `cropping` argument should be greater than the "
"corresponding spatial dimension of the input. Received: "
f"input_shape={input_shape}, cropping={self.cropping}"
)
return (
input_shape[0],
input_shape[1],
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[2] is not None
else None,
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[3] is not None
else None,
)
else:
if (
input_shape[1] is not None
and sum(self.cropping[0]) >= input_shape[1]
) or (
input_shape[2] is not None
and sum(self.cropping[1]) >= input_shape[2]
):
raise ValueError(
"Values in `cropping` argument should be greater than the "
"corresponding spatial dimension of the input. Received: "
f"input_shape={input_shape}, cropping={self.cropping}"
)
return (
input_shape[0],
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[1] is not None
else None,
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[2] is not None
else None,
input_shape[3],
)
def call(self, inputs):
if self.data_format == "channels_first":
if (
inputs.shape[2] is not None
and sum(self.cropping[0]) >= inputs.shape[2]
) or (
inputs.shape[3] is not None
and sum(self.cropping[1]) >= inputs.shape[3]
):
raise ValueError(
"Values in `cropping` argument should be greater than the "
"corresponding spatial dimension of the input. Received: "
f"inputs.shape={inputs.shape}, cropping={self.cropping}"
)
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[
:, :, self.cropping[0][0] :, self.cropping[1][0] :
]
elif self.cropping[0][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
]
elif self.cropping[1][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
]
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
]
else:
if (
inputs.shape[1] is not None
and sum(self.cropping[0]) >= inputs.shape[1]
) or (
inputs.shape[2] is not None
and sum(self.cropping[1]) >= inputs.shape[2]
):
raise ValueError(
"Values in `cropping` argument should be greater than the "
"corresponding spatial dimension of the input. Received: "
f"inputs.shape={inputs.shape}, cropping={self.cropping}"
)
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[
:, self.cropping[0][0] :, self.cropping[1][0] :, :
]
elif self.cropping[0][1] == 0:
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
:,
]
elif self.cropping[1][1] == 0:
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
:,
]
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
:,
]
def get_config(self):
config = {"cropping": self.cropping, "data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
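

# Illustrative usage sketch (not part of the library itself): with a
# channels_last input of shape (2, 6, 8, 3), an asymmetric cropping of
# ((1, 2), (3, 0)) removes 1 row from the top, 2 rows from the bottom and
# 3 columns from the left, giving an output of shape (2, 3, 5, 3):
#
#   import numpy as np
#   x = np.zeros((2, 6, 8, 3))
#   y = Cropping2D(cropping=((1, 2), (3, 0)))(x)
#   assert tuple(y.shape) == (2, 3, 5, 3)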
| keras-core/keras_core/layers/reshaping/cropping2d.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/cropping2d.py",
"repo_id": "keras-core",
"token_count": 4702
} | 43 |
from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
from keras_core.utils import argument_validation
@keras_core_export("keras_core.layers.UpSampling3D")
class UpSampling3D(Layer):
"""Upsampling layer for 3D inputs.
Repeats the 1st, 2nd and 3rd dimensions
of the data by `size[0]`, `size[1]` and `size[2]` respectively.
Example:
>>> input_shape = (2, 1, 2, 1, 3)
>>> x = np.ones(input_shape)
>>> y = keras_core.layers.UpSampling3D(size=(2, 2, 2))(x)
>>> y.shape
(2, 2, 4, 2, 3)
Args:
size: Int, or tuple of 3 integers.
The upsampling factors for dim1, dim2 and dim3.
data_format: A string,
one of `"channels_last"` (default) or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
When unspecified, uses
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json` (if exists) else `"channels_last"`.
Defaults to `"channels_last"`.
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, dim1, dim2, dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, dim1, dim2, dim3)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3,
channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, upsampled_dim1, upsampled_dim2,
upsampled_dim3)`
"""
def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.config.standardize_data_format(data_format)
self.size = argument_validation.standardize_tuple(size, 3, "size")
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
if self.data_format == "channels_first":
dim1 = (
self.size[0] * input_shape[2]
if input_shape[2] is not None
else None
)
dim2 = (
self.size[1] * input_shape[3]
if input_shape[3] is not None
else None
)
dim3 = (
self.size[2] * input_shape[4]
if input_shape[4] is not None
else None
)
return (input_shape[0], input_shape[1], dim1, dim2, dim3)
else:
dim1 = (
self.size[0] * input_shape[1]
if input_shape[1] is not None
else None
)
dim2 = (
self.size[1] * input_shape[2]
if input_shape[2] is not None
else None
)
dim3 = (
self.size[2] * input_shape[3]
if input_shape[3] is not None
else None
)
return (input_shape[0], dim1, dim2, dim3, input_shape[4])
def call(self, inputs):
return self._resize_volumes(
inputs, self.size[0], self.size[1], self.size[2], self.data_format
)
def get_config(self):
config = {"size": self.size, "data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
def _resize_volumes(
self, x, depth_factor, height_factor, width_factor, data_format
):
"""Resizes the volume contained in a 5D tensor.
Args:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
Resized tensor.
"""
if data_format == "channels_first":
output = ops.repeat(x, depth_factor, axis=2)
output = ops.repeat(output, height_factor, axis=3)
output = ops.repeat(output, width_factor, axis=4)
return output
elif data_format == "channels_last":
output = ops.repeat(x, depth_factor, axis=1)
output = ops.repeat(output, height_factor, axis=2)
output = ops.repeat(output, width_factor, axis=3)
return output
else:
raise ValueError(f"Invalid data_format: {data_format}")
| keras-core/keras_core/layers/reshaping/up_sampling3d.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/up_sampling3d.py",
"repo_id": "keras-core",
"token_count": 2436
} | 44 |
import numpy as np
import pytest
from keras_core import initializers
from keras_core import layers
from keras_core import ops
from keras_core import testing
class TimeDistributedTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.TimeDistributed,
init_kwargs={"layer": layers.Dense(1, use_bias=False)},
input_shape=(3, 2, 4),
expected_output_shape=(3, 2, 1),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_build(self):
inputs = layers.Input(shape=(10, 128, 128, 3), batch_size=32)
conv_2d_layer = layers.Conv2D(64, (3, 3))
outputs = layers.TimeDistributed(conv_2d_layer)(inputs)
self.assertEqual(outputs.shape, (32, 10, 126, 126, 64))
def test_correctness(self):
sequence = np.arange(24).reshape((3, 2, 4)).astype("float32")
layer = layers.Dense(
1,
kernel_initializer=initializers.Constant(0.01),
use_bias=False,
)
layer = layers.TimeDistributed(layer=layer)
output = layer(sequence)
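        # With a constant 0.01 kernel and no bias, each output equals
        # 0.01 * (sum of the 4 features at that timestep), e.g.
        # 0.01 * (0 + 1 + 2 + 3) = 0.06 for the first timestep.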
self.assertAllClose(
np.array(
[[[0.06], [0.22]], [[0.38], [0.53999996]], [[0.7], [0.86]]]
),
output,
)
def test_masking(self):
class MaskedDense(layers.Wrapper):
def __init__(self, units, **kwargs):
layer = layers.Dense(
units,
kernel_initializer=initializers.Constant(0.01),
use_bias=False,
)
super().__init__(layer, **kwargs)
self.supports_masking = True
def call(self, inputs, training=False, mask=None):
unmasked = self.layer.call(inputs)
if mask is None:
return unmasked
else:
return ops.transpose(
ops.transpose(unmasked) * ops.cast(mask, inputs.dtype)
)
sequence = np.arange(24).reshape((3, 2, 4)).astype("float32")
layer = layers.TimeDistributed(layer=MaskedDense(1))
mask = np.array([[False, True], [True, False], [True, True]])
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array([[[0], [0.22]], [[0.38], [0]], [[0.7], [0.86]]]),
output,
)
| keras-core/keras_core/layers/rnn/time_distributed_test.py/0 | {
"file_path": "keras-core/keras_core/layers/rnn/time_distributed_test.py",
"repo_id": "keras-core",
"token_count": 1315
} | 45 |
"""Legacy serialization logic for Keras models."""
import contextlib
import inspect
import json
import threading
import weakref
# isort: off
from keras_core.api_export import keras_core_export
from keras_core.saving import object_registration
# Flag that determines whether to skip the NotImplementedError when calling
# get_config in custom models and layers. This is only enabled when saving to
# SavedModel, when the config isn't required.
_SKIP_FAILED_SERIALIZATION = False
# If a layer does not have a defined config, then the returned config will be a
# dictionary with the below key.
_LAYER_UNDEFINED_CONFIG_KEY = "layer was saved without config"
# Store a unique, per-object ID for shared objects.
#
# We store a unique ID for each object so that we may, at loading time,
# re-create the network properly. Without this ID, we would have no way of
# determining whether a config is a description of a new object that
# should be created or is merely a reference to an already-created object.
SHARED_OBJECT_KEY = "shared_object_id"
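
# Illustrative example (the exact config contents are hypothetical): every
# occurrence of a shared layer's config carries the same ID, e.g.
#
#   {"class_name": "Dense", "config": {...}, "shared_object_id": 0}
#
# so that deserialization can return a single already-built object for
# repeated references instead of cloning it.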
SHARED_OBJECT_DISABLED = threading.local()
SHARED_OBJECT_LOADING = threading.local()
SHARED_OBJECT_SAVING = threading.local()
# Attributes on the threadlocal variable must be set per-thread, thus we
# cannot initialize these globally. Instead, we have accessor functions with
# default values.
def _shared_object_disabled():
"""Get whether shared object handling is disabled in a threadsafe manner."""
return getattr(SHARED_OBJECT_DISABLED, "disabled", False)
def _shared_object_loading_scope():
"""Get the current shared object saving scope in a threadsafe manner."""
return getattr(SHARED_OBJECT_LOADING, "scope", NoopLoadingScope())
def _shared_object_saving_scope():
"""Get the current shared object saving scope in a threadsafe manner."""
return getattr(SHARED_OBJECT_SAVING, "scope", None)
class DisableSharedObjectScope:
"""A context manager for disabling handling of shared objects.
Disables shared object handling for both saving and loading.
Created primarily for use with `clone_model`, which does extra surgery that
is incompatible with shared objects.
"""
def __enter__(self):
SHARED_OBJECT_DISABLED.disabled = True
self._orig_loading_scope = _shared_object_loading_scope()
self._orig_saving_scope = _shared_object_saving_scope()
def __exit__(self, *args, **kwargs):
SHARED_OBJECT_DISABLED.disabled = False
SHARED_OBJECT_LOADING.scope = self._orig_loading_scope
SHARED_OBJECT_SAVING.scope = self._orig_saving_scope
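

# Usage sketch (illustrative): temporarily turn off shared-object tracking,
# e.g. around model-cloning style surgery:
#
#   with DisableSharedObjectScope():
#       config = serialize_keras_object(shared_layer)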
class NoopLoadingScope:
"""The default shared object loading scope. It does nothing.
Created to simplify serialization code that doesn't care about shared
objects (e.g. when serializing a single object).
"""
def get(self, unused_object_id):
return None
def set(self, object_id, obj):
pass
class SharedObjectLoadingScope:
"""A context manager for keeping track of loaded objects.
During the deserialization process, we may come across objects that are
shared across multiple layers. In order to accurately restore the network
structure to its original state, `SharedObjectLoadingScope` allows us to
re-use shared objects rather than cloning them.
"""
def __enter__(self):
if _shared_object_disabled():
return NoopLoadingScope()
global SHARED_OBJECT_LOADING
SHARED_OBJECT_LOADING.scope = self
self._obj_ids_to_obj = {}
return self
def get(self, object_id):
"""Given a shared object ID, returns a previously instantiated object.
Args:
object_id: shared object ID to use when attempting to find
already-loaded object.
Returns:
The object, if we've seen this ID before. Else, `None`.
"""
# Explicitly check for `None` internally to make external calling code a
# bit cleaner.
if object_id is None:
return
return self._obj_ids_to_obj.get(object_id)
def set(self, object_id, obj):
"""Stores an instantiated object for future lookup and sharing."""
if object_id is None:
return
self._obj_ids_to_obj[object_id] = obj
def __exit__(self, *args, **kwargs):
global SHARED_OBJECT_LOADING
SHARED_OBJECT_LOADING.scope = NoopLoadingScope()
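

# Usage sketch (illustrative; `config_a`, `config_b` and `objs` are
# placeholders): wrap deserialization so that repeated "shared_object_id"
# references resolve to a single shared instance:
#
#   with SharedObjectLoadingScope():
#       layer_a = deserialize_keras_object(config_a, module_objects=objs)
#       layer_b = deserialize_keras_object(config_b, module_objects=objs)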
class SharedObjectConfig(dict):
"""A configuration container that keeps track of references.
`SharedObjectConfig` will automatically attach a shared object ID to any
configs which are referenced more than once, allowing for proper shared
object reconstruction at load time.
In most cases, it would be more proper to subclass something like
`collections.UserDict` or `collections.Mapping` rather than `dict` directly.
Unfortunately, python's json encoder does not support `Mapping`s. This is
important functionality to retain, since we are dealing with serialization.
We should be safe to subclass `dict` here, since we aren't actually
overriding any core methods, only augmenting with a new one for reference
counting.
"""
def __init__(self, base_config, object_id, **kwargs):
self.ref_count = 1
self.object_id = object_id
super().__init__(base_config, **kwargs)
def increment_ref_count(self):
# As soon as we've seen the object more than once, we want to attach the
# shared object ID. This allows us to only attach the shared object ID
# when it's strictly necessary, making backwards compatibility breakage
# less likely.
if self.ref_count == 1:
self[SHARED_OBJECT_KEY] = self.object_id
self.ref_count += 1
class SharedObjectSavingScope:
"""Keeps track of shared object configs when serializing."""
def __enter__(self):
if _shared_object_disabled():
return None
global SHARED_OBJECT_SAVING
# Serialization can happen at a number of layers for a number of
# reasons. We may end up with a case where we're opening a saving scope
# within another saving scope. In that case, we'd like to use the
# outermost scope available and ignore inner scopes, since there is not
# (yet) a reasonable use case for having these nested and distinct.
if _shared_object_saving_scope() is not None:
self._passthrough = True
return _shared_object_saving_scope()
else:
self._passthrough = False
SHARED_OBJECT_SAVING.scope = self
self._shared_objects_config = weakref.WeakKeyDictionary()
self._next_id = 0
return self
def get_config(self, obj):
"""Gets a `SharedObjectConfig` if one has already been seen for `obj`.
Args:
obj: The object for which to retrieve the `SharedObjectConfig`.
Returns:
The SharedObjectConfig for a given object, if already seen. Else,
`None`.
"""
try:
shared_object_config = self._shared_objects_config[obj]
except (TypeError, KeyError):
# If the object is unhashable (e.g. a subclass of
# `AbstractBaseClass` that has not overridden `__hash__`), a
# `TypeError` will be thrown. We'll just continue on without shared
# object support.
return None
shared_object_config.increment_ref_count()
return shared_object_config
def create_config(self, base_config, obj):
"""Create a new SharedObjectConfig for a given object."""
shared_object_config = SharedObjectConfig(base_config, self._next_id)
self._next_id += 1
try:
self._shared_objects_config[obj] = shared_object_config
except TypeError:
# If the object is unhashable (e.g. a subclass of
# `AbstractBaseClass` that has not overridden `__hash__`), a
# `TypeError` will be thrown. We'll just continue on without shared
# object support.
pass
return shared_object_config
def __exit__(self, *args, **kwargs):
if not getattr(self, "_passthrough", False):
global SHARED_OBJECT_SAVING
SHARED_OBJECT_SAVING.scope = None
def serialize_keras_class_and_config(
cls_name, cls_config, obj=None, shared_object_id=None
):
"""Returns the serialization of the class with the given config."""
base_config = {"class_name": cls_name, "config": cls_config}
# We call `serialize_keras_class_and_config` for some branches of the load
# path. In that case, we may already have a shared object ID we'd like to
# retain.
if shared_object_id is not None:
base_config[SHARED_OBJECT_KEY] = shared_object_id
# If we have an active `SharedObjectSavingScope`, check whether we've
# already serialized this config. If so, just use that config. This will
# store an extra ID field in the config, allowing us to re-create the shared
# object relationship at load time.
if _shared_object_saving_scope() is not None and obj is not None:
shared_object_config = _shared_object_saving_scope().get_config(obj)
if shared_object_config is None:
return _shared_object_saving_scope().create_config(base_config, obj)
return shared_object_config
return base_config
@contextlib.contextmanager
def skip_failed_serialization():
global _SKIP_FAILED_SERIALIZATION
prev = _SKIP_FAILED_SERIALIZATION
try:
_SKIP_FAILED_SERIALIZATION = True
yield
finally:
_SKIP_FAILED_SERIALIZATION = prev
@keras_core_export(
[
"keras_core.legacy.saving.serialize_keras_object",
"keras_core.utils.legacy.serialize_keras_object",
]
)
def serialize_keras_object(instance):
"""Serialize a Keras object into a JSON-compatible representation.
Calls to `serialize_keras_object` while underneath the
`SharedObjectSavingScope` context manager will cause any objects re-used
across multiple layers to be saved with a special shared object ID. This
allows the network to be re-created properly during deserialization.
Args:
instance: The object to serialize.
Returns:
A dict-like, JSON-compatible representation of the object's config.
"""
# _, instance = tf.__internal__.decorator.unwrap(instance)
instance = inspect.unwrap(instance)
if instance is None:
return None
if hasattr(instance, "get_config"):
name = object_registration.get_registered_name(instance.__class__)
try:
config = instance.get_config()
except NotImplementedError as e:
if _SKIP_FAILED_SERIALIZATION:
return serialize_keras_class_and_config(
name, {_LAYER_UNDEFINED_CONFIG_KEY: True}
)
raise e
serialization_config = {}
for key, item in config.items():
if isinstance(item, str):
serialization_config[key] = item
continue
# Any object of a different type needs to be converted to string or
# dict for serialization (e.g. custom functions, custom classes)
try:
serialized_item = serialize_keras_object(item)
if isinstance(serialized_item, dict) and not isinstance(
item, dict
):
serialized_item["__passive_serialization__"] = True
serialization_config[key] = serialized_item
except ValueError:
serialization_config[key] = item
name = object_registration.get_registered_name(instance.__class__)
return serialize_keras_class_and_config(
name, serialization_config, instance
)
if hasattr(instance, "__name__"):
return object_registration.get_registered_name(instance)
raise ValueError(
f"Cannot serialize {instance} because it doesn't implement "
"`get_config()`."
)
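

# Example of the returned structure (illustrative): an object that implements
# `get_config()` serializes to a dict of the form
#
#   {"class_name": "MyLayer", "config": {...}}
#
# optionally augmented with a "shared_object_id" entry when serialized inside
# a `SharedObjectSavingScope`.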
def class_and_config_for_serialized_keras_object(
config,
module_objects=None,
custom_objects=None,
printable_module_name="object",
):
"""Returns the class name and config for a serialized keras object."""
if (
not isinstance(config, dict)
or "class_name" not in config
or "config" not in config
):
raise ValueError(
f"Improper config format for {config}. "
"Expecting python dict contains `class_name` and `config` as keys"
)
class_name = config["class_name"]
cls = object_registration.get_registered_object(
class_name, custom_objects, module_objects
)
if cls is None:
raise ValueError(
f"Unknown {printable_module_name}: '{class_name}'. "
"Please ensure you are using a `keras.utils.custom_object_scope` "
"and that this object is included in the scope. See "
"https://www.tensorflow.org/guide/keras/save_and_serialize"
"#registering_the_custom_object for details."
)
cls_config = config["config"]
# Check if `cls_config` is a list. If it is a list, return the class and the
# associated class configs for recursively deserialization. This case will
# happen on the old version of sequential model (e.g. `keras_version` ==
# "2.0.6"), which is serialized in a different structure, for example
# "{'class_name': 'Sequential',
# 'config': [{'class_name': 'Embedding', 'config': ...}, {}, ...]}".
if isinstance(cls_config, list):
return (cls, cls_config)
deserialized_objects = {}
for key, item in cls_config.items():
if key == "name":
# Assume that the value of 'name' is a string that should not be
# deserialized as a function. This avoids the corner case where
# cls_config['name'] has an identical name to a custom function and
# gets converted into that function.
deserialized_objects[key] = item
elif isinstance(item, dict) and "__passive_serialization__" in item:
deserialized_objects[key] = deserialize_keras_object(
item,
module_objects=module_objects,
custom_objects=custom_objects,
printable_module_name="config_item",
)
# TODO(momernick): Should this also have 'module_objects'?
elif isinstance(item, str) and inspect.isfunction(
object_registration.get_registered_object(item, custom_objects)
):
# Handle custom functions here. When saving functions, we only save
# the function's name as a string. If we find a matching string in
# the custom objects during deserialization, we convert the string
# back to the original function.
# Note that a potential issue is that a string field could have a
# naming conflict with a custom function name, but this should be a
# rare case. This issue does not occur if a string field has a
# naming conflict with a custom object, since the config of an
# object will always be a dict.
deserialized_objects[
key
] = object_registration.get_registered_object(item, custom_objects)
for key, item in deserialized_objects.items():
cls_config[key] = deserialized_objects[key]
return (cls, cls_config)
@keras_core_export(
[
"keras_core.legacy.saving.deserialize_keras_object",
"keras_core.utils.legacy.deserialize_keras_object",
]
)
def deserialize_keras_object(
identifier,
module_objects=None,
custom_objects=None,
printable_module_name="object",
):
"""Turns the serialized form of a Keras object back into an actual object.
This function is for mid-level library implementers rather than end users.
Importantly, this utility requires you to provide the dict of
`module_objects` to use for looking up the object config; this is not
populated by default. If you need a deserialization utility that has
preexisting knowledge of built-in Keras objects, use e.g.
`keras.layers.deserialize(config)`, `keras.metrics.deserialize(config)`,
etc.
Calling `deserialize_keras_object` while underneath the
`SharedObjectLoadingScope` context manager will cause any already-seen
shared objects to be returned as-is rather than creating a new object.
Args:
identifier: the serialized form of the object.
module_objects: A dictionary of built-in objects to look the name up in.
Generally, `module_objects` is provided by midlevel library
implementers.
custom_objects: A dictionary of custom objects to look the name up in.
Generally, `custom_objects` is provided by the end user.
printable_module_name: A human-readable string representing the type of
the object. Printed in case of exception.
Returns:
The deserialized object.
Example:
A mid-level library implementer might want to implement a utility for
retrieving an object from its config, as such:
```python
    def deserialize(config, custom_objects=None):
        return deserialize_keras_object(
            config,
            module_objects=globals(),
            custom_objects=custom_objects,
            printable_module_name="MyObjectType",
        )
```
This is how e.g. `keras.layers.deserialize()` is implemented.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
(cls, cls_config) = class_and_config_for_serialized_keras_object(
config, module_objects, custom_objects, printable_module_name
)
# If this object has already been loaded (i.e. it's shared between
# multiple objects), return the already-loaded object.
shared_object_id = config.get(SHARED_OBJECT_KEY)
shared_object = _shared_object_loading_scope().get(shared_object_id)
if shared_object is not None:
return shared_object
if hasattr(cls, "from_config"):
arg_spec = inspect.getfullargspec(cls.from_config)
custom_objects = custom_objects or {}
# TODO(nkovela): Swap find and replace args during Keras 3.0 release
# Replace keras refs with keras_core
cls_config = _find_replace_nested_dict(
cls_config, "keras.", "keras_core."
)
if "custom_objects" in arg_spec.args:
deserialized_obj = cls.from_config(
cls_config,
custom_objects={
**object_registration.GLOBAL_CUSTOM_OBJECTS,
**custom_objects,
},
)
else:
with object_registration.CustomObjectScope(custom_objects):
deserialized_obj = cls.from_config(cls_config)
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
custom_objects = custom_objects or {}
with object_registration.CustomObjectScope(custom_objects):
deserialized_obj = cls(**cls_config)
# Add object to shared objects, in case we find it referenced again.
_shared_object_loading_scope().set(shared_object_id, deserialized_obj)
return deserialized_obj
elif isinstance(identifier, str):
object_name = identifier
if custom_objects and object_name in custom_objects:
obj = custom_objects.get(object_name)
elif (
object_name
in object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__
):
obj = object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__[
object_name
]
elif object_name in object_registration._GLOBAL_CUSTOM_OBJECTS:
obj = object_registration._GLOBAL_CUSTOM_OBJECTS[object_name]
else:
obj = module_objects.get(object_name)
if obj is None:
raise ValueError(
f"Unknown {printable_module_name}: '{object_name}'. "
"Please ensure you are using a "
"`keras.utils.custom_object_scope` "
"and that this object is included in the scope. See "
"https://www.tensorflow.org/guide/keras/save_and_serialize"
"#registering_the_custom_object for details."
)
# Classes passed by name are instantiated with no args, functions are
# returned as-is.
if inspect.isclass(obj):
return obj()
return obj
elif inspect.isfunction(identifier):
# If a function has already been deserialized, return as is.
return identifier
else:
raise ValueError(
"Could not interpret serialized "
f"{printable_module_name}: {identifier}"
)
def validate_config(config):
"""Determines whether config appears to be a valid layer config."""
return (
isinstance(config, dict) and _LAYER_UNDEFINED_CONFIG_KEY not in config
)
def is_default(method):
"""Check if a method is decorated with the `default` wrapper."""
return getattr(method, "_is_default", False)
def _find_replace_nested_dict(config, find, replace):
dict_str = json.dumps(config)
dict_str = dict_str.replace(find, replace)
config = json.loads(dict_str)
return config
| keras-core/keras_core/legacy/saving/serialization.py/0 | {
"file_path": "keras-core/keras_core/legacy/saving/serialization.py",
"repo_id": "keras-core",
"token_count": 8551
} | 46 |
import numpy as np
from keras_core import testing
from keras_core.metrics import iou_metrics as metrics
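

# All expected values in these tests follow the same recipe: build the
# (optionally weighted) confusion matrix, then compute, per target class,
#   IoU = true_positives / (sum_row + sum_col - true_positives)
# and average the per-class IoUs over the target classes.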
class IoUTest(testing.TestCase):
def test_config(self):
obj = metrics.IoU(
num_classes=2, target_class_ids=[1, 0], name="iou_class_1_0"
)
self.assertEqual(obj.name, "iou_class_1_0")
self.assertEqual(obj.num_classes, 2)
self.assertEqual(obj.target_class_ids, [1, 0])
obj2 = metrics.IoU.from_config(obj.get_config())
self.assertEqual(obj2.name, "iou_class_1_0")
self.assertEqual(obj2.num_classes, 2)
self.assertEqual(obj2.target_class_ids, [1, 0])
def test_unweighted(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
obj = metrics.IoU(
num_classes=2, target_class_ids=[0, 1], dtype="float32"
)
result = obj(y_true, y_pred)
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
y_pred = np.array([0, 1, 0, 1], dtype=np.float32)
y_true = np.array([0, 0, 1, 1])
sample_weight = np.array([0.2, 0.3, 0.4, 0.1])
obj = metrics.IoU(
num_classes=2, target_class_ids=[1, 0], dtype="float32"
)
result = obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
# 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (
0.1 / (0.4 + 0.5 - 0.1) + 0.2 / (0.6 + 0.5 - 0.2)
) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_pred = np.array([[0, 1], [0, 1]], dtype=np.float32)
y_true = np.array([[0, 0], [1, 1]])
sample_weight = np.array([[0.2, 0.3], [0.4, 0.1]])
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
result = obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
# 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (
0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_zero_valid_entries(self):
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
self.assertAllClose(obj.result(), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = np.array([1], dtype=np.float32)
y_true = np.array([1])
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
result = obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (1 / (1 + 1 - 1)) / 1
self.assertAllClose(result, expected_result, atol=1e-3)
class BinaryIoUTest(testing.TestCase):
def test_config(self):
obj = metrics.BinaryIoU(
target_class_ids=[1, 0], threshold=0.1, name="iou_class_1_0"
)
self.assertEqual(obj.name, "iou_class_1_0")
self.assertAlmostEqual(obj.threshold, 0.1)
self.assertEqual(obj.target_class_ids, [1, 0])
obj2 = metrics.BinaryIoU.from_config(obj.get_config())
        self.assertEqual(obj2.name, "iou_class_1_0")
        self.assertAlmostEqual(obj2.threshold, 0.1)
        self.assertEqual(obj2.target_class_ids, [1, 0])
def test_different_thresholds_weighted(self):
y_true = [0, 1, 0, 1]
y_pred = [0.1, 0.2, 0.4, 0.7]
sample_weight = np.array([0.2, 0.3, 0.4, 0.1])
# with threshold = 0.3, y_pred will be converted to [0, 0, 1, 1]
# cm = [[0.2, 0.4],
# [0.3, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
# 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (
0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3)
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, expected_result, atol=1e-3)
sample_weight = np.array([0.1, 0.2, 0.4, 0.3])
# with threshold = 0.5, y_pred will be converted to [0, 0, 0, 1]
# cm = [[0.1+0.4, 0],
# [0.2, 0.3]]
# sum_row = [0.5, 0.5], sum_col = [0.7, 0.3], true_positives = [0.5,
# 0.3]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (
0.5 / (0.5 + 0.7 - 0.5) + 0.3 / (0.5 + 0.3 - 0.3)
) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.5)
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, expected_result, atol=1e-3)
def test_different_thresholds_unweighted(self):
y_true = [0, 1, 0, 1]
y_pred = [0.1, 0.2, 0.4, 0.7]
# with threshold = 0.3, y_pred will be converted to [0, 0, 1, 1]
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3)
result = obj(y_true, y_pred)
self.assertAllClose(result, expected_result, atol=1e-3)
# with threshold = 0.5, y_pred will be converted to [0, 0, 0, 1]
# cm = [[2, 0],
# [1, 1]]
# sum_row = [2, 2], sum_col = [3, 1], true_positives = [2, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (2 / (2 + 3 - 2) + 1 / (2 + 1 - 1)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.5)
result = obj(y_true, y_pred)
self.assertAllClose(result, expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_true = np.array([[0, 1], [0, 1]], dtype=np.float32)
y_pred = np.array([[0.1, 0.7], [0.9, 0.3]])
threshold = 0.4 # y_pred will become [[0, 1], [1, 0]]
sample_weight = np.array([[0.2, 0.3], [0.4, 0.1]])
# cm = [[0.2, 0.4],
# [0.1, 0.3]]
# sum_row = [0.6, 0.4], sum_col = [0.3, 0.7], true_positives = [0.2,
# 0.3]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (
0.2 / (0.6 + 0.3 - 0.2) + 0.3 / (0.4 + 0.7 - 0.3)
) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=threshold)
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, expected_result, atol=1e-3)
def test_zero_valid_entries(self):
obj = metrics.BinaryIoU(target_class_ids=[0, 1])
self.assertAllClose(obj.result(), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = np.array([0.6], dtype=np.float32)
threshold = 0.5
y_true = np.array([1])
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=threshold)
result = obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = 1 / (1 + 1 - 1)
self.assertAllClose(result, expected_result, atol=1e-3)
class MeanIoUTest(testing.TestCase):
def test_config(self):
m_obj = metrics.MeanIoU(num_classes=2, name="mean_iou")
self.assertEqual(m_obj.name, "mean_iou")
self.assertEqual(m_obj.num_classes, 2)
m_obj2 = metrics.MeanIoU.from_config(m_obj.get_config())
self.assertEqual(m_obj2.name, "mean_iou")
self.assertEqual(m_obj2.num_classes, 2)
def test_unweighted(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
m_obj = metrics.MeanIoU(num_classes=2)
result = m_obj(y_true, y_pred)
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_unweighted_ignore_class_255(self):
y_pred = [0, 1, 1, 1]
y_true = [0, 1, 2, 255]
m_obj = metrics.MeanIoU(num_classes=3, ignore_class=255)
result = m_obj(y_true, y_pred)
# cm = [[1, 0, 0],
# [0, 1, 0],
# [0, 1, 0]]
# sum_row = [1, 1, 1], sum_col = [1, 2, 0], true_positives = [1, 1, 0]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (
1 / (1 + 1 - 1) + 1 / (2 + 1 - 1) + 0 / (0 + 1 - 0)
) / 3
self.assertAllClose(result, expected_result, atol=1e-3)
def test_unweighted_ignore_class_1(self):
y_pred = [0, 1, 1, 1]
y_true = [0, 1, 2, -1]
m_obj = metrics.MeanIoU(num_classes=3, ignore_class=-1)
result = m_obj(y_true, y_pred)
# cm = [[1, 0, 0],
# [0, 1, 0],
# [0, 1, 0]]
# sum_row = [1, 1, 1], sum_col = [1, 2, 0], true_positives = [1, 1, 0]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (
1 / (1 + 1 - 1) + 1 / (2 + 1 - 1) + 0 / (0 + 1 - 0)
) / 3
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
y_pred = np.array([0, 1, 0, 1], dtype=np.float32)
y_true = np.array([0, 0, 1, 1])
sample_weight = np.array([0.2, 0.3, 0.4, 0.1])
m_obj = metrics.MeanIoU(num_classes=2)
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
# 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (
0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted_ignore_class_1(self):
y_pred = np.array([0, 1, 0, 1], dtype=np.float32)
y_true = np.array([0, 0, 1, -1])
sample_weight = np.array([0.2, 0.3, 0.4, 0.1])
m_obj = metrics.MeanIoU(num_classes=2, ignore_class=-1)
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.0]]
# sum_row = [0.6, 0.3], sum_col = [0.5, 0.4], true_positives = [0.2,
# 0.0]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (
0.2 / (0.6 + 0.5 - 0.2) + 0.0 / (0.3 + 0.4 - 0.0)
) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_pred = np.array([[0, 1], [0, 1]], dtype=np.float32)
y_true = np.array([[0, 0], [1, 1]])
sample_weight = np.array([[0.2, 0.3], [0.4, 0.1]])
m_obj = metrics.MeanIoU(num_classes=2)
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
# 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (
0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_zero_valid_entries(self):
m_obj = metrics.MeanIoU(num_classes=2)
self.assertAllClose(m_obj.result(), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = np.array([1], dtype=np.float32)
y_true = np.array([1])
m_obj = metrics.MeanIoU(num_classes=2)
result = m_obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 + 1 / (1 + 1 - 1)) / 1
self.assertAllClose(result, expected_result, atol=1e-3)
class OneHotIoUTest(testing.TestCase):
def test_unweighted(self):
y_true = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
# y_true will be converted to [2, 0, 1, 0]
y_pred = np.array(
[[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1], [0.1, 0.4, 0.5]]
)
# y_pred will be converted to [2, 2, 0, 2]
# cm = [[0, 0, 2],
# [1, 0, 0],
# [0, 0, 1]
# sum_row = [1, 0, 3], sum_col = [2, 1, 1], true_positives = [0, 0, 1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (0 / (1 + 2 - 0) + 1 / (3 + 1 - 1)) / 2
obj = metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2])
result = obj(y_true, y_pred)
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
y_true = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
# y_true will be converted to [2, 0, 1, 0]
y_pred = np.array(
[[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1], [0.1, 0.4, 0.5]]
)
# y_pred will be converted to [2, 2, 0, 2]
sample_weight = [0.1, 0.2, 0.3, 0.4]
# cm = [[0, 0, 0.2+0.4],
# [0.3, 0, 0],
# [0, 0, 0.1]]
# sum_row = [0.3, 0, 0.7], sum_col = [0.6, 0.3, 0.1]
# true_positives = [0, 0, 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (0 / (0.3 + 0.6 - 0) + 0.1 / (0.7 + 0.1 - 0.1)) / 2
obj = metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2])
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, expected_result, atol=1e-3)
class OneHotMeanIoUTest(testing.TestCase):
def test_unweighted(self):
y_true = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
# y_true will be converted to [2, 0, 1, 0]
y_pred = np.array(
[[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1], [0.1, 0.4, 0.5]]
)
# y_pred will be converted to [2, 2, 0, 2]
# cm = [[0, 0, 2],
# [1, 0, 0],
# [0, 0, 1]
# sum_row = [1, 0, 3], sum_col = [2, 1, 1], true_positives = [0, 0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 + 0 + 1 / (3 + 1 - 1)) / 3
obj = metrics.OneHotMeanIoU(num_classes=3)
result = obj(y_true, y_pred)
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
y_true = np.array(
[
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
]
)
# y_true will be converted to [2, 0, 1, 0, 0]
y_pred = np.array(
[
[0.2, 0.3, 0.5],
[0.1, 0.2, 0.7],
[0.5, 0.3, 0.1],
[0.1, 0.4, 0.5],
[0.6, 0.2, 0.2],
]
)
# y_pred will be converted to [2, 2, 0, 2, 0]
sample_weight = [0.1, 0.2, 0.3, 0.3, 0.1]
# cm = [[0.1, 0, 0.2+0.3],
# [0.3, 0, 0],
# [0, 0, 0.1]]
# sum_row = [0.4, 0, 0.6], sum_col = [0.6, 0.3, 0.1]
# true_positives = [0.1, 0, 0.1]
        # iou = true_positives / (sum_row + sum_col - true_positives)
expected_result = (
0.1 / (0.4 + 0.6 - 0.1) + 0 + 0.1 / (0.6 + 0.1 - 0.1)
) / 3
obj = metrics.OneHotMeanIoU(num_classes=3)
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, expected_result, atol=1e-3)
| keras-core/keras_core/metrics/iou_metrics_test.py/0 | {
"file_path": "keras-core/keras_core/metrics/iou_metrics_test.py",
"repo_id": "keras-core",
"token_count": 9125
} | 47 |
import warnings
import numpy as np
import pytest
from keras_core import backend
from keras_core import layers
from keras_core import testing
from keras_core.layers.core.input_layer import Input
from keras_core.layers.input_spec import InputSpec
from keras_core.models import Functional
from keras_core.models import Model
class FunctionalTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basic_flow_multi_input(self):
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(3,), batch_size=2, name="input_b")
x = input_a + input_b
x = layers.Dense(5)(x)
outputs = layers.Dense(4)(x)
model = Functional([input_a, input_b], outputs, name="basic")
model.summary()
self.assertEqual(model.name, "basic")
self.assertIsInstance(model, Functional)
self.assertIsInstance(model, Model)
# Eager call
in_val = [np.random.random((2, 3)), np.random.random((2, 3))]
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2, name="input_a_2")
input_b_2 = Input(shape=(3,), batch_size=2, name="input_b_2")
in_val = [input_a_2, input_b_2]
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
@pytest.mark.requires_trainable_backend
def test_scalar_input(self):
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(), batch_size=2, name="input_b")
outputs = input_a + input_b[:, None]
model = Functional([input_a, input_b], outputs)
model.summary()
in_val = [np.zeros((2, 3)), np.ones((2,))]
out_val = model(in_val)
self.assertAllClose(out_val, np.ones((2, 3)))
@pytest.mark.requires_trainable_backend
def test_mutable_state(self):
inputs = Input(shape=(3,), batch_size=2, name="input")
x = layers.Dense(5)(inputs)
outputs = layers.Dense(5)(x)
model = Functional(inputs, outputs)
# Allow attaching state to a model that isn't directly part of the DAG.
# Most useful for functional subclasses.
model.extra_layer = layers.Dense(5)
@pytest.mark.requires_trainable_backend
def test_basic_flow_multi_output(self):
inputs = Input(shape=(3,), batch_size=2, name="input")
x = layers.Dense(5)(inputs)
output_a = layers.Dense(4)(x)
output_b = layers.Dense(5)(x)
model = Functional(inputs, [output_a, output_b])
# Eager call
in_val = np.random.random((2, 3))
out_val = model(in_val)
self.assertIsInstance(out_val, list)
self.assertEqual(len(out_val), 2)
self.assertEqual(out_val[0].shape, (2, 4))
self.assertEqual(out_val[1].shape, (2, 5))
# Symbolic call
out_val = model(Input(shape=(3,), batch_size=2))
self.assertIsInstance(out_val, list)
self.assertEqual(len(out_val), 2)
self.assertEqual(out_val[0].shape, (2, 4))
self.assertEqual(out_val[1].shape, (2, 5))
@pytest.mark.requires_trainable_backend
def test_basic_flow_dict_io(self):
input_a = Input(shape=(3,), batch_size=2, name="a")
input_b = Input(shape=(3,), batch_size=2, name="b")
x = input_a + input_b
x = layers.Dense(5)(x)
outputs = layers.Dense(4)(x)
with self.assertRaisesRegex(
ValueError, "all values in the dict must be KerasTensors"
):
model = Functional({"aa": [input_a], "bb": input_b}, outputs)
model = Functional({"a": input_a, "b": input_b}, outputs)
# Eager call
in_val = {"a": np.random.random((2, 3)), "b": np.random.random((2, 3))}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2)
input_b_2 = Input(shape=(3,), batch_size=2)
in_val = {"a": input_a_2, "b": input_b_2}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
@pytest.mark.requires_trainable_backend
def test_named_input_dict_io(self):
input_a = Input(shape=(3,), batch_size=2, name="a")
x = layers.Dense(5)(input_a)
outputs = layers.Dense(4)(x)
model = Functional(input_a, outputs)
# Eager call
in_val = {"a": np.random.random((2, 3))}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2)
in_val = {"a": input_a_2}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
@pytest.mark.requires_trainable_backend
def test_input_dict_with_extra_field(self):
input_a = Input(shape=(3,), batch_size=2, name="a")
x = input_a * 5
outputs = x + 2
model = Functional({"a": input_a}, outputs)
# Eager call
with warnings.catch_warnings():
warnings.simplefilter("error")
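            # Escalating warnings to errors verifies that the unused "b" entry
            # is accepted silently, without emitting a warning.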
in_val = {
"a": np.random.random((2, 3)),
"b": np.random.random((2, 1)),
}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 3))
with warnings.catch_warnings():
warnings.simplefilter("error")
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2)
input_b_2 = Input(shape=(1,), batch_size=2)
in_val = {"a": input_a_2, "b": input_b_2}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 3))
@pytest.mark.requires_trainable_backend
def test_layer_getters(self):
# Test mixing ops and layers
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(3,), batch_size=2, name="input_b")
x = input_a + input_b
x = layers.Dense(5, name="dense_1")(x)
outputs = layers.Dense(4, name="dense_2")(x)
model = Functional([input_a, input_b], outputs)
self.assertEqual(len(model.layers), 4)
self.assertEqual(len(model._operations), 5)
self.assertEqual(model.get_layer(index=0).name, "input_a")
self.assertEqual(model.get_layer(index=1).name, "input_b")
self.assertEqual(model.get_layer(index=2).name, "dense_1")
self.assertEqual(model.get_layer(index=3).name, "dense_2")
self.assertEqual(model.get_layer(name="dense_1").name, "dense_1")
@pytest.mark.requires_trainable_backend
def test_training_arg(self):
class Canary(layers.Layer):
def call(self, x, training=False):
assert training
return x
def compute_output_spec(self, x, training=False):
return backend.KerasTensor(x.shape, dtype=x.dtype)
inputs = Input(shape=(3,), batch_size=2)
outputs = Canary()(inputs)
model = Functional(inputs, outputs)
model(np.random.random((2, 3)), training=True)
def test_mask_arg(self):
# TODO
pass
@pytest.mark.requires_trainable_backend
def test_passing_inputs_by_name(self):
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(3,), batch_size=2, name="input_b")
x = input_a + input_b
x = layers.Dense(5)(x)
outputs = layers.Dense(4)(x)
model = Functional([input_a, input_b], outputs)
# Eager call
in_val = {
"input_a": np.random.random((2, 3)),
"input_b": np.random.random((2, 3)),
}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
# Symbolic call
input_a_2 = Input(shape=(3,), batch_size=2, name="input_a_2")
input_b_2 = Input(shape=(3,), batch_size=2, name="input_b_2")
in_val = {"input_a": input_a_2, "input_b": input_b_2}
out_val = model(in_val)
self.assertEqual(out_val.shape, (2, 4))
@pytest.mark.requires_trainable_backend
def test_rank_standardization(self):
# Downranking
inputs = Input(shape=(3,), batch_size=2)
outputs = layers.Dense(3)(inputs)
model = Functional(inputs, outputs)
out_val = model(np.random.random((2, 3, 1)))
self.assertEqual(out_val.shape, (2, 3))
# Upranking
inputs = Input(shape=(3, 1), batch_size=2)
outputs = layers.Dense(3)(inputs)
model = Functional(inputs, outputs)
out_val = model(np.random.random((2, 3)))
self.assertEqual(out_val.shape, (2, 3, 3))
@pytest.mark.requires_trainable_backend
def test_dtype_standardization(self):
float_input = Input(shape=(2,), dtype="float16")
int_input = Input(shape=(2,), dtype="int32")
float_output = float_input + 2
int_output = int_input + 2
model = Functional((float_input, int_input), (float_output, int_output))
float_data, int_data = model((np.ones((2, 2)), np.ones((2, 2))))
self.assertEqual(backend.standardize_dtype(float_data.dtype), "float16")
self.assertEqual(backend.standardize_dtype(int_data.dtype), "int32")
@pytest.mark.requires_trainable_backend
def test_serialization(self):
# Test basic model
inputs = Input(shape=(3,), batch_size=2)
outputs = layers.Dense(3)(inputs)
model = Functional(inputs, outputs, trainable=False)
self.run_class_serialization_test(model)
# Test multi-io model
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(3,), batch_size=2, name="input_b")
xa = layers.Dense(5, name="middle_a")(input_a)
xb = layers.Dense(5, name="middle_b")(input_b)
output_a = layers.Dense(4, name="output_a")(xa)
output_b = layers.Dense(4, name="output_b")(xb)
model = Functional(
[input_a, input_b], [output_a, output_b], name="func"
)
self.run_class_serialization_test(model)
# Test model that includes floating ops
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(3,), batch_size=2, name="input_b")
x = input_a + input_b
x = layers.Dense(5, name="middle")(x)
output_a = layers.Dense(4, name="output_a")(x)
output_b = layers.Dense(4, name="output_b")(x)
model = Functional(
[input_a, input_b], [output_a, output_b], name="func"
)
self.run_class_serialization_test(model)
# Test model with dict i/o
input_a = Input(shape=(3,), batch_size=2, name="a")
input_b = Input(shape=(3,), batch_size=2, name="b")
x = input_a + input_b
x = layers.Dense(5)(x)
outputs = layers.Dense(4)(x)
model = Functional({"a": input_a, "b": input_b}, outputs)
self.run_class_serialization_test(model)
@pytest.mark.requires_trainable_backend
def test_bad_input_spec(self):
# Single input
inputs = Input(shape=(4,))
outputs = layers.Dense(2)(inputs)
model = Functional(inputs, outputs)
with self.assertRaisesRegex(
ValueError, r"expected shape=\(None, 4\), found shape=\(2, 3\)"
):
model(np.zeros((2, 3)))
with self.assertRaisesRegex(ValueError, "expected 1 input"):
model([np.zeros((2, 4)), np.zeros((2, 4))])
# List input
input_a = Input(shape=(4,), name="a")
input_b = Input(shape=(4,), name="b")
x = input_a + input_b
outputs = layers.Dense(2)(x)
model = Functional([input_a, input_b], outputs)
with self.assertRaisesRegex(ValueError, "expected 2 input"):
model(np.zeros((2, 3)))
with self.assertRaisesRegex(
ValueError, r"expected shape=\(None, 4\), found shape=\(2, 3\)"
):
model([np.zeros((2, 3)), np.zeros((2, 4))])
# Dict input
model = Functional({"a": input_a, "b": input_b}, outputs)
with self.assertRaisesRegex(ValueError, "expected 2 input"):
model(np.zeros((2, 3)))
with self.assertRaisesRegex(
ValueError, r"expected shape=\(None, 4\), found shape=\(2, 3\)"
):
model({"a": np.zeros((2, 3)), "b": np.zeros((2, 4))})
@pytest.mark.requires_trainable_backend
def test_manual_input_spec(self):
inputs = Input(shape=(None, 3))
outputs = layers.Dense(2)(inputs)
model = Functional(inputs, outputs)
model.input_spec = InputSpec(shape=(None, 4, 3))
with self.assertRaisesRegex(
ValueError,
r"expected shape=\(None, 4, 3\), found shape=\(2, 3, 3\)",
):
model(np.zeros((2, 3, 3)))
model(np.zeros((2, 4, 3)))
def test_add_loss(self):
# TODO
pass
| keras-core/keras_core/models/functional_test.py/0 | {
"file_path": "keras-core/keras_core/models/functional_test.py",
"repo_id": "keras-core",
"token_count": 6177
} | 48 |
"""Commonly-used neural network operations not included in NumPy."""
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.backend import KerasTensor
from keras_core.backend import any_symbolic_tensors
from keras_core.backend import standardize_data_format
from keras_core.backend.common.backend_utils import (
compute_conv_transpose_output_shape,
)
from keras_core.ops import operation_utils
from keras_core.ops.operation import Operation
from keras_core.ops.operation_utils import reduce_shape
class Relu(Operation):
def call(self, x):
return backend.nn.relu(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.relu", "keras_core.ops.nn.relu"])
def relu(x):
"""Rectified linear unit activation function.
It is defined as `f(x) = max(0, x)`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x1 = keras_core.ops.convert_to_tensor([-1.0, 0.0, 1.0, 0.2])
>>> keras_core.ops.relu(x1)
array([0.0, 0.0, 1.0, 0.2], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Relu().symbolic_call(x)
return backend.nn.relu(x)
class Relu6(Operation):
def call(self, x):
return backend.nn.relu6(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.relu6", "keras_core.ops.nn.relu6"])
def relu6(x):
"""Rectified linear unit activation function with upper bound of 6.
It is defined as `f(x) = np.clip(x, 0, 6)`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras_core.ops.convert_to_tensor([-3.0, -2.0, 0.1, 0.2, 6.0, 8.0])
>>> keras_core.ops.relu6(x)
array([0.0, 0.0, 0.1, 0.2, 6.0, 6.0], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Relu6().symbolic_call(x)
return backend.nn.relu6(x)
class Sigmoid(Operation):
def call(self, x):
return backend.nn.sigmoid(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.sigmoid", "keras_core.ops.nn.sigmoid"])
def sigmoid(x):
"""Sigmoid activation function.
It is defined as `f(x) = 1 / (1 + exp(-x))`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras_core.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0])
>>> keras_core.ops.sigmoid(x)
array([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Sigmoid().symbolic_call(x)
return backend.nn.sigmoid(x)
class Softplus(Operation):
def call(self, x):
return backend.nn.softplus(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.softplus", "keras_core.ops.nn.softplus"])
def softplus(x):
"""Softplus activation function.
It is defined as `f(x) = log(exp(x) + 1)`, where `log` is the natural
logarithm and `exp` is the exponential function.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras_core.ops.convert_to_tensor([-0.555, 0.0, 0.555])
>>> keras_core.ops.softplus(x)
array([0.45366603, 0.6931472, 1.008666], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Softplus().symbolic_call(x)
return backend.nn.softplus(x)
class Softsign(Operation):
def call(self, x):
return backend.nn.softsign(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.softsign", "keras_core.ops.nn.softsign"])
def softsign(x):
"""Softsign activation function.
It is defined as `f(x) = x / (abs(x) + 1)`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras_core.ops.convert_to_tensor([-0.100, -10.0, 1.0, 0.0, 100.0])
>>> keras_core.ops.softsign(x)
    array([-0.09090909, -0.90909094, 0.5, 0.0, 0.990099], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Softsign().symbolic_call(x)
return backend.nn.softsign(x)
class Silu(Operation):
def call(self, x):
return backend.nn.silu(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(
[
"keras_core.ops.silu",
"keras_core.ops.nn.silu",
"keras_core.ops.swish",
"keras_core.ops.nn.swish",
]
)
def silu(x):
"""Sigmoid Linear Unit (SiLU) activation function, also known as Swish.
The SiLU activation function is computed by the sigmoid function multiplied
by its input. It is defined as `f(x) = x * sigmoid(x)`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras_core.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0])
>>> keras_core.ops.sigmoid(x)
array([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32)
>>> keras_core.ops.silu(x)
array([-0.0148357, 0.7310586, 0.0, 0.7310586, 5.9851646], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Silu().symbolic_call(x)
return backend.nn.silu(x)
class LogSigmoid(Operation):
def call(self, x):
return backend.nn.log_sigmoid(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(
[
"keras_core.ops.log_sigmoid",
"keras_core.ops.nn.log_sigmoid",
]
)
def log_sigmoid(x):
"""Logarithm of the sigmoid activation function.
It is defined as `f(x) = log(1 / (1 + exp(-x)))`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras_core.ops.convert_to_tensor([-0.541391, 0.0, 0.50, 5.0])
>>> keras_core.ops.log_sigmoid(x)
array([-1.0000418, -0.6931472, -0.474077, -0.00671535], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return LogSigmoid().symbolic_call(x)
return backend.nn.log_sigmoid(x)
class LeakyRelu(Operation):
def __init__(self, negative_slope=0.2):
super().__init__()
self.negative_slope = negative_slope
def call(self, x):
return backend.nn.leaky_relu(x, self.negative_slope)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(
["keras_core.ops.leaky_relu", "keras_core.ops.nn.leaky_relu"]
)
def leaky_relu(x, negative_slope=0.2):
"""Leaky version of a Rectified Linear Unit activation function.
    It allows a small gradient when the unit is not active. It is defined as:
    `f(x) = negative_slope * x for x < 0` or `f(x) = x for x >= 0`.
Args:
x: Input tensor.
negative_slope: Slope of the activation function at x < 0.
Defaults to `0.2`.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_leaky_relu = keras_core.ops.leaky_relu(x)
>>> print(x_leaky_relu)
array([-0.2, 0. , 1. ], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return LeakyRelu(negative_slope).symbolic_call(x)
return backend.nn.leaky_relu(x, negative_slope=negative_slope)
class HardSigmoid(Operation):
def call(self, x):
return backend.nn.hard_sigmoid(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(
[
"keras_core.ops.hard_sigmoid",
"keras_core.ops.nn.hard_sigmoid",
]
)
def hard_sigmoid(x):
"""Hard sigmoid activation function.
It is defined as:
`0 if x < -2.5`, `1 if x > 2.5`, `(0.2 * x) + 0.5 if -2.5 <= x <= 2.5`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_hard_sigmoid = keras_core.ops.hard_sigmoid(x)
>>> print(x_hard_sigmoid)
array([0.3, 0.5, 0.7], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return HardSigmoid().symbolic_call(x)
return backend.nn.hard_sigmoid(x)
class Elu(Operation):
def __init__(self, alpha=1.0):
super().__init__()
self.alpha = alpha
def call(self, x):
return backend.nn.elu(x, alpha=self.alpha)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.elu", "keras_core.ops.nn.elu"])
def elu(x, alpha=1.0):
"""Exponential Linear Unit activation function.
It is defined as:
`f(x) = alpha * (exp(x) - 1.) for x < 0`, `f(x) = x for x >= 0`.
Args:
x: Input tensor.
        alpha: A scalar, controlling the value to which an ELU saturates
            for negative inputs. Defaults to `1.0`.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_elu = keras_core.ops.elu(x)
>>> print(x_elu)
array([-0.63212055, 0., 1.], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Elu(alpha).symbolic_call(x)
return backend.nn.elu(x, alpha=alpha)
class Selu(Operation):
def call(self, x):
return backend.nn.selu(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.selu", "keras_core.ops.nn.selu"])
def selu(x):
"""Scaled Exponential Linear Unit (SELU) activation function.
It is defined as:
`f(x) = scale * alpha * (exp(x) - 1.) for x < 0`,
`f(x) = scale * x for x >= 0`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_selu = keras_core.ops.selu(x)
>>> print(x_selu)
array([-1.11133055, 0., 1.05070098], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Selu().symbolic_call(x)
return backend.nn.selu(x)
class Gelu(Operation):
def __init__(self, approximate=True):
super().__init__()
self.approximate = approximate
def call(self, x):
return backend.nn.gelu(x, self.approximate)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.gelu", "keras_core.ops.nn.gelu"])
def gelu(x, approximate=True):
"""Gaussian Error Linear Unit (GELU) activation function.
If `approximate` is `True`, it is defined as:
`f(x) = 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))`
Or if `approximate` is `False`, it is defined as:
`f(x) = x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`,
where `P(X) ~ N(0, 1)`.
Args:
x: Input tensor.
approximate: Approximate version of GELU activation. Defaults to `True`.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_gelu = keras_core.ops.gelu(x)
>>> print(x_gelu)
array([-0.15865525, 0., 0.84134475], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Gelu(approximate).symbolic_call(x)
return backend.nn.gelu(x, approximate)
class Softmax(Operation):
def __init__(self, axis=-1):
super().__init__()
self.axis = axis
def call(self, x):
return backend.nn.softmax(x, axis=self.axis)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.softmax", "keras_core.ops.nn.softmax"])
def softmax(x, axis=-1):
"""Softmax activation function.
The elements of the output vector lie within the range `(0, 1)`, and their
    total sum is exactly 1 (up to floating point rounding error).
Each vector is processed independently. The `axis` argument specifies the
axis along which the function is applied within the input.
It is defined as:
`f(x) = exp(x) / sum(exp(x))`
Args:
x: Input tensor.
axis: Integer, axis along which the softmax is applied.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_softmax = keras_core.ops.softmax(x)
>>> print(x_softmax)
array([0.09003057, 0.24472847, 0.66524096], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Softmax(axis).symbolic_call(x)
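    # When `axis` is a tuple, the requested axes are merged by a reshape, the
    # softmax is applied along the last axis of the reshaped tensor, and the
    # original shape is restored afterwards.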
if isinstance(axis, tuple):
original_shape = x.shape
new_shape = []
skip_dims = set(axis)
i = 0
while i < len(original_shape):
if i in skip_dims:
size = 1
while i in skip_dims:
size *= original_shape[i]
i += 1
new_shape.append(size)
else:
new_shape.append(original_shape[i])
i += 1
x = x.reshape(new_shape)
x = backend.nn.softmax(x, axis=-1)
x = x.reshape(original_shape)
return x
else:
return backend.nn.softmax(x, axis=axis)
class LogSoftmax(Operation):
def __init__(self, axis=-1):
super().__init__()
self.axis = axis
def call(self, x):
return backend.nn.log_softmax(x, axis=self.axis)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(
[
"keras_core.ops.log_softmax",
"keras_core.ops.nn.log_softmax",
]
)
def log_softmax(x, axis=-1):
"""Log-softmax activation function.
It is defined as:
`f(x) = x - max(x) - log(sum(exp(x - max(x))))`
Args:
x: Input tensor.
axis: Integer, axis along which the log-softmax is applied.
Defaults to `-1`.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_log_softmax = keras_core.ops.log_softmax(x)
>>> print(x_log_softmax)
array([-2.40760596, -1.40760596, -0.40760596], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return LogSoftmax(axis).symbolic_call(x)
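    # Tuple axes are handled with the same reshape trick as in `softmax` above.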
if isinstance(axis, tuple):
original_shape = x.shape
new_shape = []
skip_dims = set(axis)
i = 0
while i < len(original_shape):
if i in skip_dims:
size = 1
while i in skip_dims:
size *= original_shape[i]
i += 1
new_shape.append(size)
else:
new_shape.append(original_shape[i])
i += 1
x = x.reshape(new_shape)
x = backend.nn.log_softmax(x, axis=-1)
x = x.reshape(original_shape)
return x
else:
return backend.nn.log_softmax(x, axis=axis)
class MaxPool(Operation):
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
super().__init__()
self.pool_size = pool_size
self.strides = strides
self.padding = padding
self.data_format = data_format
def call(self, inputs):
return backend.nn.max_pool(
inputs,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
def compute_output_spec(self, inputs):
output_shape = operation_utils.compute_pooling_output_shape(
inputs.shape,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
return KerasTensor(output_shape, dtype=inputs.dtype)
@keras_core_export(["keras_core.ops.max_pool", "keras_core.ops.nn.max_pool"])
def max_pool(
inputs,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
"""Max pooling operation.
Args:
inputs: Tensor of rank N+2. `inputs` has shape
`(batch_size,) + inputs_spatial_shape + (num_channels,)` if
`data_format="channels_last"`, or
`(batch_size, num_channels) + inputs_spatial_shape` if
`data_format="channels_first"`. Pooling happens over the spatial
dimensions only.
pool_size: int or tuple/list of integers of size
`len(inputs_spatial_shape)`, specifying the size of the pooling
window for each spatial dimension of the input tensor. If
`pool_size` is int, then every spatial dimension shares the same
`pool_size`.
strides: int or tuple/list of integers of size
`len(inputs_spatial_shape)`. The stride of the sliding window for
each spatial dimension of the input tensor. If `strides` is int,
then every spatial dimension shares the same `strides`.
padding: string, either `"valid"` or `"same"`. `"valid"` means no
padding is applied, and `"same"` results in padding evenly to the
left/right or up/down of the input such that output has the
same height/width dimension as the input when `strides=1`.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
inputs. If `data_format="channels_last"`, `inputs` is of shape
`(batch_size, ..., channels)` while if
`data_format="channels_first"`, `inputs` is of shape
`(batch_size, channels, ...)`.
Returns:
A tensor of rank N+2, the result of the max pooling operation.
"""
data_format = standardize_data_format(data_format)
if any_symbolic_tensors((inputs,)):
return MaxPool(
pool_size,
strides,
padding,
data_format,
).symbolic_call(inputs)
return backend.nn.max_pool(inputs, pool_size, strides, padding, data_format)
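# A minimal usage sketch for `max_pool` (illustrative only), assuming NumPy
# inputs and the default `"channels_last"` data format:
#
#     x = np.random.rand(2, 8, 8, 3)  # (batch, height, width, channels)
#     y = max_pool(x, pool_size=2, strides=2, padding="valid")
#     # `y` has shape (2, 4, 4, 3): each non-overlapping 2x2 window is
#     # reduced to its maximum value.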
class AveragePool(Operation):
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
super().__init__()
self.pool_size = pool_size
self.strides = strides
self.padding = padding
self.data_format = data_format
def call(self, inputs):
return backend.nn.average_pool(
inputs,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
def compute_output_spec(self, inputs):
output_shape = operation_utils.compute_pooling_output_shape(
inputs.shape,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
return KerasTensor(output_shape, dtype=inputs.dtype)
@keras_core_export(
[
"keras_core.ops.average_pool",
"keras_core.ops.nn.average_pool",
]
)
def average_pool(
inputs,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
"""Average pooling operation.
Args:
inputs: Tensor of rank N+2. `inputs` has shape
            `(batch_size,) + inputs_spatial_shape + (num_channels,)` if
`data_format="channels_last"`, or
`(batch_size, num_channels) + inputs_spatial_shape` if
`data_format="channels_first"`. Pooling happens over the spatial
dimensions only.
pool_size: int or tuple/list of integers of size
`len(inputs_spatial_shape)`, specifying the size of the pooling
window for each spatial dimension of the input tensor. If
`pool_size` is int, then every spatial dimension shares the same
`pool_size`.
strides: int or tuple/list of integers of size
`len(inputs_spatial_shape)`. The stride of the sliding window for
each spatial dimension of the input tensor. If `strides` is int,
then every spatial dimension shares the same `strides`.
padding: string, either `"valid"` or `"same"`. `"valid"` means no
padding is applied, and `"same"` results in padding evenly to the
left/right or up/down of the input such that output has the
same height/width dimension as the input when `strides=1`.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
inputs. If `data_format="channels_last"`, `inputs` is of shape
`(batch_size, ..., channels)` while if
`data_format="channels_first"`, `inputs` is of shape
`(batch_size, channels, ...)`.
Returns:
A tensor of rank N+2, the result of the average pooling operation.
"""
data_format = standardize_data_format(data_format)
if any_symbolic_tensors((inputs,)):
return AveragePool(
pool_size,
strides,
padding,
data_format,
).symbolic_call(inputs)
return backend.nn.average_pool(
inputs, pool_size, strides, padding, data_format
)
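# A minimal usage sketch for `average_pool` (illustrative only), assuming
# NumPy inputs and the default `"channels_last"` data format:
#
#     x = np.random.rand(2, 9, 9, 3)  # (batch, height, width, channels)
#     y = average_pool(x, pool_size=2, strides=1, padding="same")
#     # `y` has shape (2, 9, 9, 3): with `strides=1`, `"same"` padding keeps
#     # the spatial size and each entry is the mean of a 2x2 window.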
class Conv(Operation):
def __init__(
self,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
super().__init__()
self.strides = strides
self.padding = padding
self.data_format = data_format
self.dilation_rate = dilation_rate
def call(self, inputs, kernel):
return backend.nn.conv(
inputs,
kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
def compute_output_spec(self, inputs, kernel):
output_shape = operation_utils.compute_conv_output_shape(
inputs.shape,
kernel.shape[-1],
kernel.shape[:-2],
self.strides,
self.padding,
self.data_format,
self.dilation_rate,
)
return KerasTensor(output_shape, dtype=inputs.dtype)
@keras_core_export(["keras_core.ops.conv", "keras_core.ops.nn.conv"])
def conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
"""General N-D convolution.
    This op supports 1D, 2D and 3D convolution.
Args:
inputs: Tensor of rank N+2. `inputs` has shape
`(batch_size,) + inputs_spatial_shape + (num_channels,)` if
`data_format="channels_last"`, or
`(batch_size, num_channels) + inputs_spatial_shape` if
`data_format="channels_first"`.
kernel: Tensor of rank N+2. `kernel` has shape
`(kernel_spatial_shape, num_input_channels, num_output_channels)`.
`num_input_channels` should match the number of channels in
`inputs`.
strides: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the strides of the convolution along each spatial
dimension. If `strides` is int, then every spatial dimension shares
the same `strides`.
padding: string, either `"valid"` or `"same"`. `"valid"` means no
padding is applied, and `"same"` results in padding evenly to the
left/right or up/down of the input such that output has the
same height/width dimension as the input when `strides=1`.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
inputs. If `data_format="channels_last"`, `inputs` is of shape
`(batch_size, ..., channels)` while if
`data_format="channels_first"`, `inputs` is of shape
`(batch_size, channels, ...)`.
dilation_rate: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the dilation rate to use for dilated convolution. If
`dilation_rate` is int, then every spatial dimension shares
the same `dilation_rate`.
Returns:
A tensor of rank N+2, the result of the conv operation.
"""
data_format = standardize_data_format(data_format)
if any_symbolic_tensors((inputs,)):
return Conv(strides, padding, data_format, dilation_rate).symbolic_call(
inputs, kernel
)
return backend.nn.conv(
inputs, kernel, strides, padding, data_format, dilation_rate
)
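# A minimal usage sketch for `conv` (illustrative only), assuming NumPy inputs
# and the default `"channels_last"` data format:
#
#     x = np.random.rand(2, 10, 10, 3)      # (batch, height, width, channels)
#     kernel = np.random.rand(3, 3, 3, 16)  # (kh, kw, in_channels, out_channels)
#     y = conv(x, kernel, strides=1, padding="valid")
#     # `y` has shape (2, 8, 8, 16): 10 - 3 + 1 = 8 along each spatial
#     # dimension, with 16 output channels.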
class DepthwiseConv(Operation):
def __init__(
self,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
super().__init__()
self.strides = strides
self.padding = padding
self.data_format = data_format
self.dilation_rate = dilation_rate
def call(self, inputs, kernel):
return backend.nn.depthwise_conv(
inputs,
kernel,
self.strides,
self.padding,
self.data_format,
self.dilation_rate,
)
def compute_output_spec(self, inputs, kernel):
output_shape = operation_utils.compute_conv_output_shape(
inputs.shape,
kernel.shape[-1] * kernel.shape[-2],
kernel.shape[:-2],
self.strides,
self.padding,
self.data_format,
self.dilation_rate,
)
return KerasTensor(output_shape, dtype=inputs.dtype)
@keras_core_export(
[
"keras_core.ops.depthwise_conv",
"keras_core.ops.nn.depthwise_conv",
]
)
def depthwise_conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
"""General N-D depthwise convolution.
    This op supports 1D and 2D depthwise convolution.
Args:
inputs: Tensor of rank N+2. `inputs` has shape
            `(batch_size,) + inputs_spatial_shape + (num_channels,)` if
`data_format="channels_last"`, or
`(batch_size, num_channels) + inputs_spatial_shape` if
`data_format="channels_first"`.
kernel: Tensor of rank N+2. `kernel` has shape
[kernel_spatial_shape, num_input_channels, num_channels_multiplier],
`num_input_channels` should match the number of channels in
`inputs`.
strides: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the strides of the convolution along each spatial
dimension. If `strides` is int, then every spatial dimension shares
the same `strides`.
padding: string, either `"valid"` or `"same"`. `"valid"` means no
padding is applied, and `"same"` results in padding evenly to the
left/right or up/down of the input such that output has the
same height/width dimension as the input when `strides=1`.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
inputs. If `data_format="channels_last"`, `inputs` is of shape
`(batch_size, ..., channels)` while if
`data_format="channels_first"`, `inputs` is of shape
`(batch_size, channels, ...)`.
dilation_rate: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the dilation rate to use for dilated convolution. If
`dilation_rate` is int, then every spatial dimension shares
the same `dilation_rate`.
Returns:
A tensor of rank N+2, the result of the depthwise conv operation.
"""
data_format = standardize_data_format(data_format)
if any_symbolic_tensors((inputs,)):
return DepthwiseConv(
strides, padding, data_format, dilation_rate
).symbolic_call(inputs, kernel)
return backend.nn.depthwise_conv(
inputs,
kernel,
strides,
padding,
data_format,
dilation_rate,
)
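# A minimal usage sketch for `depthwise_conv` (illustrative only), assuming
# NumPy inputs and the default `"channels_last"` data format:
#
#     x = np.random.rand(2, 10, 10, 3)     # (batch, height, width, channels)
#     kernel = np.random.rand(3, 3, 3, 2)  # (kh, kw, in_channels, multiplier)
#     y = depthwise_conv(x, kernel, strides=1, padding="valid")
#     # `y` has shape (2, 8, 8, 6): each of the 3 input channels is filtered
#     # independently and expanded by the channel multiplier of 2.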
class SeparableConv(Operation):
def __init__(
self,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
super().__init__()
self.strides = strides
self.padding = padding
self.data_format = data_format
self.dilation_rate = dilation_rate
def call(self, inputs, depthwise_kernel, pointwise_kernel):
return backend.nn.separable_conv(
inputs,
depthwise_kernel,
pointwise_kernel,
self.strides,
self.padding,
self.data_format,
self.dilation_rate,
)
def compute_output_spec(self, inputs, depthwise_kernel, pointwise_kernel):
output_shape = list(
depthwise_conv(
inputs,
depthwise_kernel,
self.strides,
self.padding,
self.data_format,
self.dilation_rate,
).shape
)
if self.data_format == "channels_last":
output_shape[-1] = pointwise_kernel.shape[-1]
else:
output_shape[1] = pointwise_kernel.shape[-1]
return KerasTensor(output_shape, dtype=inputs.dtype)
@keras_core_export(
[
"keras_core.ops.separable_conv",
"keras_core.ops.nn.separable_conv",
]
)
def separable_conv(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
"""General N-D separable convolution.
    This op supports 1D and 2D separable convolution. `separable_conv` is
a depthwise conv followed by a pointwise conv.
Args:
inputs: Tensor of rank N+2. `inputs` has shape
            `(batch_size,) + inputs_spatial_shape + (num_channels,)` if
`data_format="channels_last"`, or
`(batch_size, num_channels) + inputs_spatial_shape` if
`data_format="channels_first"`.
depthwise_kernel: Tensor of rank N+2. `depthwise_kernel` has shape
[kernel_spatial_shape, num_input_channels, num_channels_multiplier],
`num_input_channels` should match the number of channels in
`inputs`.
pointwise_kernel: Tensor of rank N+2. `pointwise_kernel` has shape
`(*ones_like(kernel_spatial_shape),
num_input_channels * num_channels_multiplier, num_output_channels)`.
strides: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the strides of the convolution along each spatial
dimension. If `strides` is int, then every spatial dimension shares
the same `strides`.
padding: string, either `"valid"` or `"same"`. `"valid"` means no
padding is applied, and `"same"` results in padding evenly to the
left/right or up/down of the input such that output has the
same height/width dimension as the input when `strides=1`.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
inputs. If `data_format="channels_last"`, `inputs` is of shape
`(batch_size, ..., channels)` while if
`data_format="channels_first"`, `inputs` is of shape
`(batch_size, channels, ...)`.
dilation_rate: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the dilation rate to use for dilated convolution. If
`dilation_rate` is int, then every spatial dimension shares
the same `dilation_rate`.
Returns:
A tensor of rank N+2, the result of the depthwise conv operation.
"""
data_format = standardize_data_format(data_format)
if any_symbolic_tensors((inputs,)):
return SeparableConv(
strides,
padding,
data_format,
dilation_rate,
).symbolic_call(inputs, depthwise_kernel, pointwise_kernel)
return backend.nn.separable_conv(
inputs,
depthwise_kernel,
pointwise_kernel,
strides,
padding,
data_format,
dilation_rate,
)
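# A minimal usage sketch for `separable_conv` (illustrative only), assuming
# NumPy inputs and the default `"channels_last"` data format:
#
#     x = np.random.rand(2, 10, 10, 3)
#     depthwise_kernel = np.random.rand(3, 3, 3, 2)   # (kh, kw, in, multiplier)
#     pointwise_kernel = np.random.rand(1, 1, 6, 16)  # (1, 1, in * multiplier, out)
#     y = separable_conv(x, depthwise_kernel, pointwise_kernel, padding="valid")
#     # `y` has shape (2, 8, 8, 16): a depthwise pass followed by a 1x1
#     # pointwise convolution that mixes the 6 intermediate channels into 16.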
class ConvTranspose(Operation):
def __init__(
self,
strides,
padding="valid",
output_padding=None,
data_format=None,
dilation_rate=1,
):
super().__init__()
self.strides = strides
self.output_padding = output_padding
self.padding = padding
self.data_format = data_format
self.dilation_rate = dilation_rate
def call(
self,
inputs,
kernel,
):
return backend.nn.conv_transpose(
inputs,
kernel,
self.strides,
self.output_padding,
self.padding,
self.data_format,
self.dilation_rate,
)
def compute_output_spec(self, inputs, kernel):
kernel_size = kernel.shape[:-2]
filters = kernel.shape[-2]
output_shape = compute_conv_transpose_output_shape(
inputs.shape,
kernel_size,
filters,
self.strides,
self.padding,
self.output_padding,
self.data_format,
self.dilation_rate,
)
return KerasTensor(output_shape, dtype=inputs.dtype)
@keras_core_export(
[
"keras_core.ops.conv_transpose",
"keras_core.ops.nn.conv_transpose",
]
)
def conv_transpose(
inputs,
kernel,
strides,
padding="valid",
output_padding=None,
data_format=None,
dilation_rate=1,
):
"""General N-D convolution transpose.
    Also known as de-convolution. This op supports 1D, 2D and 3D
    transposed convolution.
Args:
inputs: Tensor of rank N+2. `inputs` has shape
            `(batch_size,) + inputs_spatial_shape + (num_channels,)` if
`data_format="channels_last"`, or
`(batch_size, num_channels) + inputs_spatial_shape` if
`data_format="channels_first"`.
kernel: Tensor of rank N+2. `kernel` has shape
[kernel_spatial_shape, num_output_channels, num_input_channels],
`num_input_channels` should match the number of channels in
`inputs`.
strides: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the strides of the convolution along each spatial
dimension. If `strides` is int, then every spatial dimension shares
the same `strides`.
padding: string, either `"valid"` or `"same"`. `"valid"` means no
padding is applied, and `"same"` results in padding evenly to the
left/right or up/down of the input such that output has the
same height/width dimension as the input when `strides=1`.
output_padding: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the amount of padding along the height and width of
the output tensor. Can be a single integer to specify the same
value for all spatial dimensions. The amount of output padding
along a given dimension must be lower than the stride along that
same dimension. If set to `None` (default), the output shape is
inferred.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
inputs. If `data_format="channels_last"`, `inputs` is of shape
`(batch_size, ..., channels)` while if
`data_format="channels_first"`, `inputs` is of shape
`(batch_size, channels, ...)`.
dilation_rate: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the dilation rate to use for dilated convolution. If
`dilation_rate` is int, then every spatial dimension shares
the same `dilation_rate`.
Returns:
        A tensor of rank N+2, the result of the transposed conv operation.
"""
data_format = standardize_data_format(data_format)
if any_symbolic_tensors((inputs,)):
return ConvTranspose(
strides, padding, output_padding, data_format, dilation_rate
).symbolic_call(inputs, kernel)
return backend.nn.conv_transpose(
inputs,
kernel,
strides,
padding,
output_padding,
data_format,
dilation_rate,
)
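# A minimal usage sketch for `conv_transpose` (illustrative only), assuming
# NumPy inputs and the default `"channels_last"` data format:
#
#     x = np.random.rand(2, 8, 8, 3)        # (batch, height, width, channels)
#     kernel = np.random.rand(3, 3, 16, 3)  # (kh, kw, out_channels, in_channels)
#     y = conv_transpose(x, kernel, strides=2, padding="same")
#     # `y` has shape (2, 16, 16, 16): `"same"` padding with `strides=2`
#     # doubles each spatial dimension.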
class OneHot(Operation):
def __init__(self, num_classes, axis=-1, dtype=None):
super().__init__()
self.num_classes = num_classes
self.axis = axis
self.dtype = dtype or backend.floatx()
def call(self, x):
return backend.nn.one_hot(
x, self.num_classes, axis=self.axis, dtype=self.dtype
)
def compute_output_spec(self, x):
x_shape = list(getattr(x, "shape", []))
if self.axis == -1:
x_shape.append(self.num_classes)
elif self.axis >= 0 and self.axis < len(x_shape):
x_shape.insert(self.axis, self.num_classes)
else:
raise ValueError(
f"axis must be -1 or between [0, {len(x.shape)}), but "
f"received {self.axis}."
)
return KerasTensor(x_shape, dtype=self.dtype)
@keras_core_export(["keras_core.ops.one_hot", "keras_core.ops.nn.one_hot"])
def one_hot(x, num_classes, axis=-1, dtype=None):
"""Converts integer tensor `x` into a one-hot tensor.
The one-hot encoding is a representation where each integer value is
converted into a binary vector with a length equal to `num_classes`,
and the index corresponding to the integer value is marked as 1, while
all other indices are marked as 0.
Args:
x : Integer tensor to be encoded. The shape can be
arbitrary, but the dtype should be integer.
num_classes: Number of classes for the one-hot encoding.
axis: Axis along which the encoding is performed. Defaults to
`-1`, which represents the last axis.
dtype: (Optional) Data type of the output tensor. If not
provided, it defaults to the default data type of the backend.
Returns:
        A one-hot encoded tensor with the same shape as `x`
except for the specified `axis` dimension, which will have
a length of `num_classes`. The dtype of the output tensor
is determined by `dtype` or the default data type of the backend.
Example:
>>> x = keras_core.ops.convert_to_tensor([1, 3, 2, 0])
>>> one_hot(x, num_classes=4)
array([[0. 1. 0. 0.]
[0. 0. 0. 1.]
[0. 0. 1. 0.]
[1. 0. 0. 0.]], shape=(4, 4), dtype=float32)
"""
if any_symbolic_tensors((x,)):
return OneHot(num_classes, axis=axis, dtype=dtype).symbolic_call(x)
return backend.nn.one_hot(
x, num_classes, axis=axis, dtype=dtype or backend.floatx()
)
class BinaryCrossentropy(Operation):
def __init__(self, from_logits=False):
super().__init__()
self.from_logits = from_logits
def call(self, target, output):
return backend.nn.binary_crossentropy(
target, output, from_logits=self.from_logits
)
def compute_output_spec(self, target, output):
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
return KerasTensor(output.shape, dtype=output.dtype)
@keras_core_export(
[
"keras_core.ops.binary_crossentropy",
"keras_core.ops.nn.binary_crossentropy",
]
)
def binary_crossentropy(target, output, from_logits=False):
"""Computes binary cross-entropy loss between target and output tensor.
The binary cross-entropy loss is commonly used in binary
classification tasks where each input sample belongs to one
of the two classes. It measures the dissimilarity between the
target and output probabilities or logits.
Args:
target: The target tensor representing the true binary labels.
Its shape should match the shape of the `output` tensor.
output: The output tensor representing the predicted probabilities
or logits. Its shape should match the shape of the
`target` tensor.
from_logits: (optional) Whether `output` is a tensor of logits or
probabilities.
Set it to `True` if `output` represents logits; otherwise,
set it to `False` if `output` represents probabilities.
            Defaults to `False`.
Returns:
        A tensor with the computed binary cross-entropy loss between
`target` and `output`.
Example:
>>> target = keras_core.ops.convert_to_tensor([0, 1, 1, 0])
>>> output = keras_core.ops.convert_to_tensor([0.1, 0.9, 0.8, 0.2])
>>> binary_crossentropy(target, output)
array([0.10536054 0.10536054 0.22314355 0.22314355],
shape=(4,), dtype=float32)
"""
if any_symbolic_tensors((target, output)):
return BinaryCrossentropy(from_logits=from_logits).symbolic_call(
target, output
)
return backend.nn.binary_crossentropy(
target, output, from_logits=from_logits
)
class CategoricalCrossentropy(Operation):
def __init__(self, from_logits=False, axis=-1):
super().__init__()
self.from_logits = from_logits
self.axis = axis
def call(self, target, output):
return backend.nn.categorical_crossentropy(
target, output, from_logits=self.from_logits, axis=self.axis
)
def compute_output_spec(self, target, output):
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if len(target.shape) < 1:
raise ValueError(
"Arguments `target` and `output` must be at least rank 1. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
return KerasTensor(output.shape[:-1], dtype=output.dtype)
@keras_core_export(
[
"keras_core.ops.categorical_crossentropy",
"keras_core.ops.nn.categorical_crossentropy",
]
)
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Computes categorical cross-entropy loss between target and output tensor.
The categorical cross-entropy loss is commonly used in multi-class
classification tasks where each input sample can belong to one of
multiple classes. It measures the dissimilarity
between the target and output probabilities or logits.
Args:
target: The target tensor representing the true categorical labels.
Its shape should match the shape of the `output` tensor
except for the last dimension.
output: The output tensor representing the predicted probabilities
or logits. Its shape should match the shape of the `target`
tensor except for the last dimension.
from_logits: (optional) Whether `output` is a tensor of logits or
probabilities.
Set it to `True` if `output` represents logits; otherwise,
set it to `False` if `output` represents probabilities.
            Defaults to `False`.
axis: (optional) The axis along which the categorical cross-entropy
is computed.
Defaults to `-1`, which corresponds to the last dimension of
the tensors.
Returns:
        A tensor with the computed categorical cross-entropy loss between
`target` and `output`.
Example:
>>> target = keras_core.ops.convert_to_tensor(
... [[1, 0, 0],
... [0, 1, 0],
... [0, 0, 1]])
>>> output = keras_core.ops.convert_to_tensor(
... [[0.9, 0.05, 0.05],
... [0.1, 0.8, 0.1],
... [0.2, 0.3, 0.5]])
>>> categorical_crossentropy(target, output)
array([0.10536054 0.22314355 0.6931472 ], shape=(3,), dtype=float32)
"""
if any_symbolic_tensors((target, output)):
return CategoricalCrossentropy(
from_logits=from_logits, axis=axis
).symbolic_call(target, output)
return backend.nn.categorical_crossentropy(
target, output, from_logits=from_logits, axis=axis
)
class SparseCategoricalCrossentropy(Operation):
def __init__(self, from_logits=False, axis=-1):
super().__init__()
self.from_logits = from_logits
self.axis = axis
def call(self, target, output):
return backend.nn.sparse_categorical_crossentropy(
target, output, from_logits=self.from_logits, axis=self.axis
)
def compute_output_spec(self, target, output):
if len(output.shape) < 1:
raise ValueError(
"Argument `output` must be at least rank 1. "
"Received: "
f"output.shape={output.shape}"
)
target_shape = target.shape
if len(target_shape) == len(output.shape) and target_shape[-1] == 1:
target_shape = target_shape[:-1]
if target_shape != output.shape[:-1]:
raise ValueError(
"Arguments `target` and `output` must have the same shape "
"up until the last dimension: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
return KerasTensor(output.shape[:-1], dtype=output.dtype)
@keras_core_export(
[
"keras_core.ops.sparse_categorical_crossentropy",
"keras_core.ops.nn.sparse_categorical_crossentropy",
]
)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Computes sparse categorical cross-entropy loss.
The sparse categorical cross-entropy loss is similar to categorical
cross-entropy, but it is used when the target tensor contains integer
class labels instead of one-hot encoded vectors. It measures the
dissimilarity between the target and output probabilities or logits.
Args:
target: The target tensor representing the true class labels as
integers. Its shape should match the shape of the `output`
tensor except for the last dimension.
output: The output tensor representing the predicted probabilities
or logits.
Its shape should match the shape of the `target` tensor except
for the last dimension.
from_logits: (optional) Whether `output` is a tensor of logits
or probabilities.
Set it to `True` if `output` represents logits; otherwise,
set it to `False` if `output` represents probabilities.
            Defaults to `False`.
axis: (optional) The axis along which the sparse categorical
cross-entropy is computed.
Defaults to `-1`, which corresponds to the last dimension
of the tensors.
Returns:
        A tensor with the computed sparse categorical cross-entropy
loss between `target` and `output`.
Example:
    >>> target = keras_core.ops.convert_to_tensor([0, 1, 2], dtype="int32")
>>> output = keras_core.ops.convert_to_tensor(
... [[0.9, 0.05, 0.05],
... [0.1, 0.8, 0.1],
... [0.2, 0.3, 0.5]])
>>> sparse_categorical_crossentropy(target, output)
array([0.10536056 0.22314355 0.6931472 ], shape=(3,), dtype=float32)
"""
if any_symbolic_tensors((target, output)):
return SparseCategoricalCrossentropy(
from_logits=from_logits, axis=axis
).symbolic_call(target, output)
return backend.nn.sparse_categorical_crossentropy(
target, output, from_logits=from_logits, axis=axis
)
class MultiHot(Operation):
def __init__(self, num_tokens=None, axis=-1, dtype=None, name=None):
super().__init__(name)
self.num_tokens = num_tokens
self.axis = axis
self.dtype = dtype or backend.floatx()
def call(self, inputs):
return backend.nn.multi_hot(
inputs,
num_classes=self.num_tokens,
axis=self.axis,
dtype=self.dtype,
)
def compute_output_spec(self, inputs):
x_shape = list(getattr(inputs, "shape", []))
if self.axis == -1:
x_shape.append(self.num_tokens)
elif self.axis >= 0 and self.axis < len(x_shape):
x_shape.insert(self.axis, self.num_tokens)
else:
raise ValueError(
f"axis must be -1 or between [0, {len(inputs.shape)}), but "
f"received {self.axis}."
)
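        # The multi-hot aggregation reduces the per-sample token axis: a rank-1
        # input yields a single vector of length `num_tokens`, while
        # higher-rank inputs drop the axis that enumerates tokens per sample.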
if len(x_shape) == 2:
x_shape = [x_shape[-1]]
else:
x_shape = [x_shape[0]] + x_shape[2:]
return KerasTensor(x_shape, dtype=inputs.dtype)
@keras_core_export(
[
"keras_core.ops.multi_hot",
"keras_core.ops.nn.multi_hot",
]
)
def multi_hot(inputs, num_tokens, axis=-1, dtype=None):
"""Encodes integer labels as multi-hot vectors.
This function encodes integer labels as multi-hot vectors, where each label
is mapped to a binary value in the resulting vector.
Args:
inputs: Tensor of integer labels to be converted to multi-hot vectors.
num_tokens: Integer, the total number of unique tokens or classes.
axis: (optional) Axis along which the multi-hot encoding should be
added. Defaults to `-1`, which corresponds to the last dimension.
dtype: (optional) The data type of the resulting tensor. Default
is backend's float type.
Returns:
Tensor: The multi-hot encoded tensor.
Example:
>>> data = keras_core.ops.convert_to_tensor([0, 4])
>>> keras_core.ops.multi_hot(data, num_tokens=5)
array([1.0, 0.0, 0.0, 0.0, 1.0], dtype=float32)
"""
if any_symbolic_tensors((inputs,)):
return MultiHot(num_tokens, axis, dtype).symbolic_call(inputs)
return backend.nn.multi_hot(inputs, num_tokens, axis, dtype)
class Moments(Operation):
def __init__(self, axes, keepdims=False, name=None):
super().__init__(name)
self.axes = axes
self.keepdims = keepdims
def call(self, x):
return backend.nn.moments(x, axes=self.axes, keepdims=self.keepdims)
def compute_output_spec(self, x):
return (
KerasTensor(
reduce_shape(x.shape, axis=self.axes, keepdims=self.keepdims),
dtype=x.dtype,
),
KerasTensor(
reduce_shape(x.shape, axis=self.axes, keepdims=self.keepdims),
dtype=x.dtype,
),
)
@keras_core_export(
[
"keras_core.ops.moments",
"keras_core.ops.nn.moments",
]
)
def moments(x, axes, keepdims=False):
"""Calculates the mean and variance of `x`.
The mean and variance are calculated by aggregating the contents of `x`
across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and
variance of a vector.
Args:
x: Input tensor.
axes: A list of axes which to compute mean and variance.
keepdims: If this is set to `True`, the axes which are reduced are left
in the result as dimensions with size one.
Returns:
A tuple containing two tensors - mean and variance.
Example:
>>> x = keras_core.ops.convert_to_tensor([0, 1, 2, 3, 100], dtype="float32")
>>> keras_core.ops.moments(x, axes=[0])
(array(21.2, dtype=float32), array(1553.3601, dtype=float32))
"""
if any_symbolic_tensors((x,)):
return Moments(axes, keepdims).symbolic_call(x)
return backend.nn.moments(x, axes, keepdims)
| keras-core/keras_core/ops/nn.py/0 | {
"file_path": "keras-core/keras_core/ops/nn.py",
"repo_id": "keras-core",
"token_count": 23621
} | 49 |
# flake8: noqa
import numpy as np
from keras_core import backend
from keras_core import testing
from keras_core.optimizers.adafactor import Adafactor
class AdafactorTest(testing.TestCase):
def test_config(self):
optimizer = Adafactor(
learning_rate=0.5,
beta_2_decay=-0.65,
epsilon_1=1e-15,
epsilon_2=1e-4,
clip_threshold=0.9,
relative_step=False,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Adafactor(learning_rate=0.5)
grads = np.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [-0.3693, 0.6307, 1.6307, 2.6307], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
np.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adafactor(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adafactor(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adafactor(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adafactor(
learning_rate=0.5,
beta_2_decay=-0.65,
epsilon_1=1e-15,
epsilon_2=1e-4,
clip_threshold=0.9,
relative_step=False,
)
x = backend.Variable(np.ones([10]))
grads = np.arange(0.1, 1.1, 0.1)
first_grads = np.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55],
[0.3031, 0.3026, 0.3025, 0.3024, 0.3024, 0.3024, 0.3024, 0.3024, 0.3024, 0.3024],
[0.1671, 0.1665, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663],
[0.0923, 0.0916, 0.0915, 0.0914, 0.0914, 0.0914, 0.0914, 0.0914, 0.0914, 0.0914],
[0.0554, 0.0548, 0.0546, 0.0546, 0.0546, 0.0546, 0.0546, 0.0545, 0.0545, 0.0545]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adafactor(clipnorm=1)
grad = [np.array([100.0, 100.0])]
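        # The global norm of [100, 100] is 100 * sqrt(2), so clipping to
        # norm 1 rescales each entry to sqrt(2) / 2.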
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adafactor(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| keras-core/keras_core/optimizers/adafactor_test.py/0 | {
"file_path": "keras-core/keras_core/optimizers/adafactor_test.py",
"repo_id": "keras-core",
"token_count": 1855
} | 50 |