diff --git "a/python_libs_keras.txt" "b/python_libs_keras.txt" new file mode 100644--- /dev/null +++ "b/python_libs_keras.txt" @@ -0,0 +1,56069 @@ +# File: keras-master/api_gen.py +"""""" +import importlib +import os +import re +import shutil +import namex +PACKAGE = 'keras' +BUILD_DIR_NAME = 'tmp_build_dir' + +def ignore_files(_, filenames): + return [f for f in filenames if f.endswith('_test.py')] + +def copy_source_to_build_directory(root_path): + build_dir = os.path.join(root_path, BUILD_DIR_NAME) + if os.path.exists(build_dir): + shutil.rmtree(build_dir) + os.mkdir(build_dir) + shutil.copytree(PACKAGE, os.path.join(build_dir, PACKAGE), ignore=ignore_files) + return build_dir + +def create_legacy_directory(package_dir): + src_dir = os.path.join(package_dir, 'src') + api_dir = os.path.join(package_dir, 'api') + tf_keras_dirpath_parent = os.path.join(api_dir, '_tf_keras') + tf_keras_dirpath = os.path.join(tf_keras_dirpath_parent, 'keras') + os.makedirs(tf_keras_dirpath, exist_ok=True) + with open(os.path.join(tf_keras_dirpath_parent, '__init__.py'), 'w') as f: + f.write('from keras.api._tf_keras import keras\n') + with open(os.path.join(api_dir, '__init__.py')) as f: + init_file = f.read() + init_file = init_file.replace('from keras.api import _legacy', 'from keras.api import _tf_keras') + with open(os.path.join(api_dir, '__init__.py'), 'w') as f: + f.write(init_file) + init_file = init_file.replace('from keras.api import _tf_keras\n', '\n') + with open(os.path.join(tf_keras_dirpath, '__init__.py'), 'w') as f: + f.write(init_file) + for dirname in os.listdir(api_dir): + dirpath = os.path.join(api_dir, dirname) + if os.path.isdir(dirpath) and dirname not in ('_legacy', '_tf_keras', 'src'): + destpath = os.path.join(tf_keras_dirpath, dirname) + if os.path.exists(destpath): + shutil.rmtree(destpath) + shutil.copytree(dirpath, destpath, ignore=ignore_files) + legacy_submodules = [path[:-3] for path in os.listdir(os.path.join(src_dir, 'legacy')) if path.endswith('.py')] + legacy_submodules += [path for path in os.listdir(os.path.join(src_dir, 'legacy')) if os.path.isdir(os.path.join(src_dir, 'legacy', path))] + for (root, _, fnames) in os.walk(os.path.join(api_dir, '_legacy')): + for fname in fnames: + if fname.endswith('.py'): + legacy_fpath = os.path.join(root, fname) + tf_keras_root = root.replace('/_legacy', '/_tf_keras/keras') + core_api_fpath = os.path.join(root.replace('/_legacy', ''), fname) + if not os.path.exists(tf_keras_root): + os.makedirs(tf_keras_root) + tf_keras_fpath = os.path.join(tf_keras_root, fname) + with open(legacy_fpath) as f: + legacy_contents = f.read() + legacy_contents = legacy_contents.replace('keras.api._legacy', 'keras.api._tf_keras.keras') + if os.path.exists(core_api_fpath): + with open(core_api_fpath) as f: + core_api_contents = f.read() + core_api_contents = core_api_contents.replace('from keras.api import _tf_keras\n', '') + for legacy_submodule in legacy_submodules: + core_api_contents = core_api_contents.replace(f'from keras.api import {legacy_submodule}\n', '') + core_api_contents = core_api_contents.replace(f'keras.api.{legacy_submodule}', f'keras.api._tf_keras.keras.{legacy_submodule}') + legacy_contents = re.sub('\\n', '\\\\n', legacy_contents) + legacy_contents = re.sub('""".*"""', '', legacy_contents) + legacy_contents = re.sub('\\\\n', '\\n', legacy_contents) + legacy_imports = re.findall('import (\\w+)', legacy_contents) + for import_name in legacy_imports: + core_api_contents = re.sub(f'\n.* import {import_name}\n', '\\n', core_api_contents) + 
legacy_contents = core_api_contents + '\n' + legacy_contents + with open(tf_keras_fpath, 'w') as f: + f.write(legacy_contents) + shutil.rmtree(os.path.join(api_dir, '_legacy')) + +def export_version_string(api_init_fname): + with open(api_init_fname) as f: + contents = f.read() + with open(api_init_fname, 'w') as f: + contents += 'from keras.src.version import __version__\n' + f.write(contents) + +def update_package_init(template_fname, dest_fname, api_module): + with open(template_fname) as template_file: + with open(dest_fname, 'w') as dest_file: + for line in template_file: + if '# DO NOT EDIT.' in line: + dest_file.write(line) + for symbol in api_module.__dict__.keys(): + if symbol.startswith('_') and symbol != '__version__': + continue + dest_file.write(f'from keras.api import {symbol}\n') + for line in template_file: + if '# END DO NOT EDIT.' in line: + break + dest_file.write(line) + +def build(): + root_path = os.path.dirname(os.path.abspath(__file__)) + code_api_dir = os.path.join(root_path, PACKAGE, 'api') + code_init_fname = os.path.join(root_path, PACKAGE, '__init__.py') + build_dir = copy_source_to_build_directory(root_path) + build_api_dir = os.path.join(build_dir, PACKAGE, 'api') + build_init_fname = os.path.join(build_dir, PACKAGE, '__init__.py') + build_api_init_fname = os.path.join(build_api_dir, '__init__.py') + try: + os.chdir(build_dir) + if os.path.exists(build_api_dir): + shutil.rmtree(build_api_dir) + if os.path.exists(build_init_fname): + os.remove(build_init_fname) + os.makedirs(build_api_dir) + namex.generate_api_files('keras', code_directory='src', target_directory='api') + export_version_string(build_api_init_fname) + create_legacy_directory(package_dir=os.path.join(build_dir, PACKAGE)) + api_module = importlib.import_module(f'{BUILD_DIR_NAME}.keras.api') + update_package_init(code_init_fname, build_init_fname, api_module) + if os.path.exists(code_api_dir): + shutil.rmtree(code_api_dir) + shutil.copytree(build_api_dir, code_api_dir) + shutil.copy(build_init_fname, code_init_fname) + finally: + shutil.rmtree(build_dir) +if __name__ == '__main__': + build() + +# File: keras-master/guides/custom_train_step_in_jax.py +"""""" +'' +'' +import os +os.environ['KERAS_BACKEND'] = 'jax' +import jax +import keras +import numpy as np +'' + +class CustomModel(keras.Model): + + def compute_loss_and_updates(self, trainable_variables, non_trainable_variables, x, y, training=False): + (y_pred, non_trainable_variables) = self.stateless_call(trainable_variables, non_trainable_variables, x, training=training) + loss = self.compute_loss(x, y, y_pred) + return (loss, (y_pred, non_trainable_variables)) + + def train_step(self, state, data): + (trainable_variables, non_trainable_variables, optimizer_variables, metrics_variables) = state + (x, y) = data + grad_fn = jax.value_and_grad(self.compute_loss_and_updates, has_aux=True) + ((loss, (y_pred, non_trainable_variables)), grads) = grad_fn(trainable_variables, non_trainable_variables, x, y, training=True) + (trainable_variables, optimizer_variables) = self.optimizer.stateless_apply(optimizer_variables, grads, trainable_variables) + new_metrics_vars = [] + for metric in self.metrics: + this_metric_vars = metrics_variables[len(new_metrics_vars):len(new_metrics_vars) + len(metric.variables)] + if metric.name == 'loss': + this_metric_vars = metric.stateless_update_state(this_metric_vars, loss) + else: + this_metric_vars = metric.stateless_update_state(this_metric_vars, y, y_pred) + logs = metric.stateless_result(this_metric_vars) + 
new_metrics_vars += this_metric_vars + state = (trainable_variables, non_trainable_variables, optimizer_variables, new_metrics_vars) + return (logs, state) +'' +inputs = keras.Input(shape=(32,)) +outputs = keras.layers.Dense(1)(inputs) +model = CustomModel(inputs, outputs) +model.compile(optimizer='adam', loss='mse', metrics=['mae']) +x = np.random.random((1000, 32)) +y = np.random.random((1000, 1)) +model.fit(x, y, epochs=3) +'' + +class CustomModel(keras.Model): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.loss_tracker = keras.metrics.Mean(name='loss') + self.mae_metric = keras.metrics.MeanAbsoluteError(name='mae') + self.loss_fn = keras.losses.MeanSquaredError() + + def compute_loss_and_updates(self, trainable_variables, non_trainable_variables, x, y, training=False): + (y_pred, non_trainable_variables) = self.stateless_call(trainable_variables, non_trainable_variables, x, training=training) + loss = self.loss_fn(y, y_pred) + return (loss, (y_pred, non_trainable_variables)) + + def train_step(self, state, data): + (trainable_variables, non_trainable_variables, optimizer_variables, metrics_variables) = state + (x, y) = data + grad_fn = jax.value_and_grad(self.compute_loss_and_updates, has_aux=True) + ((loss, (y_pred, non_trainable_variables)), grads) = grad_fn(trainable_variables, non_trainable_variables, x, y, training=True) + (trainable_variables, optimizer_variables) = self.optimizer.stateless_apply(optimizer_variables, grads, trainable_variables) + loss_tracker_vars = metrics_variables[:len(self.loss_tracker.variables)] + mae_metric_vars = metrics_variables[len(self.loss_tracker.variables):] + loss_tracker_vars = self.loss_tracker.stateless_update_state(loss_tracker_vars, loss) + mae_metric_vars = self.mae_metric.stateless_update_state(mae_metric_vars, y, y_pred) + logs = {} + logs[self.loss_tracker.name] = self.loss_tracker.stateless_result(loss_tracker_vars) + logs[self.mae_metric.name] = self.mae_metric.stateless_result(mae_metric_vars) + new_metrics_vars = loss_tracker_vars + mae_metric_vars + state = (trainable_variables, non_trainable_variables, optimizer_variables, new_metrics_vars) + return (logs, state) + + @property + def metrics(self): + return [self.loss_tracker, self.mae_metric] +inputs = keras.Input(shape=(32,)) +outputs = keras.layers.Dense(1)(inputs) +model = CustomModel(inputs, outputs) +model.compile(optimizer='adam') +x = np.random.random((1000, 32)) +y = np.random.random((1000, 1)) +model.fit(x, y, epochs=5) +'' + +class CustomModel(keras.Model): + + def test_step(self, state, data): + (x, y) = data + (trainable_variables, non_trainable_variables, metrics_variables) = state + (y_pred, non_trainable_variables) = self.stateless_call(trainable_variables, non_trainable_variables, x, training=False) + loss = self.compute_loss(x, y, y_pred) + new_metrics_vars = [] + for metric in self.metrics: + this_metric_vars = metrics_variables[len(new_metrics_vars):len(new_metrics_vars) + len(metric.variables)] + if metric.name == 'loss': + this_metric_vars = metric.stateless_update_state(this_metric_vars, loss) + else: + this_metric_vars = metric.stateless_update_state(this_metric_vars, y, y_pred) + logs = metric.stateless_result(this_metric_vars) + new_metrics_vars += this_metric_vars + state = (trainable_variables, non_trainable_variables, new_metrics_vars) + return (logs, state) +inputs = keras.Input(shape=(32,)) +outputs = keras.layers.Dense(1)(inputs) +model = CustomModel(inputs, outputs) +model.compile(loss='mse', metrics=['mae']) +x = 
np.random.random((1000, 32)) +y = np.random.random((1000, 1)) +model.evaluate(x, y) +'' + +# File: keras-master/guides/custom_train_step_in_tensorflow.py +"""""" +'' +'' +import os +os.environ['KERAS_BACKEND'] = 'tensorflow' +import tensorflow as tf +import keras +from keras import layers +import numpy as np +'' + +class CustomModel(keras.Model): + + def train_step(self, data): + (x, y) = data + with tf.GradientTape() as tape: + y_pred = self(x, training=True) + loss = self.compute_loss(y=y, y_pred=y_pred) + trainable_vars = self.trainable_variables + gradients = tape.gradient(loss, trainable_vars) + self.optimizer.apply(gradients, trainable_vars) + for metric in self.metrics: + if metric.name == 'loss': + metric.update_state(loss) + else: + metric.update_state(y, y_pred) + return {m.name: m.result() for m in self.metrics} +'' +inputs = keras.Input(shape=(32,)) +outputs = keras.layers.Dense(1)(inputs) +model = CustomModel(inputs, outputs) +model.compile(optimizer='adam', loss='mse', metrics=['mae']) +x = np.random.random((1000, 32)) +y = np.random.random((1000, 1)) +model.fit(x, y, epochs=3) +'' + +class CustomModel(keras.Model): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.loss_tracker = keras.metrics.Mean(name='loss') + self.mae_metric = keras.metrics.MeanAbsoluteError(name='mae') + self.loss_fn = keras.losses.MeanSquaredError() + + def train_step(self, data): + (x, y) = data + with tf.GradientTape() as tape: + y_pred = self(x, training=True) + loss = self.loss_fn(y, y_pred) + trainable_vars = self.trainable_variables + gradients = tape.gradient(loss, trainable_vars) + self.optimizer.apply(gradients, trainable_vars) + self.loss_tracker.update_state(loss) + self.mae_metric.update_state(y, y_pred) + return {'loss': self.loss_tracker.result(), 'mae': self.mae_metric.result()} + + @property + def metrics(self): + return [self.loss_tracker, self.mae_metric] +inputs = keras.Input(shape=(32,)) +outputs = keras.layers.Dense(1)(inputs) +model = CustomModel(inputs, outputs) +model.compile(optimizer='adam') +x = np.random.random((1000, 32)) +y = np.random.random((1000, 1)) +model.fit(x, y, epochs=5) +'' + +class CustomModel(keras.Model): + + def train_step(self, data): + if len(data) == 3: + (x, y, sample_weight) = data + else: + sample_weight = None + (x, y) = data + with tf.GradientTape() as tape: + y_pred = self(x, training=True) + loss = self.compute_loss(y=y, y_pred=y_pred, sample_weight=sample_weight) + trainable_vars = self.trainable_variables + gradients = tape.gradient(loss, trainable_vars) + self.optimizer.apply(gradients, trainable_vars) + for metric in self.metrics: + if metric.name == 'loss': + metric.update_state(loss) + else: + metric.update_state(y, y_pred, sample_weight=sample_weight) + return {m.name: m.result() for m in self.metrics} +inputs = keras.Input(shape=(32,)) +outputs = keras.layers.Dense(1)(inputs) +model = CustomModel(inputs, outputs) +model.compile(optimizer='adam', loss='mse', metrics=['mae']) +x = np.random.random((1000, 32)) +y = np.random.random((1000, 1)) +sw = np.random.random((1000, 1)) +model.fit(x, y, sample_weight=sw, epochs=3) +'' + +class CustomModel(keras.Model): + + def test_step(self, data): + (x, y) = data + y_pred = self(x, training=False) + loss = self.compute_loss(y=y, y_pred=y_pred) + for metric in self.metrics: + if metric.name == 'loss': + metric.update_state(loss) + else: + metric.update_state(y, y_pred) + return {m.name: m.result() for m in self.metrics} +inputs = keras.Input(shape=(32,)) +outputs = 
keras.layers.Dense(1)(inputs) +model = CustomModel(inputs, outputs) +model.compile(loss='mse', metrics=['mae']) +x = np.random.random((1000, 32)) +y = np.random.random((1000, 1)) +model.evaluate(x, y) +'' +discriminator = keras.Sequential([keras.Input(shape=(28, 28, 1)), layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same'), layers.LeakyReLU(negative_slope=0.2), layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'), layers.LeakyReLU(negative_slope=0.2), layers.GlobalMaxPooling2D(), layers.Dense(1)], name='discriminator') +latent_dim = 128 +generator = keras.Sequential([keras.Input(shape=(latent_dim,)), layers.Dense(7 * 7 * 128), layers.LeakyReLU(negative_slope=0.2), layers.Reshape((7, 7, 128)), layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'), layers.LeakyReLU(negative_slope=0.2), layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'), layers.LeakyReLU(negative_slope=0.2), layers.Conv2D(1, (7, 7), padding='same', activation='sigmoid')], name='generator') +'' + +class GAN(keras.Model): + + def __init__(self, discriminator, generator, latent_dim): + super().__init__() + self.discriminator = discriminator + self.generator = generator + self.latent_dim = latent_dim + self.d_loss_tracker = keras.metrics.Mean(name='d_loss') + self.g_loss_tracker = keras.metrics.Mean(name='g_loss') + self.seed_generator = keras.random.SeedGenerator(1337) + + @property + def metrics(self): + return [self.d_loss_tracker, self.g_loss_tracker] + + def compile(self, d_optimizer, g_optimizer, loss_fn): + super().compile() + self.d_optimizer = d_optimizer + self.g_optimizer = g_optimizer + self.loss_fn = loss_fn + + def train_step(self, real_images): + if isinstance(real_images, tuple): + real_images = real_images[0] + batch_size = tf.shape(real_images)[0] + random_latent_vectors = keras.random.normal(shape=(batch_size, self.latent_dim), seed=self.seed_generator) + generated_images = self.generator(random_latent_vectors) + combined_images = tf.concat([generated_images, real_images], axis=0) + labels = tf.concat([tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0) + labels += 0.05 * keras.random.uniform(tf.shape(labels), seed=self.seed_generator) + with tf.GradientTape() as tape: + predictions = self.discriminator(combined_images) + d_loss = self.loss_fn(labels, predictions) + grads = tape.gradient(d_loss, self.discriminator.trainable_weights) + self.d_optimizer.apply(grads, self.discriminator.trainable_weights) + random_latent_vectors = keras.random.normal(shape=(batch_size, self.latent_dim), seed=self.seed_generator) + misleading_labels = tf.zeros((batch_size, 1)) + with tf.GradientTape() as tape: + predictions = self.discriminator(self.generator(random_latent_vectors)) + g_loss = self.loss_fn(misleading_labels, predictions) + grads = tape.gradient(g_loss, self.generator.trainable_weights) + self.g_optimizer.apply(grads, self.generator.trainable_weights) + self.d_loss_tracker.update_state(d_loss) + self.g_loss_tracker.update_state(g_loss) + return {'d_loss': self.d_loss_tracker.result(), 'g_loss': self.g_loss_tracker.result()} +'' +batch_size = 64 +((x_train, _), (x_test, _)) = keras.datasets.mnist.load_data() +all_digits = np.concatenate([x_train, x_test]) +all_digits = all_digits.astype('float32') / 255.0 +all_digits = np.reshape(all_digits, (-1, 28, 28, 1)) +dataset = tf.data.Dataset.from_tensor_slices(all_digits) +dataset = dataset.shuffle(buffer_size=1024).batch(batch_size) +gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim) 
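+# How the adversarial step above fits together: each train_step call first
+# updates the discriminator on a concatenated fake+real batch whose labels
+# get 0.05 * uniform noise added (label noise, an important trick for GAN
+# stability), then updates the generator by backpropagating the loss on
+# freshly sampled latents against all-zero "misleading" labels, so only the
+# generator's weights move in that second pass.
+# Note: dataset.take(100) below limits the demo to 100 batches; drop the
+# .take() call to train on the full MNIST set.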
+gan.compile(d_optimizer=keras.optimizers.Adam(learning_rate=0.0003), g_optimizer=keras.optimizers.Adam(learning_rate=0.0003), loss_fn=keras.losses.BinaryCrossentropy(from_logits=True)) +gan.fit(dataset.take(100), epochs=1) +'' + +# File: keras-master/guides/custom_train_step_in_torch.py +"""""" +'' +'' +import os +os.environ['KERAS_BACKEND'] = 'torch' +import torch +import keras +from keras import layers +import numpy as np +'' + +class CustomModel(keras.Model): + + def train_step(self, data): + (x, y) = data + self.zero_grad() + y_pred = self(x, training=True) + loss = self.compute_loss(y=y, y_pred=y_pred) + loss.backward() + trainable_weights = [v for v in self.trainable_weights] + gradients = [v.value.grad for v in trainable_weights] + with torch.no_grad(): + self.optimizer.apply(gradients, trainable_weights) + for metric in self.metrics: + if metric.name == 'loss': + metric.update_state(loss) + else: + metric.update_state(y, y_pred) + return {m.name: m.result() for m in self.metrics} +'' +inputs = keras.Input(shape=(32,)) +outputs = keras.layers.Dense(1)(inputs) +model = CustomModel(inputs, outputs) +model.compile(optimizer='adam', loss='mse', metrics=['mae']) +x = np.random.random((1000, 32)) +y = np.random.random((1000, 1)) +model.fit(x, y, epochs=3) +'' + +class CustomModel(keras.Model): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.loss_tracker = keras.metrics.Mean(name='loss') + self.mae_metric = keras.metrics.MeanAbsoluteError(name='mae') + self.loss_fn = keras.losses.MeanSquaredError() + + def train_step(self, data): + (x, y) = data + self.zero_grad() + y_pred = self(x, training=True) + loss = self.loss_fn(y, y_pred) + loss.backward() + trainable_weights = [v for v in self.trainable_weights] + gradients = [v.value.grad for v in trainable_weights] + with torch.no_grad(): + self.optimizer.apply(gradients, trainable_weights) + self.loss_tracker.update_state(loss) + self.mae_metric.update_state(y, y_pred) + return {'loss': self.loss_tracker.result(), 'mae': self.mae_metric.result()} + + @property + def metrics(self): + return [self.loss_tracker, self.mae_metric] +inputs = keras.Input(shape=(32,)) +outputs = keras.layers.Dense(1)(inputs) +model = CustomModel(inputs, outputs) +model.compile(optimizer='adam') +x = np.random.random((1000, 32)) +y = np.random.random((1000, 1)) +model.fit(x, y, epochs=5) +'' + +class CustomModel(keras.Model): + + def train_step(self, data): + if len(data) == 3: + (x, y, sample_weight) = data + else: + sample_weight = None + (x, y) = data + self.zero_grad() + y_pred = self(x, training=True) + loss = self.compute_loss(y=y, y_pred=y_pred, sample_weight=sample_weight) + loss.backward() + trainable_weights = [v for v in self.trainable_weights] + gradients = [v.value.grad for v in trainable_weights] + with torch.no_grad(): + self.optimizer.apply(gradients, trainable_weights) + for metric in self.metrics: + if metric.name == 'loss': + metric.update_state(loss) + else: + metric.update_state(y, y_pred, sample_weight=sample_weight) + return {m.name: m.result() for m in self.metrics} +inputs = keras.Input(shape=(32,)) +outputs = keras.layers.Dense(1)(inputs) +model = CustomModel(inputs, outputs) +model.compile(optimizer='adam', loss='mse', metrics=['mae']) +x = np.random.random((1000, 32)) +y = np.random.random((1000, 1)) +sw = np.random.random((1000, 1)) +model.fit(x, y, sample_weight=sw, epochs=3) +'' + +class CustomModel(keras.Model): + + def test_step(self, data): + (x, y) = data + y_pred = self(x, training=False) + loss = 
self.compute_loss(y=y, y_pred=y_pred) + for metric in self.metrics: + if metric.name == 'loss': + metric.update_state(loss) + else: + metric.update_state(y, y_pred) + return {m.name: m.result() for m in self.metrics} +inputs = keras.Input(shape=(32,)) +outputs = keras.layers.Dense(1)(inputs) +model = CustomModel(inputs, outputs) +model.compile(loss='mse', metrics=['mae']) +x = np.random.random((1000, 32)) +y = np.random.random((1000, 1)) +model.evaluate(x, y) +'' +discriminator = keras.Sequential([keras.Input(shape=(28, 28, 1)), layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same'), layers.LeakyReLU(negative_slope=0.2), layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'), layers.LeakyReLU(negative_slope=0.2), layers.GlobalMaxPooling2D(), layers.Dense(1)], name='discriminator') +latent_dim = 128 +generator = keras.Sequential([keras.Input(shape=(latent_dim,)), layers.Dense(7 * 7 * 128), layers.LeakyReLU(negative_slope=0.2), layers.Reshape((7, 7, 128)), layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'), layers.LeakyReLU(negative_slope=0.2), layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'), layers.LeakyReLU(negative_slope=0.2), layers.Conv2D(1, (7, 7), padding='same', activation='sigmoid')], name='generator') +'' + +class GAN(keras.Model): + + def __init__(self, discriminator, generator, latent_dim): + super().__init__() + self.discriminator = discriminator + self.generator = generator + self.latent_dim = latent_dim + self.d_loss_tracker = keras.metrics.Mean(name='d_loss') + self.g_loss_tracker = keras.metrics.Mean(name='g_loss') + self.seed_generator = keras.random.SeedGenerator(1337) + self.built = True + + @property + def metrics(self): + return [self.d_loss_tracker, self.g_loss_tracker] + + def compile(self, d_optimizer, g_optimizer, loss_fn): + super().compile() + self.d_optimizer = d_optimizer + self.g_optimizer = g_optimizer + self.loss_fn = loss_fn + + def train_step(self, real_images): + if isinstance(real_images, tuple): + real_images = real_images[0] + batch_size = real_images.shape[0] + random_latent_vectors = keras.random.normal(shape=(batch_size, self.latent_dim), seed=self.seed_generator) + generated_images = self.generator(random_latent_vectors) + real_images = torch.tensor(real_images) + combined_images = torch.concat([generated_images, real_images], axis=0) + labels = torch.concat([torch.ones((batch_size, 1)), torch.zeros((batch_size, 1))], axis=0) + labels += 0.05 * keras.random.uniform(labels.shape, seed=self.seed_generator) + self.zero_grad() + predictions = self.discriminator(combined_images) + d_loss = self.loss_fn(labels, predictions) + d_loss.backward() + grads = [v.value.grad for v in self.discriminator.trainable_weights] + with torch.no_grad(): + self.d_optimizer.apply(grads, self.discriminator.trainable_weights) + random_latent_vectors = keras.random.normal(shape=(batch_size, self.latent_dim), seed=self.seed_generator) + misleading_labels = torch.zeros((batch_size, 1)) + self.zero_grad() + predictions = self.discriminator(self.generator(random_latent_vectors)) + g_loss = self.loss_fn(misleading_labels, predictions) + g_loss.backward() + grads = [v.value.grad for v in self.generator.trainable_weights] + with torch.no_grad(): + self.g_optimizer.apply(grads, self.generator.trainable_weights) + self.d_loss_tracker.update_state(d_loss) + self.g_loss_tracker.update_state(g_loss) + return {'d_loss': self.d_loss_tracker.result(), 'g_loss': self.g_loss_tracker.result()} +'' +batch_size = 64 +((x_train, _), (x_test, _)) 
= keras.datasets.mnist.load_data() +all_digits = np.concatenate([x_train, x_test]) +all_digits = all_digits.astype('float32') / 255.0 +all_digits = np.reshape(all_digits, (-1, 28, 28, 1)) +dataset = torch.utils.data.TensorDataset(torch.from_numpy(all_digits), torch.from_numpy(all_digits)) +dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True) +gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim) +gan.compile(d_optimizer=keras.optimizers.Adam(learning_rate=0.0003), g_optimizer=keras.optimizers.Adam(learning_rate=0.0003), loss_fn=keras.losses.BinaryCrossentropy(from_logits=True)) +gan.fit(dataloader, epochs=1) +'' + +# File: keras-master/guides/distributed_training_with_jax.py +"""""" +'' +'' +import os +os.environ['KERAS_BACKEND'] = 'jax' +import jax +import numpy as np +import tensorflow as tf +import keras +from jax.experimental import mesh_utils +from jax.sharding import Mesh +from jax.sharding import NamedSharding +from jax.sharding import PartitionSpec as P + +def get_model(): + inputs = keras.Input(shape=(28, 28, 1)) + x = keras.layers.Rescaling(1.0 / 255.0)(inputs) + x = keras.layers.Conv2D(filters=12, kernel_size=3, padding='same', use_bias=False)(x) + x = keras.layers.BatchNormalization(scale=False, center=True)(x) + x = keras.layers.ReLU()(x) + x = keras.layers.Conv2D(filters=24, kernel_size=6, use_bias=False, strides=2)(x) + x = keras.layers.BatchNormalization(scale=False, center=True)(x) + x = keras.layers.ReLU()(x) + x = keras.layers.Conv2D(filters=32, kernel_size=6, padding='same', strides=2, name='large_k')(x) + x = keras.layers.BatchNormalization(scale=False, center=True)(x) + x = keras.layers.ReLU()(x) + x = keras.layers.GlobalAveragePooling2D()(x) + x = keras.layers.Dense(256, activation='relu')(x) + x = keras.layers.Dropout(0.5)(x) + outputs = keras.layers.Dense(10)(x) + model = keras.Model(inputs, outputs) + return model + +def get_datasets(): + ((x_train, y_train), (x_test, y_test)) = keras.datasets.mnist.load_data() + x_train = x_train.astype('float32') + x_test = x_test.astype('float32') + x_train = np.expand_dims(x_train, -1) + x_test = np.expand_dims(x_test, -1) + print('x_train shape:', x_train.shape) + print(x_train.shape[0], 'train samples') + print(x_test.shape[0], 'test samples') + train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train)) + eval_data = tf.data.Dataset.from_tensor_slices((x_test, y_test)) + return (train_data, eval_data) +'' +num_epochs = 2 +batch_size = 64 +(train_data, eval_data) = get_datasets() +train_data = train_data.batch(batch_size, drop_remainder=True) +model = get_model() +optimizer = keras.optimizers.Adam(0.001) +loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) +(one_batch, one_batch_labels) = next(iter(train_data)) +model.build(one_batch) +optimizer.build(model.trainable_variables) + +def compute_loss(trainable_variables, non_trainable_variables, x, y): + (y_pred, updated_non_trainable_variables) = model.stateless_call(trainable_variables, non_trainable_variables, x) + loss_value = loss(y, y_pred) + return (loss_value, updated_non_trainable_variables) +compute_gradients = jax.value_and_grad(compute_loss, has_aux=True) + +@jax.jit +def train_step(train_state, x, y): + (trainable_variables, non_trainable_variables, optimizer_variables) = train_state + ((loss_value, non_trainable_variables), grads) = compute_gradients(trainable_variables, non_trainable_variables, x, y) + (trainable_variables, optimizer_variables) = 
optimizer.stateless_apply(optimizer_variables, grads, trainable_variables) + return (loss_value, (trainable_variables, non_trainable_variables, optimizer_variables)) + +def get_replicated_train_state(devices): + var_mesh = Mesh(devices, axis_names='_') + var_replication = NamedSharding(var_mesh, P()) + trainable_variables = jax.device_put(model.trainable_variables, var_replication) + non_trainable_variables = jax.device_put(model.non_trainable_variables, var_replication) + optimizer_variables = jax.device_put(optimizer.variables, var_replication) + return (trainable_variables, non_trainable_variables, optimizer_variables) +num_devices = len(jax.local_devices()) +print(f'Running on {num_devices} devices: {jax.local_devices()}') +devices = mesh_utils.create_device_mesh((num_devices,)) +data_mesh = Mesh(devices, axis_names=('batch',)) +data_sharding = NamedSharding(data_mesh, P('batch')) +(x, y) = next(iter(train_data)) +sharded_x = jax.device_put(x.numpy(), data_sharding) +print('Data sharding') +jax.debug.visualize_array_sharding(jax.numpy.reshape(sharded_x, [-1, 28 * 28])) +train_state = get_replicated_train_state(devices) +for epoch in range(num_epochs): + data_iter = iter(train_data) + for data in data_iter: + (x, y) = data + sharded_x = jax.device_put(x.numpy(), data_sharding) + (loss_value, train_state) = train_step(train_state, sharded_x, y.numpy()) + print('Epoch', epoch, 'loss:', loss_value) +(trainable_variables, non_trainable_variables, optimizer_variables) = train_state +for (variable, value) in zip(model.trainable_variables, trainable_variables): + variable.assign(value) +for (variable, value) in zip(model.non_trainable_variables, non_trainable_variables): + variable.assign(value) +'' + +# File: keras-master/guides/distributed_training_with_tensorflow.py +"""""" +'' +'' +import os +os.environ['KERAS_BACKEND'] = 'tensorflow' +import tensorflow as tf +import keras +'' + +def get_compiled_model(): + inputs = keras.Input(shape=(784,)) + x = keras.layers.Dense(256, activation='relu')(inputs) + x = keras.layers.Dense(256, activation='relu')(x) + outputs = keras.layers.Dense(10)(x) + model = keras.Model(inputs, outputs) + model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.SparseCategoricalAccuracy()]) + return model + +def get_dataset(): + batch_size = 32 + num_val_samples = 10000 + ((x_train, y_train), (x_test, y_test)) = keras.datasets.mnist.load_data() + x_train = x_train.reshape(-1, 784).astype('float32') / 255 + x_test = x_test.reshape(-1, 784).astype('float32') / 255 + y_train = y_train.astype('float32') + y_test = y_test.astype('float32') + x_val = x_train[-num_val_samples:] + y_val = y_train[-num_val_samples:] + x_train = x_train[:-num_val_samples] + y_train = y_train[:-num_val_samples] + return (tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size), tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(batch_size), tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)) +strategy = tf.distribute.MirroredStrategy() +print('Number of devices: {}'.format(strategy.num_replicas_in_sync)) +with strategy.scope(): + model = get_compiled_model() + (train_dataset, val_dataset, test_dataset) = get_dataset() + model.fit(train_dataset, epochs=2, validation_data=val_dataset) + model.evaluate(test_dataset) +'' +checkpoint_dir = './ckpt' +if not os.path.exists(checkpoint_dir): + os.makedirs(checkpoint_dir) + +def make_or_restore_model(): + checkpoints = [checkpoint_dir 
+ '/' + name for name in os.listdir(checkpoint_dir)] + if checkpoints: + latest_checkpoint = max(checkpoints, key=os.path.getctime) + print('Restoring from', latest_checkpoint) + return keras.models.load_model(latest_checkpoint) + print('Creating a new model') + return get_compiled_model() + +def run_training(epochs=1): + strategy = tf.distribute.MirroredStrategy() + with strategy.scope(): + model = make_or_restore_model() + callbacks = [keras.callbacks.ModelCheckpoint(filepath=checkpoint_dir + '/ckpt-{epoch}.keras', save_freq='epoch')] + model.fit(train_dataset, epochs=epochs, callbacks=callbacks, validation_data=val_dataset, verbose=2) +run_training(epochs=1) +run_training(epochs=1) +'' +'' + +# File: keras-master/guides/distributed_training_with_torch.py +"""""" +'' +'' +import os +os.environ['KERAS_BACKEND'] = 'torch' +import torch +import numpy as np +import keras + +def get_model(): + inputs = keras.Input(shape=(28, 28, 1)) + x = keras.layers.Rescaling(1.0 / 255.0)(inputs) + x = keras.layers.Conv2D(filters=12, kernel_size=3, padding='same', use_bias=False)(x) + x = keras.layers.BatchNormalization(scale=False, center=True)(x) + x = keras.layers.ReLU()(x) + x = keras.layers.Conv2D(filters=24, kernel_size=6, use_bias=False, strides=2)(x) + x = keras.layers.BatchNormalization(scale=False, center=True)(x) + x = keras.layers.ReLU()(x) + x = keras.layers.Conv2D(filters=32, kernel_size=6, padding='same', strides=2, name='large_k')(x) + x = keras.layers.BatchNormalization(scale=False, center=True)(x) + x = keras.layers.ReLU()(x) + x = keras.layers.GlobalAveragePooling2D()(x) + x = keras.layers.Dense(256, activation='relu')(x) + x = keras.layers.Dropout(0.5)(x) + outputs = keras.layers.Dense(10)(x) + model = keras.Model(inputs, outputs) + return model + +def get_dataset(): + ((x_train, y_train), (x_test, y_test)) = keras.datasets.mnist.load_data() + x_train = x_train.astype('float32') + x_test = x_test.astype('float32') + x_train = np.expand_dims(x_train, -1) + x_test = np.expand_dims(x_test, -1) + print('x_train shape:', x_train.shape) + dataset = torch.utils.data.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train)) + return dataset +'' + +def train_model(model, dataloader, num_epochs, optimizer, loss_fn): + for epoch in range(num_epochs): + running_loss = 0.0 + running_loss_count = 0 + for (batch_idx, (inputs, targets)) in enumerate(dataloader): + inputs = inputs.cuda(non_blocking=True) + targets = targets.cuda(non_blocking=True) + outputs = model(inputs) + loss = loss_fn(outputs, targets) + optimizer.zero_grad() + loss.backward() + optimizer.step() + running_loss += loss.item() + running_loss_count += 1 + print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {running_loss / running_loss_count}') +'' +num_gpu = torch.cuda.device_count() +num_epochs = 2 +batch_size = 64 +print(f'Running on {num_gpu} GPUs') + +def setup_device(current_gpu_index, num_gpus): + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '56492' + device = torch.device('cuda:{}'.format(current_gpu_index)) + torch.distributed.init_process_group(backend='nccl', init_method='env://', world_size=num_gpus, rank=current_gpu_index) + torch.cuda.set_device(device) + +def cleanup(): + torch.distributed.destroy_process_group() + +def prepare_dataloader(dataset, current_gpu_index, num_gpus, batch_size): + sampler = torch.utils.data.distributed.DistributedSampler(dataset, num_replicas=num_gpus, rank=current_gpu_index, shuffle=False) + dataloader = torch.utils.data.DataLoader(dataset, sampler=sampler, 
batch_size=batch_size, shuffle=False) + return dataloader + +def per_device_launch_fn(current_gpu_index, num_gpu): + setup_device(current_gpu_index, num_gpu) + dataset = get_dataset() + model = get_model() + dataloader = prepare_dataloader(dataset, current_gpu_index, num_gpu, batch_size) + optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + loss_fn = torch.nn.CrossEntropyLoss() + model = model.to(current_gpu_index) + ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[current_gpu_index], output_device=current_gpu_index) + train_model(ddp_model, dataloader, num_epochs, optimizer, loss_fn) + cleanup() +'' +if __name__ == '__main__': + torch.multiprocessing.start_processes(per_device_launch_fn, args=(num_gpu,), nprocs=num_gpu, join=True, start_method='fork') +'' + +# File: keras-master/guides/functional_api.py +"""""" +'' +import numpy as np +import keras +from keras import layers +from keras import ops +'' +inputs = keras.Input(shape=(784,)) +'' +img_inputs = keras.Input(shape=(32, 32, 3)) +'' +inputs.shape +'' +inputs.dtype +'' +dense = layers.Dense(64, activation='relu') +x = dense(inputs) +'' +x = layers.Dense(64, activation='relu')(x) +outputs = layers.Dense(10)(x) +'' +model = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model') +'' +model.summary() +'' +keras.utils.plot_model(model, 'my_first_model.png') +'' +keras.utils.plot_model(model, 'my_first_model_with_shape_info.png', show_shapes=True) +'' +'' +((x_train, y_train), (x_test, y_test)) = keras.datasets.mnist.load_data() +x_train = x_train.reshape(60000, 784).astype('float32') / 255 +x_test = x_test.reshape(10000, 784).astype('float32') / 255 +model.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=keras.optimizers.RMSprop(), metrics=['accuracy']) +history = model.fit(x_train, y_train, batch_size=64, epochs=2, validation_split=0.2) +test_scores = model.evaluate(x_test, y_test, verbose=2) +print('Test loss:', test_scores[0]) +print('Test accuracy:', test_scores[1]) +'' +'' +model.save('my_model.keras') +del model +model = keras.models.load_model('my_model.keras') +'' +'' +encoder_input = keras.Input(shape=(28, 28, 1), name='img') +x = layers.Conv2D(16, 3, activation='relu')(encoder_input) +x = layers.Conv2D(32, 3, activation='relu')(x) +x = layers.MaxPooling2D(3)(x) +x = layers.Conv2D(32, 3, activation='relu')(x) +x = layers.Conv2D(16, 3, activation='relu')(x) +encoder_output = layers.GlobalMaxPooling2D()(x) +encoder = keras.Model(encoder_input, encoder_output, name='encoder') +encoder.summary() +x = layers.Reshape((4, 4, 1))(encoder_output) +x = layers.Conv2DTranspose(16, 3, activation='relu')(x) +x = layers.Conv2DTranspose(32, 3, activation='relu')(x) +x = layers.UpSampling2D(3)(x) +x = layers.Conv2DTranspose(16, 3, activation='relu')(x) +decoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x) +autoencoder = keras.Model(encoder_input, decoder_output, name='autoencoder') +autoencoder.summary() +'' +'' +encoder_input = keras.Input(shape=(28, 28, 1), name='original_img') +x = layers.Conv2D(16, 3, activation='relu')(encoder_input) +x = layers.Conv2D(32, 3, activation='relu')(x) +x = layers.MaxPooling2D(3)(x) +x = layers.Conv2D(32, 3, activation='relu')(x) +x = layers.Conv2D(16, 3, activation='relu')(x) +encoder_output = layers.GlobalMaxPooling2D()(x) +encoder = keras.Model(encoder_input, encoder_output, name='encoder') +encoder.summary() +decoder_input = keras.Input(shape=(16,), name='encoded_img') +x = layers.Reshape((4, 4, 1))(decoder_input) +x = 
layers.Conv2DTranspose(16, 3, activation='relu')(x) +x = layers.Conv2DTranspose(32, 3, activation='relu')(x) +x = layers.UpSampling2D(3)(x) +x = layers.Conv2DTranspose(16, 3, activation='relu')(x) +decoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x) +decoder = keras.Model(decoder_input, decoder_output, name='decoder') +decoder.summary() +autoencoder_input = keras.Input(shape=(28, 28, 1), name='img') +encoded_img = encoder(autoencoder_input) +decoded_img = decoder(encoded_img) +autoencoder = keras.Model(autoencoder_input, decoded_img, name='autoencoder') +autoencoder.summary() +'' + +def get_model(): + inputs = keras.Input(shape=(128,)) + outputs = layers.Dense(1)(inputs) + return keras.Model(inputs, outputs) +model1 = get_model() +model2 = get_model() +model3 = get_model() +inputs = keras.Input(shape=(128,)) +y1 = model1(inputs) +y2 = model2(inputs) +y3 = model3(inputs) +outputs = layers.average([y1, y2, y3]) +ensemble_model = keras.Model(inputs=inputs, outputs=outputs) +'' +num_tags = 12 +num_words = 10000 +num_departments = 4 +title_input = keras.Input(shape=(None,), name='title') +body_input = keras.Input(shape=(None,), name='body') +tags_input = keras.Input(shape=(num_tags,), name='tags') +title_features = layers.Embedding(num_words, 64)(title_input) +body_features = layers.Embedding(num_words, 64)(body_input) +title_features = layers.LSTM(128)(title_features) +body_features = layers.LSTM(32)(body_features) +x = layers.concatenate([title_features, body_features, tags_input]) +priority_pred = layers.Dense(1, name='priority')(x) +department_pred = layers.Dense(num_departments, name='department')(x) +model = keras.Model(inputs=[title_input, body_input, tags_input], outputs={'priority': priority_pred, 'department': department_pred}) +'' +keras.utils.plot_model(model, 'multi_input_and_output_model.png', show_shapes=True) +'' +model.compile(optimizer=keras.optimizers.RMSprop(0.001), loss=[keras.losses.BinaryCrossentropy(from_logits=True), keras.losses.CategoricalCrossentropy(from_logits=True)], loss_weights=[1.0, 0.2]) +'' +model.compile(optimizer=keras.optimizers.RMSprop(0.001), loss={'priority': keras.losses.BinaryCrossentropy(from_logits=True), 'department': keras.losses.CategoricalCrossentropy(from_logits=True)}, loss_weights={'priority': 1.0, 'department': 0.2}) +'' +title_data = np.random.randint(num_words, size=(1280, 10)) +body_data = np.random.randint(num_words, size=(1280, 100)) +tags_data = np.random.randint(2, size=(1280, num_tags)).astype('float32') +priority_targets = np.random.random(size=(1280, 1)) +dept_targets = np.random.randint(2, size=(1280, num_departments)) +model.fit({'title': title_data, 'body': body_data, 'tags': tags_data}, {'priority': priority_targets, 'department': dept_targets}, epochs=2, batch_size=32) +'' +'' +inputs = keras.Input(shape=(32, 32, 3), name='img') +x = layers.Conv2D(32, 3, activation='relu')(inputs) +x = layers.Conv2D(64, 3, activation='relu')(x) +block_1_output = layers.MaxPooling2D(3)(x) +x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_1_output) +x = layers.Conv2D(64, 3, activation='relu', padding='same')(x) +block_2_output = layers.add([x, block_1_output]) +x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_2_output) +x = layers.Conv2D(64, 3, activation='relu', padding='same')(x) +block_3_output = layers.add([x, block_2_output]) +x = layers.Conv2D(64, 3, activation='relu')(block_3_output) +x = layers.GlobalAveragePooling2D()(x) +x = layers.Dense(256, activation='relu')(x) +x = 
layers.Dropout(0.5)(x) +outputs = layers.Dense(10)(x) +model = keras.Model(inputs, outputs, name='toy_resnet') +model.summary() +'' +keras.utils.plot_model(model, 'mini_resnet.png', show_shapes=True) +'' +((x_train, y_train), (x_test, y_test)) = keras.datasets.cifar10.load_data() +x_train = x_train.astype('float32') / 255.0 +x_test = x_test.astype('float32') / 255.0 +y_train = keras.utils.to_categorical(y_train, 10) +y_test = keras.utils.to_categorical(y_test, 10) +model.compile(optimizer=keras.optimizers.RMSprop(0.001), loss=keras.losses.CategoricalCrossentropy(from_logits=True), metrics=['acc']) +model.fit(x_train[:1000], y_train[:1000], batch_size=64, epochs=1, validation_split=0.2) +'' +shared_embedding = layers.Embedding(1000, 128) +text_input_a = keras.Input(shape=(None,), dtype='int32') +text_input_b = keras.Input(shape=(None,), dtype='int32') +encoded_input_a = shared_embedding(text_input_a) +encoded_input_b = shared_embedding(text_input_b) +'' +vgg19 = keras.applications.VGG19() +'' +features_list = [layer.output for layer in vgg19.layers] +'' +feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list) +img = np.random.random((1, 224, 224, 3)).astype('float32') +extracted_features = feat_extraction_model(img) +'' +'' + +class CustomDense(layers.Layer): + + def __init__(self, units=32): + super().__init__() + self.units = units + + def build(self, input_shape): + self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) + self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) + + def call(self, inputs): + return ops.matmul(inputs, self.w) + self.b +inputs = keras.Input((4,)) +outputs = CustomDense(10)(inputs) +model = keras.Model(inputs, outputs) +'' + +class CustomDense(layers.Layer): + + def __init__(self, units=32): + super().__init__() + self.units = units + + def build(self, input_shape): + self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) + self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) + + def call(self, inputs): + return ops.matmul(inputs, self.w) + self.b + + def get_config(self): + return {'units': self.units} +inputs = keras.Input((4,)) +outputs = CustomDense(10)(inputs) +model = keras.Model(inputs, outputs) +config = model.get_config() +new_model = keras.Model.from_config(config, custom_objects={'CustomDense': CustomDense}) +'' +'' +'' +units = 32 +timesteps = 10 +input_dim = 5 +inputs = keras.Input((None, units)) +x = layers.GlobalAveragePooling1D()(inputs) +outputs = layers.Dense(1)(x) +model = keras.Model(inputs, outputs) + +class CustomRNN(layers.Layer): + + def __init__(self): + super().__init__() + self.units = units + self.projection_1 = layers.Dense(units=units, activation='tanh') + self.projection_2 = layers.Dense(units=units, activation='tanh') + self.classifier = model + + def call(self, inputs): + outputs = [] + state = ops.zeros(shape=(inputs.shape[0], self.units)) + for t in range(inputs.shape[1]): + x = inputs[:, t, :] + h = self.projection_1(x) + y = h + self.projection_2(state) + state = y + outputs.append(y) + features = ops.stack(outputs, axis=1) + print(features.shape) + return self.classifier(features) +rnn_model = CustomRNN() +_ = rnn_model(ops.zeros((1, timesteps, input_dim))) +'' +units = 32 +timesteps = 10 +input_dim = 5 +batch_size = 16 + +class CustomRNN(layers.Layer): + + def __init__(self): + super().__init__() + self.units = units + 
self.projection_1 = layers.Dense(units=units, activation='tanh') + self.projection_2 = layers.Dense(units=units, activation='tanh') + self.classifier = layers.Dense(1) + + def call(self, inputs): + outputs = [] + state = ops.zeros(shape=(inputs.shape[0], self.units)) + for t in range(inputs.shape[1]): + x = inputs[:, t, :] + h = self.projection_1(x) + y = h + self.projection_2(state) + state = y + outputs.append(y) + features = ops.stack(outputs, axis=1) + return self.classifier(features) +inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim)) +x = layers.Conv1D(32, 3)(inputs) +outputs = CustomRNN()(x) +model = keras.Model(inputs, outputs) +rnn_model = CustomRNN() +_ = rnn_model(ops.zeros((1, 10, 5))) + +# File: keras-master/guides/making_new_layers_and_models_via_subclassing.py +"""""" +'' +'' +import numpy as np +import keras +from keras import ops +from keras import layers +'' + +class Linear(keras.layers.Layer): + + def __init__(self, units=32, input_dim=32): + super().__init__() + self.w = self.add_weight(shape=(input_dim, units), initializer='random_normal', trainable=True) + self.b = self.add_weight(shape=(units,), initializer='zeros', trainable=True) + + def call(self, inputs): + return ops.matmul(inputs, self.w) + self.b +'' +x = ops.ones((2, 2)) +linear_layer = Linear(4, 2) +y = linear_layer(x) +print(y) +'' +assert linear_layer.weights == [linear_layer.w, linear_layer.b] +'' + +class ComputeSum(keras.layers.Layer): + + def __init__(self, input_dim): + super().__init__() + self.total = self.add_weight(initializer='zeros', shape=(input_dim,), trainable=False) + + def call(self, inputs): + self.total.assign_add(ops.sum(inputs, axis=0)) + return self.total +x = ops.ones((2, 2)) +my_sum = ComputeSum(2) +y = my_sum(x) +print(y.numpy()) +y = my_sum(x) +print(y.numpy()) +'' +print('weights:', len(my_sum.weights)) +print('non-trainable weights:', len(my_sum.non_trainable_weights)) +print('trainable_weights:', my_sum.trainable_weights) +'' + +class Linear(keras.layers.Layer): + + def __init__(self, units=32, input_dim=32): + super().__init__() + self.w = self.add_weight(shape=(input_dim, units), initializer='random_normal', trainable=True) + self.b = self.add_weight(shape=(units,), initializer='zeros', trainable=True) + + def call(self, inputs): + return ops.matmul(inputs, self.w) + self.b +'' + +class Linear(keras.layers.Layer): + + def __init__(self, units=32): + super().__init__() + self.units = units + + def build(self, input_shape): + self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) + self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) + + def call(self, inputs): + return ops.matmul(inputs, self.w) + self.b +'' +linear_layer = Linear(32) +y = linear_layer(x) +'' +'' + +class MLPBlock(keras.layers.Layer): + + def __init__(self): + super().__init__() + self.linear_1 = Linear(32) + self.linear_2 = Linear(32) + self.linear_3 = Linear(1) + + def call(self, inputs): + x = self.linear_1(inputs) + x = keras.activations.relu(x) + x = self.linear_2(x) + x = keras.activations.relu(x) + return self.linear_3(x) +mlp = MLPBlock() +y = mlp(ops.ones(shape=(3, 64))) +print('weights:', len(mlp.weights)) +print('trainable weights:', len(mlp.trainable_weights)) +'' +'' + +class ActivityRegularizationLayer(keras.layers.Layer): + + def __init__(self, rate=0.01): + super().__init__() + self.rate = rate + + def call(self, inputs): + self.add_loss(self.rate * ops.mean(inputs)) + return inputs +'' + 
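+# Losses created via add_loss() accumulate in layer.losses during the
+# forward pass and are cleared at the start of each top-level __call__,
+# so they never carry over between batches. The asserts on OuterLayer
+# below demonstrate this: after a second call, layer.losses still holds
+# exactly one entry.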
+class OuterLayer(keras.layers.Layer): + + def __init__(self): + super().__init__() + self.activity_reg = ActivityRegularizationLayer(0.01) + + def call(self, inputs): + return self.activity_reg(inputs) +layer = OuterLayer() +assert len(layer.losses) == 0 +_ = layer(ops.zeros((1, 1))) +assert len(layer.losses) == 1 +_ = layer(ops.zeros((1, 1))) +assert len(layer.losses) == 1 +'' + +class OuterLayerWithKernelRegularizer(keras.layers.Layer): + + def __init__(self): + super().__init__() + self.dense = keras.layers.Dense(32, kernel_regularizer=keras.regularizers.l2(0.001)) + + def call(self, inputs): + return self.dense(inputs) +layer = OuterLayerWithKernelRegularizer() +_ = layer(ops.zeros((1, 1))) +print(layer.losses) +'' +inputs = keras.Input(shape=(3,)) +outputs = ActivityRegularizationLayer()(inputs) +model = keras.Model(inputs, outputs) +model.compile(optimizer='adam', loss='mse') +model.fit(np.random.random((2, 3)), np.random.random((2, 3))) +model.compile(optimizer='adam') +model.fit(np.random.random((2, 3)), np.random.random((2, 3))) +'' + +class Linear(keras.layers.Layer): + + def __init__(self, units=32): + super().__init__() + self.units = units + + def build(self, input_shape): + self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) + self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) + + def call(self, inputs): + return ops.matmul(inputs, self.w) + self.b + + def get_config(self): + return {'units': self.units} +layer = Linear(64) +config = layer.get_config() +print(config) +new_layer = Linear.from_config(config) +'' + +class Linear(keras.layers.Layer): + + def __init__(self, units=32, **kwargs): + super().__init__(**kwargs) + self.units = units + + def build(self, input_shape): + self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) + self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) + + def call(self, inputs): + return ops.matmul(inputs, self.w) + self.b + + def get_config(self): + config = super().get_config() + config.update({'units': self.units}) + return config +layer = Linear(64) +config = layer.get_config() +print(config) +new_layer = Linear.from_config(config) +'' +'' + +class CustomDropout(keras.layers.Layer): + + def __init__(self, rate, **kwargs): + super().__init__(**kwargs) + self.rate = rate + + def call(self, inputs, training=None): + if training: + return keras.random.dropout(inputs, rate=self.rate) + return inputs +'' +'' +'' +'' + +class Sampling(layers.Layer): + + def call(self, inputs): + (z_mean, z_log_var) = inputs + batch = ops.shape(z_mean)[0] + dim = ops.shape(z_mean)[1] + epsilon = keras.random.normal(shape=(batch, dim)) + return z_mean + ops.exp(0.5 * z_log_var) * epsilon + +class Encoder(layers.Layer): + + def __init__(self, latent_dim=32, intermediate_dim=64, name='encoder', **kwargs): + super().__init__(name=name, **kwargs) + self.dense_proj = layers.Dense(intermediate_dim, activation='relu') + self.dense_mean = layers.Dense(latent_dim) + self.dense_log_var = layers.Dense(latent_dim) + self.sampling = Sampling() + + def call(self, inputs): + x = self.dense_proj(inputs) + z_mean = self.dense_mean(x) + z_log_var = self.dense_log_var(x) + z = self.sampling((z_mean, z_log_var)) + return (z_mean, z_log_var, z) + +class Decoder(layers.Layer): + + def __init__(self, original_dim, intermediate_dim=64, name='decoder', **kwargs): + super().__init__(name=name, **kwargs) + 
self.dense_proj = layers.Dense(intermediate_dim, activation='relu') + self.dense_output = layers.Dense(original_dim, activation='sigmoid') + + def call(self, inputs): + x = self.dense_proj(inputs) + return self.dense_output(x) + +class VariationalAutoEncoder(keras.Model): + + def __init__(self, original_dim, intermediate_dim=64, latent_dim=32, name='autoencoder', **kwargs): + super().__init__(name=name, **kwargs) + self.original_dim = original_dim + self.encoder = Encoder(latent_dim=latent_dim, intermediate_dim=intermediate_dim) + self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim) + + def call(self, inputs): + (z_mean, z_log_var, z) = self.encoder(inputs) + reconstructed = self.decoder(z) + kl_loss = -0.5 * ops.mean(z_log_var - ops.square(z_mean) - ops.exp(z_log_var) + 1) + self.add_loss(kl_loss) + return reconstructed +'' +((x_train, _), _) = keras.datasets.mnist.load_data() +x_train = x_train.reshape(60000, 784).astype('float32') / 255 +original_dim = 784 +vae = VariationalAutoEncoder(784, 64, 32) +optimizer = keras.optimizers.Adam(learning_rate=0.001) +vae.compile(optimizer, loss=keras.losses.MeanSquaredError()) +vae.fit(x_train, x_train, epochs=2, batch_size=64) + +# File: keras-master/guides/sequential_model.py +"""""" +'' +import keras +from keras import layers +from keras import ops +'' +model = keras.Sequential([layers.Dense(2, activation='relu', name='layer1'), layers.Dense(3, activation='relu', name='layer2'), layers.Dense(4, name='layer3')]) +x = ops.ones((3, 3)) +y = model(x) +'' +layer1 = layers.Dense(2, activation='relu', name='layer1') +layer2 = layers.Dense(3, activation='relu', name='layer2') +layer3 = layers.Dense(4, name='layer3') +x = ops.ones((3, 3)) +y = layer3(layer2(layer1(x))) +'' +'' +model = keras.Sequential([layers.Dense(2, activation='relu'), layers.Dense(3, activation='relu'), layers.Dense(4)]) +'' +model.layers +'' +model = keras.Sequential() +model.add(layers.Dense(2, activation='relu')) +model.add(layers.Dense(3, activation='relu')) +model.add(layers.Dense(4)) +'' +model.pop() +print(len(model.layers)) +'' +model = keras.Sequential(name='my_sequential') +model.add(layers.Dense(2, activation='relu', name='layer1')) +model.add(layers.Dense(3, activation='relu', name='layer2')) +model.add(layers.Dense(4, name='layer3')) +'' +layer = layers.Dense(3) +layer.weights +'' +x = ops.ones((1, 4)) +y = layer(x) +layer.weights +'' +model = keras.Sequential([layers.Dense(2, activation='relu'), layers.Dense(3, activation='relu'), layers.Dense(4)]) +x = ops.ones((1, 4)) +y = model(x) +print('Number of weights after calling the model:', len(model.weights)) +'' +model.summary() +'' +model = keras.Sequential() +model.add(keras.Input(shape=(4,))) +model.add(layers.Dense(2, activation='relu')) +model.summary() +'' +model.layers +'' +'' +model = keras.Sequential() +model.add(keras.Input(shape=(250, 250, 3))) +model.add(layers.Conv2D(32, 5, strides=2, activation='relu')) +model.add(layers.Conv2D(32, 3, activation='relu')) +model.add(layers.MaxPooling2D(3)) +model.summary() +model.add(layers.Conv2D(32, 3, activation='relu')) +model.add(layers.Conv2D(32, 3, activation='relu')) +model.add(layers.MaxPooling2D(3)) +model.add(layers.Conv2D(32, 3, activation='relu')) +model.add(layers.Conv2D(32, 3, activation='relu')) +model.add(layers.MaxPooling2D(2)) +model.summary() +model.add(layers.GlobalMaxPooling2D()) +model.add(layers.Dense(10)) +'' +'' +'' +initial_model = keras.Sequential([keras.Input(shape=(250, 250, 3)), layers.Conv2D(32, 5, strides=2, 
activation='relu'), layers.Conv2D(32, 3, activation='relu'), layers.Conv2D(32, 3, activation='relu')]) +feature_extractor = keras.Model(inputs=initial_model.inputs, outputs=[layer.output for layer in initial_model.layers]) +x = ops.ones((1, 250, 250, 3)) +features = feature_extractor(x) +'' +initial_model = keras.Sequential([keras.Input(shape=(250, 250, 3)), layers.Conv2D(32, 5, strides=2, activation='relu'), layers.Conv2D(32, 3, activation='relu', name='my_intermediate_layer'), layers.Conv2D(32, 3, activation='relu')]) +feature_extractor = keras.Model(inputs=initial_model.inputs, outputs=initial_model.get_layer(name='my_intermediate_layer').output) +x = ops.ones((1, 250, 250, 3)) +features = feature_extractor(x) +'' +'' + +# File: keras-master/guides/training_with_built_in_methods.py +"""""" +'' +import torch +import tensorflow as tf +import os +import numpy as np +import keras +from keras import layers +from keras import ops +'' +'' +inputs = keras.Input(shape=(784,), name='digits') +x = layers.Dense(64, activation='relu', name='dense_1')(inputs) +x = layers.Dense(64, activation='relu', name='dense_2')(x) +outputs = layers.Dense(10, activation='softmax', name='predictions')(x) +model = keras.Model(inputs=inputs, outputs=outputs) +'' +((x_train, y_train), (x_test, y_test)) = keras.datasets.mnist.load_data() +x_train = x_train.reshape(60000, 784).astype('float32') / 255 +x_test = x_test.reshape(10000, 784).astype('float32') / 255 +y_train = y_train.astype('float32') +y_test = y_test.astype('float32') +x_val = x_train[-10000:] +y_val = y_train[-10000:] +x_train = x_train[:-10000] +y_train = y_train[:-10000] +'' +model.compile(optimizer=keras.optimizers.RMSprop(), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=[keras.metrics.SparseCategoricalAccuracy()]) +'' +print('Fit model on training data') +history = model.fit(x_train, y_train, batch_size=64, epochs=2, validation_data=(x_val, y_val)) +'' +history.history +'' +print('Evaluate on test data') +results = model.evaluate(x_test, y_test, batch_size=128) +print('test loss, test acc:', results) +print('Generate predictions for 3 samples') +predictions = model.predict(x_test[:3]) +print('predictions shape:', predictions.shape) +'' +'' +model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=0.001), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=[keras.metrics.SparseCategoricalAccuracy()]) +'' +model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) +'' + +def get_uncompiled_model(): + inputs = keras.Input(shape=(784,), name='digits') + x = layers.Dense(64, activation='relu', name='dense_1')(inputs) + x = layers.Dense(64, activation='relu', name='dense_2')(x) + outputs = layers.Dense(10, activation='softmax', name='predictions')(x) + model = keras.Model(inputs=inputs, outputs=outputs) + return model + +def get_compiled_model(): + model = get_uncompiled_model() + model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) + return model +'' +'' + +def custom_mean_squared_error(y_true, y_pred): + return ops.mean(ops.square(y_true - y_pred), axis=-1) +model = get_uncompiled_model() +model.compile(optimizer=keras.optimizers.Adam(), loss=custom_mean_squared_error) +y_train_one_hot = ops.one_hot(y_train, num_classes=10) +model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1) +'' + +class CustomMSE(keras.losses.Loss): + + def __init__(self, regularization_factor=0.1, name='custom_mse'): + 
super().__init__(name=name) + self.regularization_factor = regularization_factor + + def call(self, y_true, y_pred): + mse = ops.mean(ops.square(y_true - y_pred), axis=-1) + reg = ops.mean(ops.square(0.5 - y_pred), axis=-1) + return mse + reg * self.regularization_factor +model = get_uncompiled_model() +model.compile(optimizer=keras.optimizers.Adam(), loss=CustomMSE()) +y_train_one_hot = ops.one_hot(y_train, num_classes=10) +model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1) +'' + +class CategoricalTruePositives(keras.metrics.Metric): + + def __init__(self, name='categorical_true_positives', **kwargs): + super().__init__(name=name, **kwargs) + self.true_positives = self.add_variable(shape=(), name='ctp', initializer='zeros') + + def update_state(self, y_true, y_pred, sample_weight=None): + y_pred = ops.reshape(ops.argmax(y_pred, axis=1), (-1, 1)) + values = ops.cast(y_true, 'int32') == ops.cast(y_pred, 'int32') + values = ops.cast(values, 'float32') + if sample_weight is not None: + sample_weight = ops.cast(sample_weight, 'float32') + values = ops.multiply(values, sample_weight) + self.true_positives.assign_add(ops.sum(values)) + + def result(self): + return self.true_positives + + def reset_state(self): + self.true_positives.assign(0) +model = get_uncompiled_model() +model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=0.001), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=[CategoricalTruePositives()]) +model.fit(x_train, y_train, batch_size=64, epochs=3) +'' + +class ActivityRegularizationLayer(layers.Layer): + + def call(self, inputs): + self.add_loss(ops.sum(inputs) * 0.1) + return inputs +inputs = keras.Input(shape=(784,), name='digits') +x = layers.Dense(64, activation='relu', name='dense_1')(inputs) +x = ActivityRegularizationLayer()(x) +x = layers.Dense(64, activation='relu', name='dense_2')(x) +outputs = layers.Dense(10, name='predictions')(x) +model = keras.Model(inputs=inputs, outputs=outputs) +model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=0.001), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True)) +model.fit(x_train, y_train, batch_size=64, epochs=1) +'' + +class LogisticEndpoint(keras.layers.Layer): + + def __init__(self, name=None): + super().__init__(name=name) + self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True) + + def call(self, targets, logits, sample_weights=None): + loss = self.loss_fn(targets, logits, sample_weights) + self.add_loss(loss) + return ops.softmax(logits) +'' +inputs = keras.Input(shape=(3,), name='inputs') +targets = keras.Input(shape=(10,), name='targets') +logits = keras.layers.Dense(10)(inputs) +predictions = LogisticEndpoint(name='predictions')(targets, logits) +model = keras.Model(inputs=[inputs, targets], outputs=predictions) +model.compile(optimizer='adam') +data = {'inputs': np.random.random((3, 3)), 'targets': np.random.random((3, 10))} +model.fit(data) +'' +'' +model = get_compiled_model() +model.fit(x_train, y_train, batch_size=64, validation_split=0.2, epochs=1) +'' +model = get_compiled_model() +train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) +train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64) +test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)) +test_dataset = test_dataset.batch(64) +model.fit(train_dataset, epochs=3) +print('Evaluate') +result = model.evaluate(test_dataset) +dict(zip(model.metrics_names, result)) +'' +model = get_compiled_model() +train_dataset = tf.data.Dataset.from_tensor_slices((x_train, 
y_train))
+train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
+model.fit(train_dataset, epochs=3, steps_per_epoch=100)
+''
+model = get_compiled_model()
+train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
+train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
+val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
+val_dataset = val_dataset.batch(64)
+model.fit(train_dataset, epochs=1, validation_data=val_dataset)
+''
+model = get_compiled_model()
+train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
+train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
+val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
+val_dataset = val_dataset.batch(64)
+model.fit(train_dataset, epochs=1, validation_data=val_dataset, validation_steps=10)
+''
+''
+
+class ExamplePyDataset(keras.utils.PyDataset):
+
+    def __init__(self, x, y, batch_size, **kwargs):
+        super().__init__(**kwargs)
+        self.x = x
+        self.y = y
+        self.batch_size = batch_size
+
+    def __len__(self):
+        return int(np.ceil(len(self.x) / float(self.batch_size)))
+
+    def __getitem__(self, idx):
+        batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
+        batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
+        return (batch_x, batch_y)
+train_py_dataset = ExamplePyDataset(x_train, y_train, batch_size=32)
+val_py_dataset = ExamplePyDataset(x_val, y_val, batch_size=32)
+''
+model = get_compiled_model()
+model.fit(train_py_dataset, batch_size=64, validation_data=val_py_dataset, epochs=1)
+''
+model.evaluate(val_py_dataset)
+''
+train_py_dataset = ExamplePyDataset(x_train, y_train, batch_size=32, workers=4)
+val_py_dataset = ExamplePyDataset(x_val, y_val, batch_size=32, workers=4)
+model = get_compiled_model()
+model.fit(train_py_dataset, batch_size=64, validation_data=val_py_dataset, epochs=1)
+''
+
+class ExampleTorchDataset(torch.utils.data.Dataset):
+
+    def __init__(self, x, y):
+        self.x = x
+        self.y = y
+
+    def __len__(self):
+        return len(self.x)
+
+    def __getitem__(self, idx):
+        return (self.x[idx], self.y[idx])
+train_torch_dataset = ExampleTorchDataset(x_train, y_train)
+val_torch_dataset = ExampleTorchDataset(x_val, y_val)
+''
+train_dataloader = torch.utils.data.DataLoader(train_torch_dataset, batch_size=32, shuffle=True)
+val_dataloader = torch.utils.data.DataLoader(val_torch_dataset, batch_size=32, shuffle=True)
+''
+model = get_compiled_model()
+model.fit(train_dataloader, batch_size=64, validation_data=val_dataloader, epochs=1)
+model.evaluate(val_dataloader)
+''
+''
+''
+class_weight = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 2.0, 6: 1.0, 7: 1.0, 8: 1.0, 9: 1.0}
+print('Fit with class weight')
+model = get_compiled_model()
+model.fit(x_train, y_train, class_weight=class_weight, batch_size=64, epochs=1)
+''
+sample_weight = np.ones(shape=(len(y_train),))
+sample_weight[y_train == 5] = 2.0
+print('Fit with sample weight')
+model = get_compiled_model()
+model.fit(x_train, y_train, sample_weight=sample_weight, batch_size=64, epochs=1)
+''
+sample_weight = np.ones(shape=(len(y_train),))
+sample_weight[y_train == 5] = 2.0
+train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train, sample_weight))
+train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
+model = get_compiled_model()
+model.fit(train_dataset, epochs=1)
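+# A small sketch (not from the guide above): instead of hand-picking class
+# weights as in the dict a few cells up, they can be derived from the label
+# distribution. The variable names here are illustrative assumptions.
+counts = np.bincount(y_train.astype('int64'), minlength=10)
+derived_class_weight = {i: len(y_train) / (10.0 * max(c, 1)) for (i, c) in enumerate(counts)}
+print('Inverse-frequency class weights:', derived_class_weight)
+''
+image_input = keras.Input(shape=(32, 32, 3), name='img_input')
+timeseries_input = keras.Input(shape=(None, 10), name='ts_input')
+x1 = layers.Conv2D(3, 3)(image_input)
+x1 = 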
layers.GlobalMaxPooling2D()(x1) +x2 = layers.Conv1D(3, 3)(timeseries_input) +x2 = layers.GlobalMaxPooling1D()(x2) +x = layers.concatenate([x1, x2]) +score_output = layers.Dense(1, name='score_output')(x) +class_output = layers.Dense(5, name='class_output')(x) +model = keras.Model(inputs=[image_input, timeseries_input], outputs=[score_output, class_output]) +'' +keras.utils.plot_model(model, 'multi_input_and_output_model.png', show_shapes=True) +'' +model.compile(optimizer=keras.optimizers.RMSprop(0.001), loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()]) +'' +model.compile(optimizer=keras.optimizers.RMSprop(0.001), loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()], metrics=[[keras.metrics.MeanAbsolutePercentageError(), keras.metrics.MeanAbsoluteError()], [keras.metrics.CategoricalAccuracy()]]) +'' +model.compile(optimizer=keras.optimizers.RMSprop(0.001), loss={'score_output': keras.losses.MeanSquaredError(), 'class_output': keras.losses.CategoricalCrossentropy()}, metrics={'score_output': [keras.metrics.MeanAbsolutePercentageError(), keras.metrics.MeanAbsoluteError()], 'class_output': [keras.metrics.CategoricalAccuracy()]}) +'' +model.compile(optimizer=keras.optimizers.RMSprop(0.001), loss={'score_output': keras.losses.MeanSquaredError(), 'class_output': keras.losses.CategoricalCrossentropy()}, metrics={'score_output': [keras.metrics.MeanAbsolutePercentageError(), keras.metrics.MeanAbsoluteError()], 'class_output': [keras.metrics.CategoricalAccuracy()]}, loss_weights={'score_output': 2.0, 'class_output': 1.0}) +'' +model.compile(optimizer=keras.optimizers.RMSprop(0.001), loss=[None, keras.losses.CategoricalCrossentropy()]) +model.compile(optimizer=keras.optimizers.RMSprop(0.001), loss={'class_output': keras.losses.CategoricalCrossentropy()}) +'' +model.compile(optimizer=keras.optimizers.RMSprop(0.001), loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()]) +img_data = np.random.random_sample(size=(100, 32, 32, 3)) +ts_data = np.random.random_sample(size=(100, 20, 10)) +score_targets = np.random.random_sample(size=(100, 1)) +class_targets = np.random.random_sample(size=(100, 5)) +model.fit([img_data, ts_data], [score_targets, class_targets], batch_size=32, epochs=1) +model.fit({'img_input': img_data, 'ts_input': ts_data}, {'score_output': score_targets, 'class_output': class_targets}, batch_size=32, epochs=1) +'' +train_dataset = tf.data.Dataset.from_tensor_slices(({'img_input': img_data, 'ts_input': ts_data}, {'score_output': score_targets, 'class_output': class_targets})) +train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64) +model.fit(train_dataset, epochs=1) +'' +model = get_compiled_model() +callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.01, patience=2, verbose=1)] +model.fit(x_train, y_train, epochs=20, batch_size=64, callbacks=callbacks, validation_split=0.2) +'' + +class LossHistory(keras.callbacks.Callback): + + def on_train_begin(self, logs): + self.per_batch_losses = [] + + def on_batch_end(self, batch, logs): + self.per_batch_losses.append(logs.get('loss')) +'' +model = get_compiled_model() +callbacks = [keras.callbacks.ModelCheckpoint(filepath='mymodel_{epoch}.keras', save_best_only=True, monitor='val_loss', verbose=1)] +model.fit(x_train, y_train, epochs=2, batch_size=64, callbacks=callbacks, validation_split=0.2) +'' +checkpoint_dir = './ckpt' +if not os.path.exists(checkpoint_dir): + os.makedirs(checkpoint_dir) + +def make_or_restore_model(): + 
checkpoints = [checkpoint_dir + '/' + name for name in os.listdir(checkpoint_dir)] + if checkpoints: + latest_checkpoint = max(checkpoints, key=os.path.getctime) + print('Restoring from', latest_checkpoint) + return keras.models.load_model(latest_checkpoint) + print('Creating a new model') + return get_compiled_model() +model = make_or_restore_model() +callbacks = [keras.callbacks.ModelCheckpoint(filepath=checkpoint_dir + '/model-loss={loss:.2f}.keras', save_freq=100)] +model.fit(x_train, y_train, epochs=1, callbacks=callbacks) +'' +'' +initial_learning_rate = 0.1 +lr_schedule = keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True) +optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule) +'' +'' +'' +keras.callbacks.TensorBoard(log_dir='/full_path_to_your_logs', histogram_freq=0, embeddings_freq=0, update_freq='epoch') +'' + +# File: keras-master/guides/transfer_learning.py +"""""" +'' +import numpy as np +import keras +from keras import layers +import tensorflow_datasets as tfds +import matplotlib.pyplot as plt +'' +'' +layer = keras.layers.Dense(3) +layer.build((None, 4)) +print('weights:', len(layer.weights)) +print('trainable_weights:', len(layer.trainable_weights)) +print('non_trainable_weights:', len(layer.non_trainable_weights)) +'' +layer = keras.layers.BatchNormalization() +layer.build((None, 4)) +print('weights:', len(layer.weights)) +print('trainable_weights:', len(layer.trainable_weights)) +print('non_trainable_weights:', len(layer.non_trainable_weights)) +'' +layer = keras.layers.Dense(3) +layer.build((None, 4)) +layer.trainable = False +print('weights:', len(layer.weights)) +print('trainable_weights:', len(layer.trainable_weights)) +print('non_trainable_weights:', len(layer.non_trainable_weights)) +'' +layer1 = keras.layers.Dense(3, activation='relu') +layer2 = keras.layers.Dense(3, activation='sigmoid') +model = keras.Sequential([keras.Input(shape=(3,)), layer1, layer2]) +layer1.trainable = False +initial_layer1_weights_values = layer1.get_weights() +model.compile(optimizer='adam', loss='mse') +model.fit(np.random.random((2, 3)), np.random.random((2, 3))) +final_layer1_weights_values = layer1.get_weights() +np.testing.assert_allclose(initial_layer1_weights_values[0], final_layer1_weights_values[0]) +np.testing.assert_allclose(initial_layer1_weights_values[1], final_layer1_weights_values[1]) +'' +'' +inner_model = keras.Sequential([keras.Input(shape=(3,)), keras.layers.Dense(3, activation='relu'), keras.layers.Dense(3, activation='relu')]) +model = keras.Sequential([keras.Input(shape=(3,)), inner_model, keras.layers.Dense(3, activation='sigmoid')]) +model.trainable = False +assert inner_model.trainable == False +assert inner_model.layers[0].trainable == False +'' +'' +'' +'' +tfds.disable_progress_bar() +(train_ds, validation_ds, test_ds) = tfds.load('cats_vs_dogs', split=['train[:40%]', 'train[40%:50%]', 'train[50%:60%]'], as_supervised=True) +print(f'Number of training samples: {train_ds.cardinality()}') +print(f'Number of validation samples: {validation_ds.cardinality()}') +print(f'Number of test samples: {test_ds.cardinality()}') +'' +plt.figure(figsize=(10, 10)) +for (i, (image, label)) in enumerate(train_ds.take(9)): + ax = plt.subplot(3, 3, i + 1) + plt.imshow(image) + plt.title(int(label)) + plt.axis('off') +'' +'' +resize_fn = keras.layers.Resizing(150, 150) +train_ds = train_ds.map(lambda x, y: (resize_fn(x), y)) +validation_ds = validation_ds.map(lambda x, y: (resize_fn(x), y)) +test_ds = 
test_ds.map(lambda x, y: (resize_fn(x), y))
+''
+augmentation_layers = [layers.RandomFlip('horizontal'), layers.RandomRotation(0.1)]
+
+def data_augmentation(x):
+    for layer in augmentation_layers:
+        x = layer(x)
+    return x
+train_ds = train_ds.map(lambda x, y: (data_augmentation(x), y))
+''
+from tensorflow import data as tf_data
+batch_size = 64
+train_ds = train_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
+validation_ds = validation_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
+test_ds = test_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
+''
+for (images, labels) in train_ds.take(1):
+    plt.figure(figsize=(10, 10))
+    first_image = images[0]
+    for i in range(9):
+        ax = plt.subplot(3, 3, i + 1)
+        augmented_image = data_augmentation(np.expand_dims(first_image, 0))
+        plt.imshow(np.array(augmented_image[0]).astype('int32'))
+        plt.title(int(labels[0]))
+        plt.axis('off')
+''
+base_model = keras.applications.Xception(weights='imagenet', input_shape=(150, 150, 3), include_top=False)
+base_model.trainable = False
+inputs = keras.Input(shape=(150, 150, 3))
+scale_layer = keras.layers.Rescaling(scale=1 / 127.5, offset=-1)
+x = scale_layer(inputs)
+x = base_model(x, training=False)
+x = keras.layers.GlobalAveragePooling2D()(x)
+x = keras.layers.Dropout(0.2)(x)
+outputs = keras.layers.Dense(1)(x)
+model = keras.Model(inputs, outputs)
+model.summary(show_trainable=True)
+''
+model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.BinaryCrossentropy(from_logits=True), metrics=[keras.metrics.BinaryAccuracy()])
+epochs = 2
+print('Fitting the top layer of the model')
+model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
+''
+base_model.trainable = True
+model.summary(show_trainable=True)
+model.compile(optimizer=keras.optimizers.Adam(1e-05), loss=keras.losses.BinaryCrossentropy(from_logits=True), metrics=[keras.metrics.BinaryAccuracy()])
+epochs = 1
+print('Fitting the end-to-end model')
+model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
+''
+print('Test dataset evaluation')
+model.evaluate(test_ds)
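+# A possible variation (a sketch, not part of the guide): rather than
+# unfreezing the whole base model for fine-tuning, unfreeze only its last
+# few layers. The cutoff of 20 layers is an arbitrary illustrative choice.
+base_model.trainable = True
+for layer in base_model.layers[:-20]:
+    layer.trainable = False
+model.compile(optimizer=keras.optimizers.Adam(1e-05), loss=keras.losses.BinaryCrossentropy(from_logits=True), metrics=[keras.metrics.BinaryAccuracy()])
+
+# File: keras-master/guides/understanding_masking_and_padding.py
+""""""
+''
+import numpy as np
+import keras
+from keras import ops
+from keras import layers
+''
+''
+raw_inputs = [[711, 632, 71], [73, 8, 3215, 55, 927], [83, 91, 1, 645, 1253, 927]]
+padded_inputs = keras.utils.pad_sequences(raw_inputs, padding='post')
+print(padded_inputs)
+''
+''
+embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
+masked_output = embedding(padded_inputs)
+print(masked_output._keras_mask)
+masking_layer = layers.Masking()
+unmasked_embedding = ops.cast(ops.tile(ops.expand_dims(padded_inputs, axis=-1), [1, 1, 10]), dtype='float32')
+masked_embedding = masking_layer(unmasked_embedding)
+print(masked_embedding._keras_mask)
+''
+''
+model = keras.Sequential([layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True), layers.LSTM(32)])
+''
+inputs = keras.Input(shape=(None,), dtype='int32')
+x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
+outputs = layers.LSTM(32)(x)
+model = keras.Model(inputs, outputs)
+''
+''
+
+class MyLayer(layers.Layer):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
+        self.lstm = layers.LSTM(32)
+
+    def call(self, inputs):
+        x = self.embedding(inputs)
+        mask = self.embedding.compute_mask(inputs)
+        output = self.lstm(x, mask=mask)
+        return output
+layer = MyLayer()
+x = 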
np.random.random((32, 10)) * 100 +x = x.astype('int32') +layer(x) +'' +'' + +class TemporalSplit(keras.layers.Layer): + + def call(self, inputs): + return ops.split(inputs, 2, axis=1) + + def compute_mask(self, inputs, mask=None): + if mask is None: + return None + return ops.split(mask, 2, axis=1) +(first_half, second_half) = TemporalSplit()(masked_embedding) +print(first_half._keras_mask) +print(second_half._keras_mask) +'' + +class CustomEmbedding(keras.layers.Layer): + + def __init__(self, input_dim, output_dim, mask_zero=False, **kwargs): + super().__init__(**kwargs) + self.input_dim = input_dim + self.output_dim = output_dim + self.mask_zero = mask_zero + + def build(self, input_shape): + self.embeddings = self.add_weight(shape=(self.input_dim, self.output_dim), initializer='random_normal', dtype='float32') + + def call(self, inputs): + inputs = ops.cast(inputs, 'int32') + return ops.take(self.embeddings, inputs) + + def compute_mask(self, inputs, mask=None): + if not self.mask_zero: + return None + return ops.not_equal(inputs, 0) +layer = CustomEmbedding(10, 32, mask_zero=True) +x = np.random.random((3, 10)) * 9 +x = x.astype('int32') +y = layer(x) +mask = layer.compute_mask(x) +print(mask) +'' +'' + +class MyActivation(keras.layers.Layer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.supports_masking = True + + def call(self, inputs): + return ops.relu(inputs) +'' +inputs = keras.Input(shape=(None,), dtype='int32') +x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs) +x = MyActivation()(x) +print('Mask found:', x._keras_mask) +outputs = layers.LSTM(32)(x) +model = keras.Model(inputs, outputs) +y = model(np.random.randint(0, 5000, size=(32, 100))) +'' + +class TemporalSoftmax(keras.layers.Layer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.supports_masking = True + + def call(self, inputs, mask=None): + assert mask is not None + broadcast_float_mask = ops.expand_dims(ops.cast(mask, 'float32'), -1) + inputs_exp = ops.exp(inputs) * broadcast_float_mask + inputs_sum = ops.sum(inputs_exp * broadcast_float_mask, axis=-1, keepdims=True) + return inputs_exp / inputs_sum +inputs = keras.Input(shape=(None,), dtype='int32') +x = layers.Embedding(input_dim=10, output_dim=32, mask_zero=True)(inputs) +x = layers.Dense(1)(x) +outputs = TemporalSoftmax()(x) +model = keras.Model(inputs, outputs) +y = model(np.random.randint(0, 10, size=(32, 100))) +'' + +# File: keras-master/guides/writing_a_custom_training_loop_in_jax.py +"""""" +'' +import os +os.environ['KERAS_BACKEND'] = 'jax' +import jax +import tensorflow as tf +import keras +import numpy as np +'' +'' + +def get_model(): + inputs = keras.Input(shape=(784,), name='digits') + x1 = keras.layers.Dense(64, activation='relu')(inputs) + x2 = keras.layers.Dense(64, activation='relu')(x1) + outputs = keras.layers.Dense(10, name='predictions')(x2) + model = keras.Model(inputs=inputs, outputs=outputs) + return model +model = get_model() +batch_size = 32 +((x_train, y_train), (x_test, y_test)) = keras.datasets.mnist.load_data() +x_train = np.reshape(x_train, (-1, 784)).astype('float32') +x_test = np.reshape(x_test, (-1, 784)).astype('float32') +y_train = keras.utils.to_categorical(y_train) +y_test = keras.utils.to_categorical(y_test) +x_val = x_train[-10000:] +y_val = y_train[-10000:] +x_train = x_train[:-10000] +y_train = y_train[:-10000] +train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) +train_dataset = 
train_dataset.shuffle(buffer_size=1024).batch(batch_size)
+val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
+val_dataset = val_dataset.batch(batch_size)
+''
+loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
+optimizer = keras.optimizers.Adam(learning_rate=0.001)
+''
+
+def compute_loss_and_updates(trainable_variables, non_trainable_variables, x, y):
+    (y_pred, non_trainable_variables) = model.stateless_call(trainable_variables, non_trainable_variables, x)
+    loss = loss_fn(y, y_pred)
+    return (loss, non_trainable_variables)
+''
+grad_fn = jax.value_and_grad(compute_loss_and_updates, has_aux=True)
+''
+
+def train_step(state, data):
+    (trainable_variables, non_trainable_variables, optimizer_variables) = state
+    (x, y) = data
+    ((loss, non_trainable_variables), grads) = grad_fn(trainable_variables, non_trainable_variables, x, y)
+    (trainable_variables, optimizer_variables) = optimizer.stateless_apply(optimizer_variables, grads, trainable_variables)
+    return (loss, (trainable_variables, non_trainable_variables, optimizer_variables))
+''
+
+@jax.jit
+def train_step(state, data):
+    (trainable_variables, non_trainable_variables, optimizer_variables) = state
+    (x, y) = data
+    ((loss, non_trainable_variables), grads) = grad_fn(trainable_variables, non_trainable_variables, x, y)
+    (trainable_variables, optimizer_variables) = optimizer.stateless_apply(optimizer_variables, grads, trainable_variables)
+    return (loss, (trainable_variables, non_trainable_variables, optimizer_variables))
+''
+optimizer.build(model.trainable_variables)
+trainable_variables = model.trainable_variables
+non_trainable_variables = model.non_trainable_variables
+optimizer_variables = optimizer.variables
+state = (trainable_variables, non_trainable_variables, optimizer_variables)
+for (step, data) in enumerate(train_dataset):
+    data = (data[0].numpy(), data[1].numpy())
+    (loss, state) = train_step(state, data)
+    if step % 100 == 0:
+        print(f'Training loss (for 1 batch) at step {step}: {float(loss):.4f}')
+        print(f'Seen so far: {(step + 1) * batch_size} samples')
+''
+(trainable_variables, non_trainable_variables, optimizer_variables) = state
+for (variable, value) in zip(model.trainable_variables, trainable_variables):
+    variable.assign(value)
+for (variable, value) in zip(model.non_trainable_variables, non_trainable_variables):
+    variable.assign(value)
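+# Quick sanity check (a sketch, not guide code): because train_step is a
+# pure function, the Keras Variable objects only receive the trained values
+# through the assign() loops above; stateless_call can probe the trained
+# state directly. The batch of 8 validation samples is an arbitrary choice.
+(val_logits, _) = model.stateless_call(trainable_variables, non_trainable_variables, x_val[:8])
+print('Sanity-check logits shape:', val_logits.shape)
+''
+model = get_model()
+optimizer = keras.optimizers.Adam(learning_rate=0.001)
+loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
+train_acc_metric = keras.metrics.CategoricalAccuracy()
+val_acc_metric = keras.metrics.CategoricalAccuracy()
+
+def compute_loss_and_updates(trainable_variables, non_trainable_variables, metric_variables, x, y):
+    (y_pred, non_trainable_variables) = model.stateless_call(trainable_variables, non_trainable_variables, x)
+    loss = loss_fn(y, y_pred)
+    metric_variables = train_acc_metric.stateless_update_state(metric_variables, y, y_pred)
+    return (loss, (non_trainable_variables, metric_variables))
+grad_fn = jax.value_and_grad(compute_loss_and_updates, has_aux=True)
+
+@jax.jit
+def train_step(state, data):
+    (trainable_variables, non_trainable_variables, optimizer_variables, metric_variables) = state
+    (x, y) = data
+    ((loss, (non_trainable_variables, metric_variables)), grads) = grad_fn(trainable_variables, non_trainable_variables, metric_variables, x, y)
+    (trainable_variables, optimizer_variables) = optimizer.stateless_apply(optimizer_variables, grads, trainable_variables)
+    return (loss, (trainable_variables, non_trainable_variables, optimizer_variables, metric_variables))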
+''
+
+@jax.jit
+def eval_step(state, data):
+    (trainable_variables, non_trainable_variables, metric_variables) = state
+    (x, y) = data
+    (y_pred, non_trainable_variables) = model.stateless_call(trainable_variables, non_trainable_variables, x)
+    loss = loss_fn(y, y_pred)
+    metric_variables = val_acc_metric.stateless_update_state(metric_variables, y, y_pred)
+    return (loss, (trainable_variables, non_trainable_variables, metric_variables))
+''
+optimizer.build(model.trainable_variables)
+trainable_variables = model.trainable_variables
+non_trainable_variables = model.non_trainable_variables
+optimizer_variables = optimizer.variables
+metric_variables = train_acc_metric.variables
+state = (trainable_variables, non_trainable_variables, optimizer_variables, metric_variables)
+for (step, data) in enumerate(train_dataset):
+    data = (data[0].numpy(), data[1].numpy())
+    (loss, state) = train_step(state, data)
+    if step % 100 == 0:
+        print(f'Training loss (for 1 batch) at step {step}: {float(loss):.4f}')
+        (_, _, _, metric_variables) = state
+        for (variable, value) in zip(train_acc_metric.variables, metric_variables):
+            variable.assign(value)
+        print(f'Training accuracy: {train_acc_metric.result()}')
+        print(f'Seen so far: {(step + 1) * batch_size} samples')
+# Unpack the trained state first, then seed the eval state with the
+# validation metric's own variables rather than the training metric's state.
+(trainable_variables, non_trainable_variables, optimizer_variables, _) = state
+metric_variables = val_acc_metric.variables
+state = (trainable_variables, non_trainable_variables, metric_variables)
+for (step, data) in enumerate(val_dataset):
+    data = (data[0].numpy(), data[1].numpy())
+    (loss, state) = eval_step(state, data)
+    if step % 100 == 0:
+        print(f'Validation loss (for 1 batch) at step {step}: {float(loss):.4f}')
+        (_, _, metric_variables) = state
+        for (variable, value) in zip(val_acc_metric.variables, metric_variables):
+            variable.assign(value)
+        print(f'Validation accuracy: {val_acc_metric.result()}')
+        print(f'Seen so far: {(step + 1) * batch_size} samples')
+''
+
+class ActivityRegularizationLayer(keras.layers.Layer):
+
+    def call(self, inputs):
+        self.add_loss(0.01 * jax.numpy.sum(inputs))
+        return inputs
+''
+inputs = keras.Input(shape=(784,), name='digits')
+x = keras.layers.Dense(64, activation='relu')(inputs)
+x = ActivityRegularizationLayer()(x)
+x = keras.layers.Dense(64, activation='relu')(x)
+outputs = keras.layers.Dense(10, name='predictions')(x)
+model = keras.Model(inputs=inputs, outputs=outputs)
+''
+
+def compute_loss_and_updates(trainable_variables, non_trainable_variables, metric_variables, x, y):
+    (y_pred, non_trainable_variables, losses) = model.stateless_call(trainable_variables, non_trainable_variables, x, return_losses=True)
+    loss = loss_fn(y, y_pred)
+    if losses:
+        loss += jax.numpy.sum(losses)
+    metric_variables = train_acc_metric.stateless_update_state(metric_variables, y, y_pred)
+    # Group the auxiliary outputs into one tuple so that
+    # value_and_grad(has_aux=True) receives a (loss, aux) pair.
+    return (loss, (non_trainable_variables, metric_variables))
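+# Usage sketch (an assumption, mirroring the earlier cells): with
+# has_aux=True, jax.value_and_grad expects the function above to return a
+# (loss, aux) pair, so it can be wrapped exactly like before.
+grad_fn = jax.value_and_grad(compute_loss_and_updates, has_aux=True)
+''
+
+# File: keras-master/guides/writing_a_custom_training_loop_in_tensorflow.py
+""""""
+''
+import time
+import os
+os.environ['KERAS_BACKEND'] = 'tensorflow'
+import tensorflow as tf
+import keras
+import numpy as np
+''
+''
+
+def get_model():
+    inputs = keras.Input(shape=(784,), name='digits')
+    x1 = keras.layers.Dense(64, activation='relu')(inputs)
+    x2 = keras.layers.Dense(64, activation='relu')(x1)
+    outputs = keras.layers.Dense(10, name='predictions')(x2)
+    model = keras.Model(inputs=inputs, outputs=outputs)
+    return model
+model = get_model()
+''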
+optimizer = keras.optimizers.Adam(learning_rate=0.001)
+loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+batch_size = 32
+((x_train, y_train), (x_test, y_test)) = keras.datasets.mnist.load_data()
+x_train = np.reshape(x_train, (-1, 784))
+x_test = np.reshape(x_test, (-1, 784))
+x_val = x_train[-10000:]
+y_val = y_train[-10000:]
+x_train = x_train[:-10000]
+y_train = y_train[:-10000]
+train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
+train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
+val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
+val_dataset = val_dataset.batch(batch_size)
+''
+epochs = 3
+for epoch in range(epochs):
+    print(f'\nStart of epoch {epoch}')
+    for (step, (x_batch_train, y_batch_train)) in enumerate(train_dataset):
+        with tf.GradientTape() as tape:
+            logits = model(x_batch_train, training=True)
+            loss_value = loss_fn(y_batch_train, logits)
+        grads = tape.gradient(loss_value, model.trainable_weights)
+        optimizer.apply(grads, model.trainable_weights)
+        if step % 100 == 0:
+            print(f'Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}')
+            print(f'Seen so far: {(step + 1) * batch_size} samples')
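+# An optional variant (a sketch, not guide code): the same manual step with
+# per-tensor gradient clipping before the update. The helper name and the
+# clip value of 1.0 are arbitrary illustrative choices.
+def train_step_with_clipping(x, y):
+    with tf.GradientTape() as tape:
+        logits = model(x, training=True)
+        loss_value = loss_fn(y, logits)
+    grads = tape.gradient(loss_value, model.trainable_weights)
+    grads = [tf.clip_by_norm(g, 1.0) for g in grads]
+    optimizer.apply(grads, model.trainable_weights)
+    return loss_value
+''
+model = get_model()
+optimizer = keras.optimizers.Adam(learning_rate=0.001)
+loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
+val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
+''
+epochs = 2
+for epoch in range(epochs):
+    print(f'\nStart of epoch {epoch}')
+    start_time = time.time()
+    for (step, (x_batch_train, y_batch_train)) in enumerate(train_dataset):
+        with tf.GradientTape() as tape:
+            logits = model(x_batch_train, training=True)
+            loss_value = loss_fn(y_batch_train, logits)
+        grads = tape.gradient(loss_value, model.trainable_weights)
+        optimizer.apply(grads, model.trainable_weights)
+        train_acc_metric.update_state(y_batch_train, logits)
+        if step % 100 == 0:
+            print(f'Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}')
+            print(f'Seen so far: {(step + 1) * batch_size} samples')
+    train_acc = train_acc_metric.result()
+    print(f'Training acc over epoch: {float(train_acc):.4f}')
+    train_acc_metric.reset_state()
+    for (x_batch_val, y_batch_val) in val_dataset:
+        val_logits = model(x_batch_val, training=False)
+        val_acc_metric.update_state(y_batch_val, val_logits)
+    val_acc = val_acc_metric.result()
+    val_acc_metric.reset_state()
+    print(f'Validation acc: {float(val_acc):.4f}')
+    print(f'Time taken: {time.time() - start_time:.2f}s')
+''
+
+@tf.function
+def train_step(x, y):
+    with tf.GradientTape() as tape:
+        logits = model(x, training=True)
+        loss_value = loss_fn(y, logits)
+    grads = tape.gradient(loss_value, model.trainable_weights)
+    optimizer.apply(grads, model.trainable_weights)
+    train_acc_metric.update_state(y, logits)
+    return loss_value
+''
+
+@tf.function
+def test_step(x, y):
+    val_logits = model(x, training=False)
+    val_acc_metric.update_state(y, val_logits)
+''
+epochs = 2
+for epoch in range(epochs):
+    print(f'\nStart of epoch {epoch}')
+    start_time = time.time()
+    for (step, (x_batch_train, y_batch_train)) in enumerate(train_dataset):
+        loss_value = train_step(x_batch_train, y_batch_train)
+        if step % 100 == 0:
+            print(f'Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}')
+            print(f'Seen so far: {(step + 1) * batch_size} samples')
+    train_acc = train_acc_metric.result()
+    print(f'Training acc over epoch: {float(train_acc):.4f}')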
+    train_acc_metric.reset_state()
+    for (x_batch_val, y_batch_val) in val_dataset:
+        test_step(x_batch_val, y_batch_val)
+    val_acc = val_acc_metric.result()
+    val_acc_metric.reset_state()
+    print(f'Validation acc: {float(val_acc):.4f}')
+    print(f'Time taken: {time.time() - start_time:.2f}s')
+''
+''
+
+class ActivityRegularizationLayer(keras.layers.Layer):
+
+    def call(self, inputs):
+        self.add_loss(0.01 * tf.reduce_sum(inputs))
+        return inputs
+''
+inputs = keras.Input(shape=(784,), name='digits')
+x = keras.layers.Dense(64, activation='relu')(inputs)
+x = ActivityRegularizationLayer()(x)
+x = keras.layers.Dense(64, activation='relu')(x)
+outputs = keras.layers.Dense(10, name='predictions')(x)
+model = keras.Model(inputs=inputs, outputs=outputs)
+''
+
+@tf.function
+def train_step(x, y):
+    with tf.GradientTape() as tape:
+        logits = model(x, training=True)
+        loss_value = loss_fn(y, logits)
+        loss_value += sum(model.losses)
+    grads = tape.gradient(loss_value, model.trainable_weights)
+    optimizer.apply(grads, model.trainable_weights)
+    train_acc_metric.update_state(y, logits)
+    return loss_value
+''
+''
+discriminator = keras.Sequential([keras.Input(shape=(28, 28, 1)), keras.layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same'), keras.layers.LeakyReLU(negative_slope=0.2), keras.layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'), keras.layers.LeakyReLU(negative_slope=0.2), keras.layers.GlobalMaxPooling2D(), keras.layers.Dense(1)], name='discriminator')
+discriminator.summary()
+''
+latent_dim = 128
+generator = keras.Sequential([keras.Input(shape=(latent_dim,)), keras.layers.Dense(7 * 7 * 128), keras.layers.LeakyReLU(negative_slope=0.2), keras.layers.Reshape((7, 7, 128)), keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'), keras.layers.LeakyReLU(negative_slope=0.2), keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'), keras.layers.LeakyReLU(negative_slope=0.2), keras.layers.Conv2D(1, (7, 7), padding='same', activation='sigmoid')], name='generator')
+''
+d_optimizer = keras.optimizers.Adam(learning_rate=0.0003)
+g_optimizer = keras.optimizers.Adam(learning_rate=0.0004)
+loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
+
+@tf.function
+def train_step(real_images):
+    random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
+    generated_images = generator(random_latent_vectors)
+    combined_images = tf.concat([generated_images, real_images], axis=0)
+    labels = tf.concat([tf.ones((batch_size, 1)), tf.zeros((real_images.shape[0], 1))], axis=0)
+    labels += 0.05 * tf.random.uniform(labels.shape)
+    with tf.GradientTape() as tape:
+        predictions = discriminator(combined_images)
+        d_loss = loss_fn(labels, predictions)
+    grads = tape.gradient(d_loss, discriminator.trainable_weights)
+    d_optimizer.apply(grads, discriminator.trainable_weights)
+    random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
+    misleading_labels = tf.zeros((batch_size, 1))
+    with tf.GradientTape() as tape:
+        predictions = discriminator(generator(random_latent_vectors))
+        g_loss = loss_fn(misleading_labels, predictions)
+    grads = tape.gradient(g_loss, generator.trainable_weights)
+    g_optimizer.apply(grads, generator.trainable_weights)
+    return (d_loss, g_loss, generated_images)
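+# Usage sketch (not from the guide): new digits can be sampled by pushing
+# random latent vectors through the generator; before the training loop
+# below has run, the samples will just be noise.
+sample_latent_vectors = tf.random.normal(shape=(4, latent_dim))
+sampled_images = generator(sample_latent_vectors)
+print('Sampled batch shape:', sampled_images.shape)
+''
+batch_size = 64
+((x_train, _), (x_test, _)) = keras.datasets.mnist.load_data()
+all_digits = np.concatenate([x_train, x_test])
+all_digits = all_digits.astype('float32') / 255.0
+all_digits = np.reshape(all_digits, (-1, 28, 28, 1))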
+dataset = tf.data.Dataset.from_tensor_slices(all_digits)
+dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
+epochs = 1
+save_dir = './'
+for epoch in range(epochs):
+    print(f'\nStart epoch {epoch}')
+    for (step, real_images) in enumerate(dataset):
+        (d_loss, g_loss, generated_images) = train_step(real_images)
+        if step % 100 == 0:
+            print(f'discriminator loss at step {step}: {d_loss:.2f}')
+            print(f'adversarial loss at step {step}: {g_loss:.2f}')
+            img = keras.utils.array_to_img(generated_images[0] * 255.0, scale=False)
+            img.save(os.path.join(save_dir, f'generated_img_{step}.png'))
+        if step > 10:
+            break
+''
+
+# File: keras-master/guides/writing_a_custom_training_loop_in_torch.py
+""""""
+''
+import os
+os.environ['KERAS_BACKEND'] = 'torch'
+import torch
+import keras
+import numpy as np
+''
+''
+
+def get_model():
+    inputs = keras.Input(shape=(784,), name='digits')
+    x1 = keras.layers.Dense(64, activation='relu')(inputs)
+    x2 = keras.layers.Dense(64, activation='relu')(x1)
+    outputs = keras.layers.Dense(10, name='predictions')(x2)
+    model = keras.Model(inputs=inputs, outputs=outputs)
+    return model
+batch_size = 32
+((x_train, y_train), (x_test, y_test)) = keras.datasets.mnist.load_data()
+x_train = np.reshape(x_train, (-1, 784)).astype('float32')
+x_test = np.reshape(x_test, (-1, 784)).astype('float32')
+y_train = keras.utils.to_categorical(y_train)
+y_test = keras.utils.to_categorical(y_test)
+x_val = x_train[-10000:]
+y_val = y_train[-10000:]
+x_train = x_train[:-10000]
+y_train = y_train[:-10000]
+train_dataset = torch.utils.data.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
+val_dataset = torch.utils.data.TensorDataset(torch.from_numpy(x_val), torch.from_numpy(y_val))
+train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
+val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
+''
+model = get_model()
+optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
+loss_fn = torch.nn.CrossEntropyLoss()
+''
+epochs = 3
+for epoch in range(epochs):
+    for (step, (inputs, targets)) in enumerate(train_dataloader):
+        logits = model(inputs)
+        loss = loss_fn(logits, targets)
+        model.zero_grad()
+        loss.backward()
+        optimizer.step()
+        if step % 100 == 0:
+            print(f'Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}')
+            print(f'Seen so far: {(step + 1) * batch_size} samples')
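+# One-batch sanity check (a sketch under the setup above, not guide code):
+# pull a single batch from the dataloader and confirm that the forward pass
+# and loss computation work before running full epochs.
+(xb, yb) = next(iter(train_dataloader))
+print('One-batch loss:', float(loss_fn(model(xb), yb)))
+''
+model = get_model()
+optimizer = keras.optimizers.Adam(learning_rate=0.001)
+loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
+for epoch in range(epochs):
+    print(f'\nStart of epoch {epoch}')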
+    for (step, (inputs, targets)) in enumerate(train_dataloader):
+        logits = model(inputs)
+        loss = loss_fn(targets, logits)
+        model.zero_grad()
+        trainable_weights = [v for v in model.trainable_weights]
+        loss.backward()
+        gradients = [v.value.grad for v in trainable_weights]
+        with torch.no_grad():
+            optimizer.apply(gradients, trainable_weights)
+        if step % 100 == 0:
+            print(f'Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}')
+            print(f'Seen so far: {(step + 1) * batch_size} samples')
+''
+model = get_model()
+optimizer = keras.optimizers.Adam(learning_rate=0.001)
+loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
+train_acc_metric = keras.metrics.CategoricalAccuracy()
+val_acc_metric = keras.metrics.CategoricalAccuracy()
+''
+for epoch in range(epochs):
+    print(f'\nStart of epoch {epoch}')
+    for (step, (inputs, targets)) in enumerate(train_dataloader):
+        logits = model(inputs)
+        loss = loss_fn(targets, logits)
+        model.zero_grad()
+        trainable_weights = [v for v in model.trainable_weights]
+        loss.backward()
+        gradients = [v.value.grad for v in trainable_weights]
+        with torch.no_grad():
+            optimizer.apply(gradients, trainable_weights)
+        train_acc_metric.update_state(targets, logits)
+        if step % 100 == 0:
+            print(f'Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}')
+            print(f'Seen so far: {(step + 1) * batch_size} samples')
+    train_acc = train_acc_metric.result()
+    print(f'Training acc over epoch: {float(train_acc):.4f}')
+    train_acc_metric.reset_state()
+    for (x_batch_val, y_batch_val) in val_dataloader:
+        val_logits = model(x_batch_val, training=False)
+        val_acc_metric.update_state(y_batch_val, val_logits)
+    val_acc = val_acc_metric.result()
+    val_acc_metric.reset_state()
+    print(f'Validation acc: {float(val_acc):.4f}')
+''
+
+class ActivityRegularizationLayer(keras.layers.Layer):
+
+    def call(self, inputs):
+        self.add_loss(0.01 * torch.sum(inputs))
+        return inputs
+''
+inputs = keras.Input(shape=(784,), name='digits')
+x = keras.layers.Dense(64, activation='relu')(inputs)
+x = ActivityRegularizationLayer()(x)
+x = keras.layers.Dense(64, activation='relu')(x)
+outputs = keras.layers.Dense(10, name='predictions')(x)
+model = keras.Model(inputs=inputs, outputs=outputs)
+''
+model = get_model()
+optimizer = keras.optimizers.Adam(learning_rate=0.001)
+loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
+train_acc_metric = keras.metrics.CategoricalAccuracy()
+val_acc_metric = keras.metrics.CategoricalAccuracy()
+for epoch in range(epochs):
+    print(f'\nStart of epoch {epoch}')
+    for (step, (inputs, targets)) in enumerate(train_dataloader):
+        logits = model(inputs)
+        loss = loss_fn(targets, logits)
+        if model.losses:
+            # Summing the list directly works for any number of aux losses.
+            loss = loss + sum(model.losses)
+        model.zero_grad()
+        trainable_weights = [v for v in model.trainable_weights]
+        loss.backward()
+        gradients = [v.value.grad for v in trainable_weights]
+        with torch.no_grad():
+            optimizer.apply(gradients, trainable_weights)
+        train_acc_metric.update_state(targets, logits)
+        if step % 100 == 0:
+            print(f'Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}')
+            print(f'Seen so far: {(step + 1) * batch_size} samples')
+    train_acc = train_acc_metric.result()
+    print(f'Training acc over epoch: {float(train_acc):.4f}')
+    train_acc_metric.reset_state()
+    for (x_batch_val, y_batch_val) in val_dataloader:
+        val_logits = model(x_batch_val, training=False)
+        val_acc_metric.update_state(y_batch_val, val_logits)
+    val_acc = val_acc_metric.result()
+    val_acc_metric.reset_state()
+    print(f'Validation acc: {float(val_acc):.4f}')
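+# Sketch (an assumption, not from the guide): under the torch backend a
+# Keras Model is also a torch.nn.Module, so standard PyTorch introspection
+# applies to it.
+print('Keras model is a torch Module:', isinstance(model, torch.nn.Module))
+print('Trainable parameter count:', sum(p.numel() for p in model.parameters() if p.requires_grad))
+''
+
+# File: keras-master/guides/writing_your_own_callbacks.py
+""""""
+''
+''
+import numpy as np
+import keras
+''
+''
+''
+
+def get_model():
+    model = keras.Sequential()
+    model.add(keras.layers.Dense(1))
+    model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=0.1), loss='mean_squared_error', metrics=['mean_absolute_error'])
+    return model
+''
+((x_train, y_train), (x_test, y_test)) = keras.datasets.mnist.load_data()
+x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
+x_test = x_test.reshape(-1, 784).astype('float32') / 255.0
+x_train = x_train[:1000]
+y_train = y_train[:1000]
+x_test = x_test[:1000]
+y_test = y_test[:1000]
+''
+
+class CustomCallback(keras.callbacks.Callback):
+
+    def on_train_begin(self, logs=None):
+        keys = list(logs.keys())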
+        print('Starting training; got log keys: {}'.format(keys))
+
+    def on_train_end(self, logs=None):
+        keys = list(logs.keys())
+        print('Stop training; got log keys: {}'.format(keys))
+
+    def on_epoch_begin(self, epoch, logs=None):
+        keys = list(logs.keys())
+        print('Start epoch {} of training; got log keys: {}'.format(epoch, keys))
+
+    def on_epoch_end(self, epoch, logs=None):
+        keys = list(logs.keys())
+        print('End epoch {} of training; got log keys: {}'.format(epoch, keys))
+
+    def on_test_begin(self, logs=None):
+        keys = list(logs.keys())
+        print('Start testing; got log keys: {}'.format(keys))
+
+    def on_test_end(self, logs=None):
+        keys = list(logs.keys())
+        print('Stop testing; got log keys: {}'.format(keys))
+
+    def on_predict_begin(self, logs=None):
+        keys = list(logs.keys())
+        print('Start predicting; got log keys: {}'.format(keys))
+
+    def on_predict_end(self, logs=None):
+        keys = list(logs.keys())
+        print('Stop predicting; got log keys: {}'.format(keys))
+
+    def on_train_batch_begin(self, batch, logs=None):
+        keys = list(logs.keys())
+        print('...Training: start of batch {}; got log keys: {}'.format(batch, keys))
+
+    def on_train_batch_end(self, batch, logs=None):
+        keys = list(logs.keys())
+        print('...Training: end of batch {}; got log keys: {}'.format(batch, keys))
+
+    def on_test_batch_begin(self, batch, logs=None):
+        keys = list(logs.keys())
+        print('...Evaluating: start of batch {}; got log keys: {}'.format(batch, keys))
+
+    def on_test_batch_end(self, batch, logs=None):
+        keys = list(logs.keys())
+        print('...Evaluating: end of batch {}; got log keys: {}'.format(batch, keys))
+
+    def on_predict_batch_begin(self, batch, logs=None):
+        keys = list(logs.keys())
+        print('...Predicting: start of batch {}; got log keys: {}'.format(batch, keys))
+
+    def on_predict_batch_end(self, batch, logs=None):
+        keys = list(logs.keys())
+        print('...Predicting: end of batch {}; got log keys: {}'.format(batch, keys))
+''
+model = get_model()
+model.fit(x_train, y_train, batch_size=128, epochs=1, verbose=0, validation_split=0.5, callbacks=[CustomCallback()])
+res = model.evaluate(x_test, y_test, batch_size=128, verbose=0, callbacks=[CustomCallback()])
+res = model.predict(x_test, batch_size=128, callbacks=[CustomCallback()])
+''
+
+class LossAndErrorPrintingCallback(keras.callbacks.Callback):
+
+    def on_train_batch_end(self, batch, logs=None):
+        print('Up to batch {}, the average loss is {:7.2f}.'.format(batch, logs['loss']))
+
+    def on_test_batch_end(self, batch, logs=None):
+        print('Up to batch {}, the average loss is {:7.2f}.'.format(batch, logs['loss']))
+
+    def on_epoch_end(self, epoch, logs=None):
+        print('The average loss for epoch {} is {:7.2f} and mean absolute error is {:7.2f}.'.format(epoch, logs['loss'], logs['mean_absolute_error']))
+model = get_model()
+model.fit(x_train, y_train, batch_size=128, epochs=2, verbose=0, callbacks=[LossAndErrorPrintingCallback()])
+res = model.evaluate(x_test, y_test, batch_size=128, verbose=0, callbacks=[LossAndErrorPrintingCallback()])
+''
+''
+
+class EarlyStoppingAtMinLoss(keras.callbacks.Callback):
+
+    def __init__(self, patience=0):
+        super().__init__()
+        self.patience = patience
+        self.best_weights = None
+
+    def on_train_begin(self, logs=None):
+        self.wait = 0
+        self.stopped_epoch = 0
+        self.best = np.inf
+
+    def on_epoch_end(self, epoch, logs=None):
+        current = logs.get('loss')
+        if np.less(current, self.best):
+            self.best = current
+            self.wait = 0
+            self.best_weights = self.model.get_weights()
+        else:
+            self.wait += 1
+            if self.wait >= self.patience:
+                self.stopped_epoch = 
epoch + self.model.stop_training = True + print('Restoring model weights from the end of the best epoch.') + self.model.set_weights(self.best_weights) + + def on_train_end(self, logs=None): + if self.stopped_epoch > 0: + print(f'Epoch {self.stopped_epoch + 1}: early stopping') +model = get_model() +model.fit(x_train, y_train, batch_size=64, epochs=30, verbose=0, callbacks=[LossAndErrorPrintingCallback(), EarlyStoppingAtMinLoss()]) +'' + +class CustomLearningRateScheduler(keras.callbacks.Callback): + + def __init__(self, schedule): + super().__init__() + self.schedule = schedule + + def on_epoch_begin(self, epoch, logs=None): + if not hasattr(self.model.optimizer, 'learning_rate'): + raise ValueError('Optimizer must have a "learning_rate" attribute.') + lr = self.model.optimizer.learning_rate + scheduled_lr = self.schedule(epoch, lr) + self.model.optimizer.learning_rate = scheduled_lr + print(f'\nEpoch {epoch}: Learning rate is {float(np.array(scheduled_lr))}.') +LR_SCHEDULE = [(3, 0.05), (6, 0.01), (9, 0.005), (12, 0.001)] + +def lr_schedule(epoch, lr): + if epoch < LR_SCHEDULE[0][0] or epoch > LR_SCHEDULE[-1][0]: + return lr + for i in range(len(LR_SCHEDULE)): + if epoch == LR_SCHEDULE[i][0]: + return LR_SCHEDULE[i][1] + return lr +model = get_model() +model.fit(x_train, y_train, batch_size=64, epochs=15, verbose=0, callbacks=[LossAndErrorPrintingCallback(), CustomLearningRateScheduler(lr_schedule)]) +'' + +# File: keras-master/keras/__init__.py +import os +from keras.api import DTypePolicy +from keras.api import FloatDTypePolicy +from keras.api import Function +from keras.api import Initializer +from keras.api import Input +from keras.api import InputSpec +from keras.api import KerasTensor +from keras.api import Layer +from keras.api import Loss +from keras.api import Metric +from keras.api import Model +from keras.api import Operation +from keras.api import Optimizer +from keras.api import Quantizer +from keras.api import Regularizer +from keras.api import Sequential +from keras.api import StatelessScope +from keras.api import SymbolicScope +from keras.api import Variable +from keras.api import __version__ +from keras.api import activations +from keras.api import applications +from keras.api import backend +from keras.api import callbacks +from keras.api import config +from keras.api import constraints +from keras.api import datasets +from keras.api import device +from keras.api import distribution +from keras.api import dtype_policies +from keras.api import export +from keras.api import initializers +from keras.api import layers +from keras.api import legacy +from keras.api import losses +from keras.api import metrics +from keras.api import mixed_precision +from keras.api import models +from keras.api import name_scope +from keras.api import ops +from keras.api import optimizers +from keras.api import preprocessing +from keras.api import quantizers +from keras.api import random +from keras.api import regularizers +from keras.api import saving +from keras.api import tree +from keras.api import utils +from keras.api import version +__path__.append(os.path.join(os.path.dirname(__file__), 'api')) +del os + +def __dir__(): + keys = dict.fromkeys(globals().keys()) + keys.pop('src') + keys.pop('api') + return list(keys) +__all__ = [name for name in globals().keys() if not (name.startswith('_') or name in ('src', 'api'))] + +# File: keras-master/keras/api/__init__.py +"""""" +from keras.api import _tf_keras +from keras.api import activations +from keras.api import applications +from keras.api 
import backend +from keras.api import callbacks +from keras.api import config +from keras.api import constraints +from keras.api import datasets +from keras.api import distribution +from keras.api import dtype_policies +from keras.api import export +from keras.api import initializers +from keras.api import layers +from keras.api import legacy +from keras.api import losses +from keras.api import metrics +from keras.api import mixed_precision +from keras.api import models +from keras.api import ops +from keras.api import optimizers +from keras.api import preprocessing +from keras.api import quantizers +from keras.api import random +from keras.api import regularizers +from keras.api import saving +from keras.api import tree +from keras.api import utils +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.backend.common.symbolic_scope import SymbolicScope +from keras.src.backend.exports import Variable +from keras.src.backend.exports import device +from keras.src.backend.exports import name_scope +from keras.src.dtype_policies.dtype_policy import DTypePolicy +from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy +from keras.src.initializers.initializer import Initializer +from keras.src.layers.core.input_layer import Input +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.losses.loss import Loss +from keras.src.metrics.metric import Metric +from keras.src.models.model import Model +from keras.src.models.sequential import Sequential +from keras.src.ops.function import Function +from keras.src.ops.operation import Operation +from keras.src.optimizers.optimizer import Optimizer +from keras.src.quantizers.quantizers import Quantizer +from keras.src.regularizers.regularizers import Regularizer +from keras.src.version import __version__ +from keras.src.version import version + +# File: keras-master/keras/api/_tf_keras/keras/__init__.py +"""""" +from keras.api import activations +from keras.api import applications +from keras.api import callbacks +from keras.api import config +from keras.api import constraints +from keras.api import datasets +from keras.api import distribution +from keras.api import dtype_policies +from keras.api import export +from keras.api import initializers +from keras.api import legacy +from keras.api import mixed_precision +from keras.api import models +from keras.api import ops +from keras.api import optimizers +from keras.api import quantizers +from keras.api import random +from keras.api import regularizers +from keras.api import tree +from keras.api import utils +from keras.api._tf_keras.keras import backend +from keras.api._tf_keras.keras import layers +from keras.api._tf_keras.keras import losses +from keras.api._tf_keras.keras import metrics +from keras.api._tf_keras.keras import preprocessing +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.backend.common.symbolic_scope import SymbolicScope +from keras.src.backend.exports import Variable +from keras.src.backend.exports import device +from keras.src.backend.exports import name_scope +from keras.src.dtype_policies.dtype_policy import DTypePolicy +from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy +from keras.src.initializers.initializer import Initializer +from keras.src.layers.core.input_layer import Input +from keras.src.layers.input_spec 
import InputSpec +from keras.src.layers.layer import Layer +from keras.src.losses.loss import Loss +from keras.src.metrics.metric import Metric +from keras.src.models.model import Model +from keras.src.models.sequential import Sequential +from keras.src.ops.function import Function +from keras.src.ops.operation import Operation +from keras.src.optimizers.optimizer import Optimizer +from keras.src.quantizers.quantizers import Quantizer +from keras.src.regularizers.regularizers import Regularizer +from keras.src.version import __version__ +from keras.src.version import version + +# File: keras-master/keras/api/_tf_keras/keras/activations/__init__.py +"""""" +from keras.src.activations import deserialize +from keras.src.activations import get +from keras.src.activations import serialize +from keras.src.activations.activations import elu +from keras.src.activations.activations import exponential +from keras.src.activations.activations import gelu +from keras.src.activations.activations import hard_sigmoid +from keras.src.activations.activations import hard_silu +from keras.src.activations.activations import hard_silu as hard_swish +from keras.src.activations.activations import leaky_relu +from keras.src.activations.activations import linear +from keras.src.activations.activations import log_softmax +from keras.src.activations.activations import mish +from keras.src.activations.activations import relu +from keras.src.activations.activations import relu6 +from keras.src.activations.activations import selu +from keras.src.activations.activations import sigmoid +from keras.src.activations.activations import silu +from keras.src.activations.activations import silu as swish +from keras.src.activations.activations import softmax +from keras.src.activations.activations import softplus +from keras.src.activations.activations import softsign +from keras.src.activations.activations import tanh + +# File: keras-master/keras/api/_tf_keras/keras/applications/__init__.py +"""""" +from keras.api.applications import convnext +from keras.api.applications import densenet +from keras.api.applications import efficientnet +from keras.api.applications import efficientnet_v2 +from keras.api.applications import imagenet_utils +from keras.api.applications import inception_resnet_v2 +from keras.api.applications import inception_v3 +from keras.api.applications import mobilenet +from keras.api.applications import mobilenet_v2 +from keras.api.applications import mobilenet_v3 +from keras.api.applications import nasnet +from keras.api.applications import resnet +from keras.api.applications import resnet50 +from keras.api.applications import resnet_v2 +from keras.api.applications import vgg16 +from keras.api.applications import vgg19 +from keras.api.applications import xception +from keras.src.applications.convnext import ConvNeXtBase +from keras.src.applications.convnext import ConvNeXtLarge +from keras.src.applications.convnext import ConvNeXtSmall +from keras.src.applications.convnext import ConvNeXtTiny +from keras.src.applications.convnext import ConvNeXtXLarge +from keras.src.applications.densenet import DenseNet121 +from keras.src.applications.densenet import DenseNet169 +from keras.src.applications.densenet import DenseNet201 +from keras.src.applications.efficientnet import EfficientNetB0 +from keras.src.applications.efficientnet import EfficientNetB1 +from keras.src.applications.efficientnet import EfficientNetB2 +from keras.src.applications.efficientnet import EfficientNetB3 +from keras.src.applications.efficientnet 
import EfficientNetB4 +from keras.src.applications.efficientnet import EfficientNetB5 +from keras.src.applications.efficientnet import EfficientNetB6 +from keras.src.applications.efficientnet import EfficientNetB7 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B0 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B1 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B2 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B3 +from keras.src.applications.efficientnet_v2 import EfficientNetV2L +from keras.src.applications.efficientnet_v2 import EfficientNetV2M +from keras.src.applications.efficientnet_v2 import EfficientNetV2S +from keras.src.applications.inception_resnet_v2 import InceptionResNetV2 +from keras.src.applications.inception_v3 import InceptionV3 +from keras.src.applications.mobilenet import MobileNet +from keras.src.applications.mobilenet_v2 import MobileNetV2 +from keras.src.applications.mobilenet_v3 import MobileNetV3Large +from keras.src.applications.mobilenet_v3 import MobileNetV3Small +from keras.src.applications.nasnet import NASNetLarge +from keras.src.applications.nasnet import NASNetMobile +from keras.src.applications.resnet import ResNet50 +from keras.src.applications.resnet import ResNet101 +from keras.src.applications.resnet import ResNet152 +from keras.src.applications.resnet_v2 import ResNet50V2 +from keras.src.applications.resnet_v2 import ResNet101V2 +from keras.src.applications.resnet_v2 import ResNet152V2 +from keras.src.applications.vgg16 import VGG16 +from keras.src.applications.vgg19 import VGG19 +from keras.src.applications.xception import Xception + +# File: keras-master/keras/api/_tf_keras/keras/applications/convnext/__init__.py +"""""" +from keras.src.applications.convnext import ConvNeXtBase +from keras.src.applications.convnext import ConvNeXtLarge +from keras.src.applications.convnext import ConvNeXtSmall +from keras.src.applications.convnext import ConvNeXtTiny +from keras.src.applications.convnext import ConvNeXtXLarge +from keras.src.applications.convnext import decode_predictions +from keras.src.applications.convnext import preprocess_input + +# File: keras-master/keras/api/_tf_keras/keras/applications/efficientnet/__init__.py +"""""" +from keras.src.applications.efficientnet import EfficientNetB0 +from keras.src.applications.efficientnet import EfficientNetB1 +from keras.src.applications.efficientnet import EfficientNetB2 +from keras.src.applications.efficientnet import EfficientNetB3 +from keras.src.applications.efficientnet import EfficientNetB4 +from keras.src.applications.efficientnet import EfficientNetB5 +from keras.src.applications.efficientnet import EfficientNetB6 +from keras.src.applications.efficientnet import EfficientNetB7 +from keras.src.applications.efficientnet import decode_predictions +from keras.src.applications.efficientnet import preprocess_input + +# File: keras-master/keras/api/_tf_keras/keras/applications/efficientnet_v2/__init__.py +"""""" +from keras.src.applications.efficientnet_v2 import EfficientNetV2B0 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B1 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B2 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B3 +from keras.src.applications.efficientnet_v2 import EfficientNetV2L +from keras.src.applications.efficientnet_v2 import EfficientNetV2M +from keras.src.applications.efficientnet_v2 import EfficientNetV2S +from keras.src.applications.efficientnet_v2 import 
+
+# File: keras-master/keras/api/_tf_keras/keras/backend/__init__.py
+""""""
+from keras.src.backend.common.dtypes import result_type
+from keras.src.backend.common.global_state import clear_session
+from keras.src.backend.common.keras_tensor import is_keras_tensor
+from keras.src.backend.common.variables import is_float_dtype
+from keras.src.backend.common.variables import is_int_dtype
+from keras.src.backend.common.variables import standardize_dtype
+from keras.src.backend.config import backend
+from keras.src.backend.config import epsilon
+from keras.src.backend.config import floatx
+from keras.src.backend.config import image_data_format
+from keras.src.backend.config import set_epsilon
+from keras.src.backend.config import set_floatx
+from keras.src.backend.config import set_image_data_format
+from keras.src.legacy.backend import abs
+from keras.src.legacy.backend import all
+from keras.src.legacy.backend import any
+from keras.src.legacy.backend import arange
+from keras.src.legacy.backend import argmax
+from keras.src.legacy.backend import argmin
+from keras.src.legacy.backend import batch_dot
+from keras.src.legacy.backend import batch_flatten
+from keras.src.legacy.backend import batch_get_value
+from keras.src.legacy.backend import batch_normalization
+from keras.src.legacy.backend import batch_set_value
+from keras.src.legacy.backend import bias_add
+from keras.src.legacy.backend import binary_crossentropy
+from keras.src.legacy.backend import binary_focal_crossentropy
+from keras.src.legacy.backend import cast
+from keras.src.legacy.backend import cast_to_floatx
+from keras.src.legacy.backend import categorical_crossentropy
+from keras.src.legacy.backend import categorical_focal_crossentropy
+from keras.src.legacy.backend import clip
+from keras.src.legacy.backend import concatenate
+from keras.src.legacy.backend import constant
+from keras.src.legacy.backend import conv1d
+from keras.src.legacy.backend import conv2d
+from keras.src.legacy.backend import conv2d_transpose
+from keras.src.legacy.backend import conv3d
+from keras.src.legacy.backend import cos
+from keras.src.legacy.backend import count_params
+from keras.src.legacy.backend import ctc_batch_cost
+from keras.src.legacy.backend import ctc_decode
+from keras.src.legacy.backend import ctc_label_dense_to_sparse
+from keras.src.legacy.backend import cumprod
+from keras.src.legacy.backend import cumsum
+from keras.src.legacy.backend import depthwise_conv2d
+from keras.src.legacy.backend import dot
+from keras.src.legacy.backend import dropout
+from keras.src.legacy.backend import dtype
+from keras.src.legacy.backend import elu
+from keras.src.legacy.backend import equal
+from keras.src.legacy.backend import eval
+from keras.src.legacy.backend import exp
+from keras.src.legacy.backend import expand_dims
+from keras.src.legacy.backend import eye
+from keras.src.legacy.backend import flatten
+from keras.src.legacy.backend import foldl
+from keras.src.legacy.backend import foldr
+from keras.src.legacy.backend import gather
+from keras.src.legacy.backend import get_value
+from keras.src.legacy.backend import gradients
+from keras.src.legacy.backend import greater
+from keras.src.legacy.backend import greater_equal
+from keras.src.legacy.backend import hard_sigmoid
+from keras.src.legacy.backend import in_top_k
+from keras.src.legacy.backend import int_shape
+from keras.src.legacy.backend import is_sparse
+from keras.src.legacy.backend import l2_normalize
+from keras.src.legacy.backend import less
+from keras.src.legacy.backend import less_equal
+from keras.src.legacy.backend import log
+from keras.src.legacy.backend import map_fn
+from keras.src.legacy.backend import max
+from keras.src.legacy.backend import maximum
+from keras.src.legacy.backend import mean
+from keras.src.legacy.backend import min
+from keras.src.legacy.backend import minimum
+from keras.src.legacy.backend import moving_average_update
+from keras.src.legacy.backend import name_scope
+from keras.src.legacy.backend import ndim
+from keras.src.legacy.backend import not_equal
+from keras.src.legacy.backend import one_hot
+from keras.src.legacy.backend import ones
+from keras.src.legacy.backend import ones_like
+from keras.src.legacy.backend import permute_dimensions
+from keras.src.legacy.backend import pool2d
+from keras.src.legacy.backend import pool3d
+from keras.src.legacy.backend import pow
+from keras.src.legacy.backend import prod
+from keras.src.legacy.backend import random_bernoulli
+from keras.src.legacy.backend import random_normal
+from keras.src.legacy.backend import random_normal_variable
+from keras.src.legacy.backend import random_uniform
+from keras.src.legacy.backend import random_uniform_variable
+from keras.src.legacy.backend import relu
+from keras.src.legacy.backend import repeat
+from keras.src.legacy.backend import repeat_elements
+from keras.src.legacy.backend import reshape
+from keras.src.legacy.backend import resize_images
+from keras.src.legacy.backend import resize_volumes
+from keras.src.legacy.backend import reverse
+from keras.src.legacy.backend import rnn
+from keras.src.legacy.backend import round
+from keras.src.legacy.backend import separable_conv2d
+from keras.src.legacy.backend import set_value
+from keras.src.legacy.backend import shape
+from keras.src.legacy.backend import sigmoid
+from keras.src.legacy.backend import sign
+from keras.src.legacy.backend import sin
+from keras.src.legacy.backend import softmax
+from keras.src.legacy.backend import softplus
+from keras.src.legacy.backend import softsign
+from keras.src.legacy.backend import sparse_categorical_crossentropy
+from keras.src.legacy.backend import spatial_2d_padding
+from keras.src.legacy.backend import spatial_3d_padding
+from keras.src.legacy.backend import sqrt
+from keras.src.legacy.backend import square
+from keras.src.legacy.backend import squeeze
+from keras.src.legacy.backend import stack
+from keras.src.legacy.backend import std
+from keras.src.legacy.backend import stop_gradient
+from keras.src.legacy.backend import sum
+from keras.src.legacy.backend import switch
+from keras.src.legacy.backend import tanh
+from keras.src.legacy.backend import temporal_padding
+from keras.src.legacy.backend import tile
+from keras.src.legacy.backend import to_dense
+from keras.src.legacy.backend import transpose
+from keras.src.legacy.backend import truncated_normal
+from keras.src.legacy.backend import update
+from keras.src.legacy.backend import update_add
+from keras.src.legacy.backend import update_sub
+from keras.src.legacy.backend import var
+from keras.src.legacy.backend import variable
+from keras.src.legacy.backend import zeros
+from keras.src.legacy.backend import zeros_like
+from keras.src.utils.naming import get_uid
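This file blends a small set of Keras 3 utilities with the much larger keras.src.legacy.backend surface kept for tf.keras compatibility. A quick sketch of the non-legacy part, assuming any installed backend:

import keras

print(keras.backend.backend())            # e.g. "tensorflow", "jax", or "torch"
print(keras.backend.floatx())             # default float dtype, "float32"
print(keras.backend.epsilon())            # numeric fuzz factor, 1e-07
print(keras.backend.image_data_format())  # "channels_last" by default

keras.backend.clear_session()             # reset global state between model builds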
+
+# File: keras-master/keras/api/_tf_keras/keras/callbacks/__init__.py
+""""""
+from keras.src.callbacks.backup_and_restore import BackupAndRestore
+from keras.src.callbacks.callback import Callback
+from keras.src.callbacks.callback_list import CallbackList
+from keras.src.callbacks.csv_logger import CSVLogger
+from keras.src.callbacks.early_stopping import EarlyStopping
+from keras.src.callbacks.history import History
+from keras.src.callbacks.lambda_callback import LambdaCallback
+from keras.src.callbacks.learning_rate_scheduler import LearningRateScheduler
+from keras.src.callbacks.model_checkpoint import ModelCheckpoint
+from keras.src.callbacks.progbar_logger import ProgbarLogger
+from keras.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau
+from keras.src.callbacks.remote_monitor import RemoteMonitor
+from keras.src.callbacks.swap_ema_weights import SwapEMAWeights
+from keras.src.callbacks.tensorboard import TensorBoard
+from keras.src.callbacks.terminate_on_nan import TerminateOnNaN
+
+# File: keras-master/keras/api/_tf_keras/keras/config/__init__.py
+""""""
+from keras.src.backend.config import backend
+from keras.src.backend.config import epsilon
+from keras.src.backend.config import floatx
+from keras.src.backend.config import image_data_format
+from keras.src.backend.config import set_epsilon
+from keras.src.backend.config import set_floatx
+from keras.src.backend.config import set_image_data_format
+from keras.src.dtype_policies.dtype_policy import dtype_policy
+from keras.src.dtype_policies.dtype_policy import set_dtype_policy
+from keras.src.saving.serialization_lib import enable_unsafe_deserialization
+from keras.src.utils.backend_utils import set_backend
+from keras.src.utils.io_utils import disable_interactive_logging
+from keras.src.utils.io_utils import enable_interactive_logging
+from keras.src.utils.io_utils import is_interactive_logging_enabled
+from keras.src.utils.traceback_utils import disable_traceback_filtering
+from keras.src.utils.traceback_utils import enable_traceback_filtering
+from keras.src.utils.traceback_utils import is_traceback_filtering_enabled
+
+# File: keras-master/keras/api/_tf_keras/keras/constraints/__init__.py
+""""""
+from keras.src.constraints import deserialize
+from keras.src.constraints import get
+from keras.src.constraints import serialize
+from keras.src.constraints.constraints import Constraint
+from keras.src.constraints.constraints import MaxNorm
+from keras.src.constraints.constraints import MaxNorm as max_norm
+from keras.src.constraints.constraints import MinMaxNorm
+from keras.src.constraints.constraints import MinMaxNorm as min_max_norm
+from keras.src.constraints.constraints import NonNeg
+from keras.src.constraints.constraints import NonNeg as non_neg
+from keras.src.constraints.constraints import UnitNorm
+from keras.src.constraints.constraints import UnitNorm as unit_norm
+
+# File: keras-master/keras/api/_tf_keras/keras/datasets/__init__.py
+""""""
+from keras.api.datasets import boston_housing
+from keras.api.datasets import california_housing
+from keras.api.datasets import cifar10
+from keras.api.datasets import cifar100
+from keras.api.datasets import fashion_mnist
+from keras.api.datasets import imdb
+from keras.api.datasets import mnist
+from keras.api.datasets import reuters
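The dataset loaders and callbacks above compose directly in Model.fit. A small end-to-end sketch (note that mnist.load_data() downloads the data on first use):

import keras

(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0

model = keras.Sequential([
    keras.layers.Dense(64, activation="relu"),
    keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

# Stop when validation loss plateaus and roll back to the best weights seen.
early_stop = keras.callbacks.EarlyStopping(monitor="val_loss", patience=2,
                                           restore_best_weights=True)
model.fit(x_train, y_train, validation_split=0.1, epochs=20,
          callbacks=[early_stop])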
+
+# File: keras-master/keras/api/_tf_keras/keras/distribution/__init__.py
+""""""
+from keras.src.distribution.distribution_lib import DataParallel
+from keras.src.distribution.distribution_lib import DeviceMesh
+from keras.src.distribution.distribution_lib import LayoutMap
+from keras.src.distribution.distribution_lib import ModelParallel
+from keras.src.distribution.distribution_lib import TensorLayout
+from keras.src.distribution.distribution_lib import distribute_tensor
+from keras.src.distribution.distribution_lib import distribution
+from keras.src.distribution.distribution_lib import initialize
+from keras.src.distribution.distribution_lib import list_devices
+from keras.src.distribution.distribution_lib import set_distribution
+
+# File: keras-master/keras/api/_tf_keras/keras/dtype_policies/__init__.py
+""""""
+from keras.src.dtype_policies import deserialize
+from keras.src.dtype_policies import get
+from keras.src.dtype_policies import serialize
+from keras.src.dtype_policies.dtype_policy import DTypePolicy
+from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
+from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
+from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
+from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
+
+# File: keras-master/keras/api/_tf_keras/keras/initializers/__init__.py
+""""""
+from keras.src.initializers import deserialize
+from keras.src.initializers import get
+from keras.src.initializers import serialize
+from keras.src.initializers.constant_initializers import Constant
+from keras.src.initializers.constant_initializers import Constant as constant
+from keras.src.initializers.constant_initializers import Identity
+from keras.src.initializers.constant_initializers import Identity as IdentityInitializer
+from keras.src.initializers.constant_initializers import Identity as identity
+from keras.src.initializers.constant_initializers import Ones
+from keras.src.initializers.constant_initializers import Ones as ones
+from keras.src.initializers.constant_initializers import Zeros
+from keras.src.initializers.constant_initializers import Zeros as zeros
+from keras.src.initializers.initializer import Initializer
+from keras.src.initializers.random_initializers import GlorotNormal
+from keras.src.initializers.random_initializers import GlorotNormal as glorot_normal
+from keras.src.initializers.random_initializers import GlorotUniform
+from keras.src.initializers.random_initializers import GlorotUniform as glorot_uniform
+from keras.src.initializers.random_initializers import HeNormal
+from keras.src.initializers.random_initializers import HeNormal as he_normal
+from keras.src.initializers.random_initializers import HeUniform
+from keras.src.initializers.random_initializers import HeUniform as he_uniform
+from keras.src.initializers.random_initializers import LecunNormal
+from keras.src.initializers.random_initializers import LecunNormal as lecun_normal
+from keras.src.initializers.random_initializers import LecunUniform
+from keras.src.initializers.random_initializers import LecunUniform as lecun_uniform
+from keras.src.initializers.random_initializers import OrthogonalInitializer
+from keras.src.initializers.random_initializers import OrthogonalInitializer as Orthogonal
+from keras.src.initializers.random_initializers import OrthogonalInitializer as orthogonal
+from keras.src.initializers.random_initializers import RandomNormal
+from keras.src.initializers.random_initializers import RandomNormal as random_normal
+from keras.src.initializers.random_initializers import RandomUniform
+from keras.src.initializers.random_initializers import RandomUniform as random_uniform
+from keras.src.initializers.random_initializers import TruncatedNormal
+from keras.src.initializers.random_initializers import TruncatedNormal as truncated_normal
+from keras.src.initializers.random_initializers import VarianceScaling
+from keras.src.initializers.random_initializers import VarianceScaling as variance_scaling
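Every initializer exported here is a callable that maps a shape to a tensor, and the lowercase aliases make the same classes reachable by string. A brief sketch:

import keras

init = keras.initializers.HeNormal(seed=42)
w = init(shape=(3, 3))  # initializers are callable on a shape

# Equivalent ways to attach one to a layer: by object or by string alias.
dense_a = keras.layers.Dense(8, kernel_initializer=init)
dense_b = keras.layers.Dense(8, kernel_initializer="glorot_uniform")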
+
+# File: keras-master/keras/api/_tf_keras/keras/layers/__init__.py
+""""""
+from keras.src.export.export_lib import TFSMLayer
+from keras.src.layers import deserialize
+from keras.src.layers import serialize
+from keras.src.layers.activations.activation import Activation
+from keras.src.layers.activations.elu import ELU
+from keras.src.layers.activations.leaky_relu import LeakyReLU
+from keras.src.layers.activations.prelu import PReLU
+from keras.src.layers.activations.relu import ReLU
+from keras.src.layers.activations.softmax import Softmax
+from keras.src.layers.attention.additive_attention import AdditiveAttention
+from keras.src.layers.attention.attention import Attention
+from keras.src.layers.attention.grouped_query_attention import GroupedQueryAttention as GroupQueryAttention
+from keras.src.layers.attention.multi_head_attention import MultiHeadAttention
+from keras.src.layers.convolutional.conv1d import Conv1D
+from keras.src.layers.convolutional.conv1d import Conv1D as Convolution1D
+from keras.src.layers.convolutional.conv1d_transpose import Conv1DTranspose
+from keras.src.layers.convolutional.conv1d_transpose import Conv1DTranspose as Convolution1DTranspose
+from keras.src.layers.convolutional.conv2d import Conv2D
+from keras.src.layers.convolutional.conv2d import Conv2D as Convolution2D
+from keras.src.layers.convolutional.conv2d_transpose import Conv2DTranspose
+from keras.src.layers.convolutional.conv2d_transpose import Conv2DTranspose as Convolution2DTranspose
+from keras.src.layers.convolutional.conv3d import Conv3D
+from keras.src.layers.convolutional.conv3d import Conv3D as Convolution3D
+from keras.src.layers.convolutional.conv3d_transpose import Conv3DTranspose
+from keras.src.layers.convolutional.conv3d_transpose import Conv3DTranspose as Convolution3DTranspose
+from keras.src.layers.convolutional.depthwise_conv1d import DepthwiseConv1D
+from keras.src.layers.convolutional.depthwise_conv2d import DepthwiseConv2D
+from keras.src.layers.convolutional.separable_conv1d import SeparableConv1D
+from keras.src.layers.convolutional.separable_conv1d import SeparableConv1D as SeparableConvolution1D
+from keras.src.layers.convolutional.separable_conv2d import SeparableConv2D
+from keras.src.layers.convolutional.separable_conv2d import SeparableConv2D as SeparableConvolution2D
+from keras.src.layers.core.dense import Dense
+from keras.src.layers.core.einsum_dense import EinsumDense
+from keras.src.layers.core.embedding import Embedding
+from keras.src.layers.core.identity import Identity
+from keras.src.layers.core.input_layer import Input
+from keras.src.layers.core.input_layer import InputLayer
+from keras.src.layers.core.lambda_layer import Lambda
+from keras.src.layers.core.masking import Masking
+from keras.src.layers.core.wrapper import Wrapper
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.layers.merging.add import Add
+from keras.src.layers.merging.add import add
+from keras.src.layers.merging.average import Average
+from keras.src.layers.merging.average import average
+from keras.src.layers.merging.concatenate import Concatenate
+from keras.src.layers.merging.concatenate import concatenate
+from keras.src.layers.merging.dot import Dot
+from keras.src.layers.merging.dot import dot
+from keras.src.layers.merging.maximum import Maximum
+from keras.src.layers.merging.maximum import maximum
+from keras.src.layers.merging.minimum import Minimum
+from keras.src.layers.merging.minimum import minimum
+from keras.src.layers.merging.multiply import Multiply
+from keras.src.layers.merging.multiply import multiply
+from keras.src.layers.merging.subtract import Subtract
+from keras.src.layers.merging.subtract import subtract
+from keras.src.layers.normalization.batch_normalization import BatchNormalization
+from keras.src.layers.normalization.group_normalization import GroupNormalization
+from keras.src.layers.normalization.layer_normalization import LayerNormalization
+from keras.src.layers.normalization.spectral_normalization import SpectralNormalization
+from keras.src.layers.normalization.unit_normalization import UnitNormalization
+from keras.src.layers.pooling.average_pooling1d import AveragePooling1D
+from keras.src.layers.pooling.average_pooling1d import AveragePooling1D as AvgPool1D
+from keras.src.layers.pooling.average_pooling2d import AveragePooling2D
+from keras.src.layers.pooling.average_pooling2d import AveragePooling2D as AvgPool2D
+from keras.src.layers.pooling.average_pooling3d import AveragePooling3D
+from keras.src.layers.pooling.average_pooling3d import AveragePooling3D as AvgPool3D
+from keras.src.layers.pooling.global_average_pooling1d import GlobalAveragePooling1D
+from keras.src.layers.pooling.global_average_pooling1d import GlobalAveragePooling1D as GlobalAvgPool1D
+from keras.src.layers.pooling.global_average_pooling2d import GlobalAveragePooling2D
+from keras.src.layers.pooling.global_average_pooling2d import GlobalAveragePooling2D as GlobalAvgPool2D
+from keras.src.layers.pooling.global_average_pooling3d import GlobalAveragePooling3D
+from keras.src.layers.pooling.global_average_pooling3d import GlobalAveragePooling3D as GlobalAvgPool3D
+from keras.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D
+from keras.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D as GlobalMaxPool1D
+from keras.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D
+from keras.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D as GlobalMaxPool2D
+from keras.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D
+from keras.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D as GlobalMaxPool3D
+from keras.src.layers.pooling.max_pooling1d import MaxPooling1D
+from keras.src.layers.pooling.max_pooling1d import MaxPooling1D as MaxPool1D
+from keras.src.layers.pooling.max_pooling2d import MaxPooling2D
+from keras.src.layers.pooling.max_pooling2d import MaxPooling2D as MaxPool2D
+from keras.src.layers.pooling.max_pooling3d import MaxPooling3D
+from keras.src.layers.pooling.max_pooling3d import MaxPooling3D as MaxPool3D
+from keras.src.layers.preprocessing.category_encoding import CategoryEncoding
+from keras.src.layers.preprocessing.discretization import Discretization
+from keras.src.layers.preprocessing.hashed_crossing import HashedCrossing
+from keras.src.layers.preprocessing.hashing import Hashing
+from keras.src.layers.preprocessing.image_preprocessing.auto_contrast import AutoContrast
+from keras.src.layers.preprocessing.image_preprocessing.center_crop import CenterCrop
+from keras.src.layers.preprocessing.image_preprocessing.random_brightness import RandomBrightness
+from keras.src.layers.preprocessing.image_preprocessing.random_contrast import RandomContrast
+from keras.src.layers.preprocessing.image_preprocessing.random_crop import RandomCrop
+from keras.src.layers.preprocessing.image_preprocessing.random_flip import RandomFlip
+from keras.src.layers.preprocessing.image_preprocessing.random_rotation import RandomRotation
+from keras.src.layers.preprocessing.image_preprocessing.random_translation import RandomTranslation
+from keras.src.layers.preprocessing.image_preprocessing.random_zoom import RandomZoom
+from keras.src.layers.preprocessing.image_preprocessing.resizing import Resizing
+from keras.src.layers.preprocessing.image_preprocessing.solarization import Solarization
+from keras.src.layers.preprocessing.integer_lookup import IntegerLookup
+from keras.src.layers.preprocessing.mel_spectrogram import MelSpectrogram
+from keras.src.layers.preprocessing.normalization import Normalization
+from keras.src.layers.preprocessing.rescaling import Rescaling
+from keras.src.layers.preprocessing.string_lookup import StringLookup
+from keras.src.layers.preprocessing.text_vectorization import TextVectorization
+from keras.src.layers.regularization.activity_regularization import ActivityRegularization
+from keras.src.layers.regularization.dropout import Dropout
+from keras.src.layers.regularization.gaussian_dropout import GaussianDropout
+from keras.src.layers.regularization.gaussian_noise import GaussianNoise
+from keras.src.layers.regularization.spatial_dropout import SpatialDropout1D
+from keras.src.layers.regularization.spatial_dropout import SpatialDropout2D
+from keras.src.layers.regularization.spatial_dropout import SpatialDropout3D
+from keras.src.layers.reshaping.cropping1d import Cropping1D
+from keras.src.layers.reshaping.cropping2d import Cropping2D
+from keras.src.layers.reshaping.cropping3d import Cropping3D
+from keras.src.layers.reshaping.flatten import Flatten
+from keras.src.layers.reshaping.permute import Permute
+from keras.src.layers.reshaping.repeat_vector import RepeatVector
+from keras.src.layers.reshaping.reshape import Reshape
+from keras.src.layers.reshaping.up_sampling1d import UpSampling1D
+from keras.src.layers.reshaping.up_sampling2d import UpSampling2D
+from keras.src.layers.reshaping.up_sampling3d import UpSampling3D
+from keras.src.layers.reshaping.zero_padding1d import ZeroPadding1D
+from keras.src.layers.reshaping.zero_padding2d import ZeroPadding2D
+from keras.src.layers.reshaping.zero_padding3d import ZeroPadding3D
+from keras.src.layers.rnn.bidirectional import Bidirectional
+from keras.src.layers.rnn.conv_lstm1d import ConvLSTM1D
+from keras.src.layers.rnn.conv_lstm2d import ConvLSTM2D
+from keras.src.layers.rnn.conv_lstm3d import ConvLSTM3D
+from keras.src.layers.rnn.gru import GRU
+from keras.src.layers.rnn.gru import GRUCell
+from keras.src.layers.rnn.lstm import LSTM
+from keras.src.layers.rnn.lstm import LSTMCell
+from keras.src.layers.rnn.rnn import RNN
+from keras.src.layers.rnn.simple_rnn import SimpleRNN
+from keras.src.layers.rnn.simple_rnn import SimpleRNNCell
+from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells
+from keras.src.layers.rnn.time_distributed import TimeDistributed
+from keras.src.legacy.layers import AlphaDropout
+from keras.src.legacy.layers import RandomHeight
+from keras.src.legacy.layers import RandomWidth
+from keras.src.legacy.layers import ThresholdedReLU
+from keras.src.utils.jax_layer import FlaxLayer
+from keras.src.utils.jax_layer import JaxLayer
+from keras.src.utils.torch_utils import TorchModuleWrapper
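A short functional-API sketch wiring together a few of the layers exported above (the shapes are arbitrary placeholders):

import keras

inputs = keras.layers.Input(shape=(28, 28, 1))
x = keras.layers.Conv2D(16, kernel_size=3, activation="relu")(inputs)
x = keras.layers.MaxPooling2D(pool_size=2)(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dropout(0.25)(x)
outputs = keras.layers.Dense(10, activation="softmax")(x)

model = keras.Model(inputs, outputs)
model.summary()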
+
+# File: keras-master/keras/api/_tf_keras/keras/losses/__init__.py
+""""""
+from keras.src.legacy.losses import Reduction
+from keras.src.losses import deserialize
+from keras.src.losses import get
+from keras.src.losses import serialize
+from keras.src.losses.loss import Loss
+from keras.src.losses.losses import CTC
+from keras.src.losses.losses import BinaryCrossentropy
+from keras.src.losses.losses import BinaryFocalCrossentropy
+from keras.src.losses.losses import CategoricalCrossentropy
+from keras.src.losses.losses import CategoricalFocalCrossentropy
+from keras.src.losses.losses import CategoricalHinge
+from keras.src.losses.losses import CosineSimilarity
+from keras.src.losses.losses import Dice
+from keras.src.losses.losses import Hinge
+from keras.src.losses.losses import Huber
+from keras.src.losses.losses import KLDivergence
+from keras.src.losses.losses import LogCosh
+from keras.src.losses.losses import MeanAbsoluteError
+from keras.src.losses.losses import MeanAbsolutePercentageError
+from keras.src.losses.losses import MeanSquaredError
+from keras.src.losses.losses import MeanSquaredLogarithmicError
+from keras.src.losses.losses import Poisson
+from keras.src.losses.losses import SparseCategoricalCrossentropy
+from keras.src.losses.losses import SquaredHinge
+from keras.src.losses.losses import Tversky
+from keras.src.losses.losses import binary_crossentropy
+from keras.src.losses.losses import binary_focal_crossentropy
+from keras.src.losses.losses import categorical_crossentropy
+from keras.src.losses.losses import categorical_focal_crossentropy
+from keras.src.losses.losses import categorical_hinge
+from keras.src.losses.losses import cosine_similarity
+from keras.src.losses.losses import ctc
+from keras.src.losses.losses import dice
+from keras.src.losses.losses import hinge
+from keras.src.losses.losses import huber
+from keras.src.losses.losses import kl_divergence as KLD
+from keras.src.losses.losses import kl_divergence as kld
+from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence
+from keras.src.losses.losses import log_cosh as logcosh
+from keras.src.losses.losses import mean_absolute_error as MAE
+from keras.src.losses.losses import mean_absolute_error as mae
+from keras.src.losses.losses import mean_absolute_percentage_error as MAPE
+from keras.src.losses.losses import mean_absolute_percentage_error as mape
+from keras.src.losses.losses import mean_squared_error as MSE
+from keras.src.losses.losses import mean_squared_error as mse
+from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE
+from keras.src.losses.losses import mean_squared_logarithmic_error as msle
+from keras.src.losses.losses import poisson
+from keras.src.losses.losses import sparse_categorical_crossentropy
+from keras.src.losses.losses import squared_hinge
+from keras.src.losses.losses import tversky
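Each loss ships both as a configurable class and as a plain per-sample function, with the "as" aliases (mse/MSE, kld/KLD, ...) preserving the old tf.keras spellings. For instance:

import numpy as np
import keras

y_true = np.array([[0.0, 1.0], [1.0, 0.0]])
y_pred = np.array([[0.1, 0.9], [0.6, 0.4]])

bce = keras.losses.BinaryCrossentropy()   # class form: reduced to a scalar
print(float(bce(y_true, y_pred)))

per_sample = keras.losses.binary_crossentropy(y_true, y_pred)
print(per_sample)                         # function form: one value per sample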
+
+# File: keras-master/keras/api/_tf_keras/keras/metrics/__init__.py
+""""""
+from keras.src.losses.losses import binary_crossentropy
+from keras.src.losses.losses import binary_focal_crossentropy
+from keras.src.losses.losses import categorical_crossentropy
+from keras.src.losses.losses import categorical_focal_crossentropy
+from keras.src.losses.losses import categorical_hinge
+from keras.src.losses.losses import hinge
+from keras.src.losses.losses import huber
+from keras.src.losses.losses import kl_divergence as KLD
+from keras.src.losses.losses import kl_divergence as kld
+from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence
+from keras.src.losses.losses import log_cosh as logcosh
+from keras.src.losses.losses import mean_absolute_error as MAE
+from keras.src.losses.losses import mean_absolute_error as mae
+from keras.src.losses.losses import mean_absolute_percentage_error as MAPE
+from keras.src.losses.losses import mean_absolute_percentage_error as mape
+from keras.src.losses.losses import mean_squared_error as MSE
+from keras.src.losses.losses import mean_squared_error as mse
+from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE
+from keras.src.losses.losses import mean_squared_logarithmic_error as msle
+from keras.src.losses.losses import poisson
+from keras.src.losses.losses import sparse_categorical_crossentropy
+from keras.src.losses.losses import squared_hinge
+from keras.src.metrics import deserialize
+from keras.src.metrics import get
+from keras.src.metrics import serialize
+from keras.src.metrics.accuracy_metrics import Accuracy
+from keras.src.metrics.accuracy_metrics import BinaryAccuracy
+from keras.src.metrics.accuracy_metrics import CategoricalAccuracy
+from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy
+from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy
+from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy
+from keras.src.metrics.accuracy_metrics import binary_accuracy
+from keras.src.metrics.accuracy_metrics import categorical_accuracy
+from keras.src.metrics.accuracy_metrics import sparse_categorical_accuracy
+from keras.src.metrics.accuracy_metrics import sparse_top_k_categorical_accuracy
+from keras.src.metrics.accuracy_metrics import top_k_categorical_accuracy
+from keras.src.metrics.confusion_metrics import AUC
+from keras.src.metrics.confusion_metrics import FalseNegatives
+from keras.src.metrics.confusion_metrics import FalsePositives
+from keras.src.metrics.confusion_metrics import Precision
+from keras.src.metrics.confusion_metrics import PrecisionAtRecall
+from keras.src.metrics.confusion_metrics import Recall
+from keras.src.metrics.confusion_metrics import RecallAtPrecision
+from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity
+from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity
+from keras.src.metrics.confusion_metrics import TrueNegatives
+from keras.src.metrics.confusion_metrics import TruePositives
+from keras.src.metrics.f_score_metrics import F1Score
+from keras.src.metrics.f_score_metrics import FBetaScore
+from keras.src.metrics.hinge_metrics import CategoricalHinge
+from keras.src.metrics.hinge_metrics import Hinge
+from keras.src.metrics.hinge_metrics import SquaredHinge
+from keras.src.metrics.iou_metrics import BinaryIoU
+from keras.src.metrics.iou_metrics import IoU
+from keras.src.metrics.iou_metrics import MeanIoU
+from keras.src.metrics.iou_metrics import OneHotIoU
+from keras.src.metrics.iou_metrics import OneHotMeanIoU
+from keras.src.metrics.metric import Metric
+from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy
+from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy
+from keras.src.metrics.probabilistic_metrics import KLDivergence
+from keras.src.metrics.probabilistic_metrics import Poisson
+from keras.src.metrics.probabilistic_metrics import SparseCategoricalCrossentropy
+from keras.src.metrics.reduction_metrics import Mean
+from keras.src.metrics.reduction_metrics import MeanMetricWrapper
+from keras.src.metrics.reduction_metrics import Sum
+from keras.src.metrics.regression_metrics import CosineSimilarity
+from keras.src.metrics.regression_metrics import LogCoshError
+from keras.src.metrics.regression_metrics import MeanAbsoluteError
+from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError
+from keras.src.metrics.regression_metrics import MeanSquaredError
+from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError
+from keras.src.metrics.regression_metrics import R2Score
+from keras.src.metrics.regression_metrics import RootMeanSquaredError
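Unlike the stateless loss functions re-exported at the top of this file, the Metric classes are stateful: update_state accumulates across batches and result reads the aggregate. A sketch:

import keras

metric = keras.metrics.BinaryAccuracy()
metric.update_state([0, 1, 1], [0.1, 0.8, 0.4])  # first batch
metric.update_state([1, 0], [0.9, 0.2])          # second batch
print(float(metric.result()))                    # aggregate over both batches
metric.reset_state()                             # e.g. at the start of an epoch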
+
+# File: keras-master/keras/api/_tf_keras/keras/mixed_precision/__init__.py
+""""""
+from keras.src.dtype_policies.dtype_policy import DTypePolicy
+from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy
+from keras.src.dtype_policies.dtype_policy import dtype_policy
+from keras.src.dtype_policies.dtype_policy import dtype_policy as global_policy
+from keras.src.dtype_policies.dtype_policy import set_dtype_policy
+from keras.src.dtype_policies.dtype_policy import set_dtype_policy as set_global_policy
+from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
+
+# File: keras-master/keras/api/_tf_keras/keras/models/__init__.py
+""""""
+from keras.src.models.cloning import clone_model
+from keras.src.models.model import Model
+from keras.src.models.model import model_from_json
+from keras.src.models.sequential import Sequential
+from keras.src.saving.saving_api import load_model
+from keras.src.saving.saving_api import save_model
+
+# File: keras-master/keras/api/_tf_keras/keras/ops/__init__.py
+""""""
+from keras.api.ops import image
+from keras.api.ops import linalg
+from keras.api.ops import nn
+from keras.api.ops import numpy
+from keras.src.ops.core import associative_scan
+from keras.src.ops.core import cast
+from keras.src.ops.core import cond
+from keras.src.ops.core import convert_to_numpy
+from keras.src.ops.core import convert_to_tensor
+from keras.src.ops.core import custom_gradient
+from keras.src.ops.core import dtype
+from keras.src.ops.core import fori_loop
+from keras.src.ops.core import is_tensor
+from keras.src.ops.core import map
+from keras.src.ops.core import saturate_cast
+from keras.src.ops.core import scan
+from keras.src.ops.core import scatter
+from keras.src.ops.core import scatter_update
+from keras.src.ops.core import shape
+from keras.src.ops.core import slice
+from keras.src.ops.core import slice_update
+from keras.src.ops.core import stop_gradient
+from keras.src.ops.core import switch
+from keras.src.ops.core import unstack
+from keras.src.ops.core import vectorized_map
+from keras.src.ops.core import while_loop
+from keras.src.ops.linalg import cholesky
+from keras.src.ops.linalg import det
+from keras.src.ops.linalg import eig
+from keras.src.ops.linalg import eigh
+from keras.src.ops.linalg import inv
+from keras.src.ops.linalg import lstsq
+from keras.src.ops.linalg import lu_factor
+from keras.src.ops.linalg import norm
+from keras.src.ops.linalg import qr
+from keras.src.ops.linalg import solve
+from keras.src.ops.linalg import solve_triangular
+from keras.src.ops.linalg import svd
+from keras.src.ops.math import erf
+from keras.src.ops.math import erfinv
+from keras.src.ops.math import extract_sequences
+from keras.src.ops.math import fft
+from keras.src.ops.math import fft2
+from keras.src.ops.math import in_top_k
+from keras.src.ops.math import irfft
+from keras.src.ops.math import istft
+from keras.src.ops.math import logdet
+from keras.src.ops.math import logsumexp
+from keras.src.ops.math import rfft
+from keras.src.ops.math import rsqrt
+from keras.src.ops.math import segment_max
+from keras.src.ops.math import segment_sum
+from keras.src.ops.math import stft
+from keras.src.ops.math import top_k
+from keras.src.ops.nn import average_pool
+from keras.src.ops.nn import batch_normalization
+from keras.src.ops.nn import binary_crossentropy
+from keras.src.ops.nn import categorical_crossentropy
+from keras.src.ops.nn import conv
+from keras.src.ops.nn import conv_transpose
+from keras.src.ops.nn import ctc_decode
+from keras.src.ops.nn import ctc_loss
+from keras.src.ops.nn import depthwise_conv
+from keras.src.ops.nn import elu
+from keras.src.ops.nn import gelu
+from keras.src.ops.nn import hard_sigmoid
+from keras.src.ops.nn import hard_silu
+from keras.src.ops.nn import hard_silu as hard_swish
+from keras.src.ops.nn import leaky_relu
+from keras.src.ops.nn import log_sigmoid
+from keras.src.ops.nn import log_softmax
+from keras.src.ops.nn import max_pool
+from keras.src.ops.nn import moments
+from keras.src.ops.nn import multi_hot
+from keras.src.ops.nn import normalize
+from keras.src.ops.nn import one_hot
+from keras.src.ops.nn import psnr
+from keras.src.ops.nn import relu
+from keras.src.ops.nn import relu6
+from keras.src.ops.nn import selu
+from keras.src.ops.nn import separable_conv
+from keras.src.ops.nn import sigmoid
+from keras.src.ops.nn import silu
+from keras.src.ops.nn import silu as swish
+from keras.src.ops.nn import softmax
+from keras.src.ops.nn import softplus
+from keras.src.ops.nn import softsign
+from keras.src.ops.nn import sparse_categorical_crossentropy
+from keras.src.ops.numpy import abs
+from keras.src.ops.numpy import absolute
+from keras.src.ops.numpy import add
+from keras.src.ops.numpy import all
+from keras.src.ops.numpy import amax
+from keras.src.ops.numpy import amin
+from keras.src.ops.numpy import any
+from keras.src.ops.numpy import append
+from keras.src.ops.numpy import arange
+from keras.src.ops.numpy import arccos
+from keras.src.ops.numpy import arccosh
+from keras.src.ops.numpy import arcsin
+from keras.src.ops.numpy import arcsinh
+from keras.src.ops.numpy import arctan
+from keras.src.ops.numpy import arctan2
+from keras.src.ops.numpy import arctanh
+from keras.src.ops.numpy import argmax
+from keras.src.ops.numpy import argmin
+from keras.src.ops.numpy import argpartition
+from keras.src.ops.numpy import argsort
+from keras.src.ops.numpy import array
+from keras.src.ops.numpy import average
+from keras.src.ops.numpy import bincount
+from keras.src.ops.numpy import bitwise_and
+from keras.src.ops.numpy import bitwise_invert
+from keras.src.ops.numpy import bitwise_left_shift
+from keras.src.ops.numpy import bitwise_not
+from keras.src.ops.numpy import bitwise_or
+from keras.src.ops.numpy import bitwise_right_shift
+from keras.src.ops.numpy import bitwise_xor
+from keras.src.ops.numpy import broadcast_to
+from keras.src.ops.numpy import ceil
+from keras.src.ops.numpy import clip
+from keras.src.ops.numpy import concatenate
+from keras.src.ops.numpy import conj
+from keras.src.ops.numpy import conjugate
+from keras.src.ops.numpy import copy
+from keras.src.ops.numpy import correlate
+from keras.src.ops.numpy import cos
+from keras.src.ops.numpy import cosh
+from keras.src.ops.numpy import count_nonzero
+from keras.src.ops.numpy import cross
+from keras.src.ops.numpy import cumprod
+from keras.src.ops.numpy import cumsum
+from keras.src.ops.numpy import diag
+from keras.src.ops.numpy import diagonal
+from keras.src.ops.numpy import diff
+from keras.src.ops.numpy import digitize
+from keras.src.ops.numpy import divide
+from keras.src.ops.numpy import divide_no_nan
+from keras.src.ops.numpy import dot
+from keras.src.ops.numpy import einsum
+from keras.src.ops.numpy import empty
+from keras.src.ops.numpy import equal
+from keras.src.ops.numpy import exp
+from keras.src.ops.numpy import expand_dims
+from keras.src.ops.numpy import expm1
+from keras.src.ops.numpy import eye
+from keras.src.ops.numpy import flip
+from keras.src.ops.numpy import floor
+from keras.src.ops.numpy import floor_divide
+from keras.src.ops.numpy import full
+from keras.src.ops.numpy import full_like
+from keras.src.ops.numpy import get_item
+from keras.src.ops.numpy import greater
+from keras.src.ops.numpy import greater_equal
+from keras.src.ops.numpy import hstack
+from keras.src.ops.numpy import identity
+from keras.src.ops.numpy import imag
+from keras.src.ops.numpy import isclose
+from keras.src.ops.numpy import isfinite
+from keras.src.ops.numpy import isinf
+from keras.src.ops.numpy import isnan
+from keras.src.ops.numpy import left_shift
+from keras.src.ops.numpy import less
+from keras.src.ops.numpy import less_equal
+from keras.src.ops.numpy import linspace
+from keras.src.ops.numpy import log
+from keras.src.ops.numpy import log1p
+from keras.src.ops.numpy import log2
+from keras.src.ops.numpy import log10
+from keras.src.ops.numpy import logaddexp
+from keras.src.ops.numpy import logical_and
+from keras.src.ops.numpy import logical_not
+from keras.src.ops.numpy import logical_or
+from keras.src.ops.numpy import logical_xor
+from keras.src.ops.numpy import logspace
+from keras.src.ops.numpy import matmul
+from keras.src.ops.numpy import max
+from keras.src.ops.numpy import maximum
+from keras.src.ops.numpy import mean
+from keras.src.ops.numpy import median
+from keras.src.ops.numpy import meshgrid
+from keras.src.ops.numpy import min
+from keras.src.ops.numpy import minimum
+from keras.src.ops.numpy import mod
+from keras.src.ops.numpy import moveaxis
+from keras.src.ops.numpy import multiply
+from keras.src.ops.numpy import nan_to_num
+from keras.src.ops.numpy import ndim
+from keras.src.ops.numpy import negative
+from keras.src.ops.numpy import nonzero
+from keras.src.ops.numpy import not_equal
+from keras.src.ops.numpy import ones
+from keras.src.ops.numpy import ones_like
+from keras.src.ops.numpy import outer
+from keras.src.ops.numpy import pad
+from keras.src.ops.numpy import power
+from keras.src.ops.numpy import prod
+from keras.src.ops.numpy import quantile
+from keras.src.ops.numpy import ravel
+from keras.src.ops.numpy import real
+from keras.src.ops.numpy import reciprocal
+from keras.src.ops.numpy import repeat
+from keras.src.ops.numpy import reshape
+from keras.src.ops.numpy import right_shift
+from keras.src.ops.numpy import roll
+from keras.src.ops.numpy import round
+from keras.src.ops.numpy import searchsorted
+from keras.src.ops.numpy import select
+from keras.src.ops.numpy import sign
+from keras.src.ops.numpy import sin
+from keras.src.ops.numpy import sinh
+from keras.src.ops.numpy import size
+from keras.src.ops.numpy import slogdet
+from keras.src.ops.numpy import sort
+from keras.src.ops.numpy import split
+from keras.src.ops.numpy import sqrt
+from keras.src.ops.numpy import square
+from keras.src.ops.numpy import squeeze
+from keras.src.ops.numpy import stack
+from keras.src.ops.numpy import std
+from keras.src.ops.numpy import subtract
+from keras.src.ops.numpy import sum
+from keras.src.ops.numpy import swapaxes
+from keras.src.ops.numpy import take
+from keras.src.ops.numpy import take_along_axis
+from keras.src.ops.numpy import tan
+from keras.src.ops.numpy import tanh
+from keras.src.ops.numpy import tensordot
+from keras.src.ops.numpy import tile
+from keras.src.ops.numpy import trace
+from keras.src.ops.numpy import transpose
+from keras.src.ops.numpy import tri
+from keras.src.ops.numpy import tril
+from keras.src.ops.numpy import triu
+from keras.src.ops.numpy import true_divide
+from keras.src.ops.numpy import trunc
+from keras.src.ops.numpy import var
+from keras.src.ops.numpy import vdot
+from keras.src.ops.numpy import vectorize
+from keras.src.ops.numpy import vstack
+from keras.src.ops.numpy import where
+from keras.src.ops.numpy import zeros
+from keras.src.ops.numpy import zeros_like
+
+# File: keras-master/keras/api/_tf_keras/keras/ops/image/__init__.py
+""""""
+from keras.src.ops.image import affine_transform
+from keras.src.ops.image import crop_images
+from keras.src.ops.image import extract_patches
+from keras.src.ops.image import hsv_to_rgb
+from keras.src.ops.image import map_coordinates
+from keras.src.ops.image import pad_images
+from keras.src.ops.image import resize
+from keras.src.ops.image import rgb_to_grayscale
+from keras.src.ops.image import rgb_to_hsv
+
+# File: keras-master/keras/api/_tf_keras/keras/ops/linalg/__init__.py
+""""""
+from keras.src.ops.linalg import cholesky
+from keras.src.ops.linalg import det
+from keras.src.ops.linalg import eig
+from keras.src.ops.linalg import eigh
+from keras.src.ops.linalg import inv
+from keras.src.ops.linalg import lstsq
+from keras.src.ops.linalg import lu_factor
+from keras.src.ops.linalg import norm
+from keras.src.ops.linalg import qr
+from keras.src.ops.linalg import solve
+from keras.src.ops.linalg import solve_triangular
+from keras.src.ops.linalg import svd
+
+# File: keras-master/keras/api/_tf_keras/keras/ops/nn/__init__.py
+""""""
+from keras.src.ops.nn import average_pool
+from keras.src.ops.nn import batch_normalization
+from keras.src.ops.nn import binary_crossentropy
+from keras.src.ops.nn import categorical_crossentropy
+from keras.src.ops.nn import conv
+from keras.src.ops.nn import conv_transpose
+from keras.src.ops.nn import ctc_decode
+from keras.src.ops.nn import ctc_loss
+from keras.src.ops.nn import depthwise_conv
+from keras.src.ops.nn import elu
+from keras.src.ops.nn import gelu
+from keras.src.ops.nn import hard_sigmoid
+from keras.src.ops.nn import hard_silu
+from keras.src.ops.nn import hard_silu as hard_swish
+from keras.src.ops.nn import leaky_relu
+from keras.src.ops.nn import log_sigmoid
+from keras.src.ops.nn import log_softmax
+from keras.src.ops.nn import max_pool
+from keras.src.ops.nn import moments
+from keras.src.ops.nn import multi_hot
+from keras.src.ops.nn import normalize
+from keras.src.ops.nn import one_hot
+from keras.src.ops.nn import psnr
+from keras.src.ops.nn import relu
+from keras.src.ops.nn import relu6
+from keras.src.ops.nn import selu
+from keras.src.ops.nn import separable_conv
+from keras.src.ops.nn import sigmoid
+from keras.src.ops.nn import silu
+from keras.src.ops.nn import silu as swish
+from keras.src.ops.nn import softmax
+from keras.src.ops.nn import softplus
+from keras.src.ops.nn import softsign
+from keras.src.ops.nn import sparse_categorical_crossentropy
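keras.ops is the backend-agnostic array API: ops.numpy mirrors NumPy, while nn, linalg, image, and the core ops cover the rest, so the same code runs on the TensorFlow, JAX, or PyTorch backend. A small sketch:

import keras
from keras import ops

x = keras.random.uniform((2, 4))

h = ops.matmul(x, ops.transpose(x))   # NumPy-style linear algebra
p = ops.nn.softmax(h, axis=-1)        # neural-net ops live under ops.nn
print(ops.convert_to_numpy(ops.sum(p, axis=-1)))  # each row sums to 1.0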
+
+# File: keras-master/keras/api/_tf_keras/keras/ops/numpy/__init__.py
+""""""
+from keras.src.ops.numpy import abs
+from keras.src.ops.numpy import absolute
+from keras.src.ops.numpy import add
+from keras.src.ops.numpy import all
+from keras.src.ops.numpy import amax
+from keras.src.ops.numpy import amin
+from keras.src.ops.numpy import any
+from keras.src.ops.numpy import append
+from keras.src.ops.numpy import arange
+from keras.src.ops.numpy import arccos
+from keras.src.ops.numpy import arccosh
+from keras.src.ops.numpy import arcsin
+from keras.src.ops.numpy import arcsinh
+from keras.src.ops.numpy import arctan
+from keras.src.ops.numpy import arctan2
+from keras.src.ops.numpy import arctanh
+from keras.src.ops.numpy import argmax
+from keras.src.ops.numpy import argmin
+from keras.src.ops.numpy import argpartition
+from keras.src.ops.numpy import argsort
+from keras.src.ops.numpy import array
+from keras.src.ops.numpy import average
+from keras.src.ops.numpy import bincount
+from keras.src.ops.numpy import bitwise_and
+from keras.src.ops.numpy import bitwise_invert
+from keras.src.ops.numpy import bitwise_left_shift
+from keras.src.ops.numpy import bitwise_not
+from keras.src.ops.numpy import bitwise_or
+from keras.src.ops.numpy import bitwise_right_shift
+from keras.src.ops.numpy import bitwise_xor
+from keras.src.ops.numpy import broadcast_to
+from keras.src.ops.numpy import ceil
+from keras.src.ops.numpy import clip
+from keras.src.ops.numpy import concatenate
+from keras.src.ops.numpy import conj
+from keras.src.ops.numpy import conjugate
+from keras.src.ops.numpy import copy
+from keras.src.ops.numpy import correlate
+from keras.src.ops.numpy import cos
+from keras.src.ops.numpy import cosh
+from keras.src.ops.numpy import count_nonzero
+from keras.src.ops.numpy import cross
+from keras.src.ops.numpy import cumprod
+from keras.src.ops.numpy import cumsum
+from keras.src.ops.numpy import diag
+from keras.src.ops.numpy import diagonal
+from keras.src.ops.numpy import diff
+from keras.src.ops.numpy import digitize
+from keras.src.ops.numpy import divide
+from keras.src.ops.numpy import divide_no_nan
+from keras.src.ops.numpy import dot
+from keras.src.ops.numpy import einsum
+from keras.src.ops.numpy import empty
+from keras.src.ops.numpy import equal
+from keras.src.ops.numpy import exp
+from keras.src.ops.numpy import expand_dims
+from keras.src.ops.numpy import expm1
+from keras.src.ops.numpy import eye
+from keras.src.ops.numpy import flip
+from keras.src.ops.numpy import floor
+from keras.src.ops.numpy import floor_divide
+from keras.src.ops.numpy import full
+from keras.src.ops.numpy import full_like
+from keras.src.ops.numpy import get_item
+from keras.src.ops.numpy import greater
+from keras.src.ops.numpy import greater_equal
+from keras.src.ops.numpy import hstack
+from keras.src.ops.numpy import identity
+from keras.src.ops.numpy import imag
+from keras.src.ops.numpy import isclose
+from keras.src.ops.numpy import isfinite
+from keras.src.ops.numpy import isinf
+from keras.src.ops.numpy import isnan
+from keras.src.ops.numpy import left_shift
+from keras.src.ops.numpy import less
+from keras.src.ops.numpy import less_equal
+from keras.src.ops.numpy import linspace
+from keras.src.ops.numpy import log
+from keras.src.ops.numpy import log1p
+from keras.src.ops.numpy import log2
+from keras.src.ops.numpy import log10
+from keras.src.ops.numpy import logaddexp
+from keras.src.ops.numpy import logical_and
+from keras.src.ops.numpy import logical_not
+from keras.src.ops.numpy import logical_or
+from keras.src.ops.numpy import logical_xor
+from keras.src.ops.numpy import logspace
+from keras.src.ops.numpy import matmul
+from keras.src.ops.numpy import max
+from keras.src.ops.numpy import maximum
+from keras.src.ops.numpy import mean
+from keras.src.ops.numpy import median
+from keras.src.ops.numpy import meshgrid
+from keras.src.ops.numpy import min
+from keras.src.ops.numpy import minimum
+from keras.src.ops.numpy import mod
+from keras.src.ops.numpy import moveaxis
+from keras.src.ops.numpy import multiply
+from keras.src.ops.numpy import nan_to_num
+from keras.src.ops.numpy import ndim
+from keras.src.ops.numpy import negative
+from keras.src.ops.numpy import nonzero
+from keras.src.ops.numpy import not_equal
+from keras.src.ops.numpy import ones
+from keras.src.ops.numpy import ones_like
+from keras.src.ops.numpy import outer
+from keras.src.ops.numpy import pad
+from keras.src.ops.numpy import power
+from keras.src.ops.numpy import prod
+from keras.src.ops.numpy import quantile
+from keras.src.ops.numpy import ravel
+from keras.src.ops.numpy import real
+from keras.src.ops.numpy import reciprocal
+from keras.src.ops.numpy import repeat
+from keras.src.ops.numpy import reshape
+from keras.src.ops.numpy import right_shift
+from keras.src.ops.numpy import roll
+from keras.src.ops.numpy import round
+from keras.src.ops.numpy import select
+from keras.src.ops.numpy import sign
+from keras.src.ops.numpy import sin
+from keras.src.ops.numpy import sinh
+from keras.src.ops.numpy import size
+from keras.src.ops.numpy import slogdet
+from keras.src.ops.numpy import sort
+from keras.src.ops.numpy import split
+from keras.src.ops.numpy import sqrt
+from keras.src.ops.numpy import square
+from keras.src.ops.numpy import squeeze
+from keras.src.ops.numpy import stack
+from keras.src.ops.numpy import std
+from keras.src.ops.numpy import subtract
+from keras.src.ops.numpy import sum
+from keras.src.ops.numpy import swapaxes
+from keras.src.ops.numpy import take
+from keras.src.ops.numpy import take_along_axis
+from keras.src.ops.numpy import tan
+from keras.src.ops.numpy import tanh
+from keras.src.ops.numpy import tensordot
+from keras.src.ops.numpy import tile
+from keras.src.ops.numpy import trace
+from keras.src.ops.numpy import transpose
+from keras.src.ops.numpy import tri
+from keras.src.ops.numpy import tril
+from keras.src.ops.numpy import triu
+from keras.src.ops.numpy import true_divide
+from keras.src.ops.numpy import trunc
+from keras.src.ops.numpy import var
+from keras.src.ops.numpy import vdot
+from keras.src.ops.numpy import vectorize
+from keras.src.ops.numpy import vstack
+from keras.src.ops.numpy import where
+from keras.src.ops.numpy import zeros
+from keras.src.ops.numpy import zeros_like
+
+# File: keras-master/keras/api/_tf_keras/keras/optimizers/__init__.py
+""""""
+from keras.api.optimizers import legacy
+from keras.api.optimizers import schedules
+from keras.src.optimizers import deserialize
+from keras.src.optimizers import get
+from keras.src.optimizers import serialize
+from keras.src.optimizers.adadelta import Adadelta
+from keras.src.optimizers.adafactor import Adafactor
+from keras.src.optimizers.adagrad import Adagrad
+from keras.src.optimizers.adam import Adam
+from keras.src.optimizers.adamax import Adamax
+from keras.src.optimizers.adamw import AdamW
+from keras.src.optimizers.ftrl import Ftrl
+from keras.src.optimizers.lamb import Lamb
+from keras.src.optimizers.lion import Lion
+from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
+from keras.src.optimizers.nadam import Nadam
+from keras.src.optimizers.optimizer import Optimizer
+from keras.src.optimizers.rmsprop import RMSprop
+from keras.src.optimizers.sgd import SGD
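Optimizers can be passed to compile by string or constructed explicitly; the explicit form exposes settings such as weight decay and gradient clipping. For example:

import keras

optimizer = keras.optimizers.AdamW(
    learning_rate=3e-4,
    weight_decay=0.004,
    clipnorm=1.0,  # clip gradients by global norm
)

model = keras.Sequential([keras.layers.Dense(1)])
model.compile(optimizer=optimizer, loss="mse")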
+
+# File: keras-master/keras/api/_tf_keras/keras/optimizers/legacy/__init__.py
+""""""
+from keras.src.optimizers import LegacyOptimizerWarning as Adagrad
+from keras.src.optimizers import LegacyOptimizerWarning as Adam
+from keras.src.optimizers import LegacyOptimizerWarning as Ftrl
+from keras.src.optimizers import LegacyOptimizerWarning as Optimizer
+from keras.src.optimizers import LegacyOptimizerWarning as RMSprop
+from keras.src.optimizers import LegacyOptimizerWarning as SGD
+
+# File: keras-master/keras/api/_tf_keras/keras/optimizers/schedules/__init__.py
+""""""
+from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay
+from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecayRestarts
+from keras.src.optimizers.schedules.learning_rate_schedule import ExponentialDecay
+from keras.src.optimizers.schedules.learning_rate_schedule import InverseTimeDecay
+from keras.src.optimizers.schedules.learning_rate_schedule import LearningRateSchedule
+from keras.src.optimizers.schedules.learning_rate_schedule import PiecewiseConstantDecay
+from keras.src.optimizers.schedules.learning_rate_schedule import PolynomialDecay
+from keras.src.optimizers.schedules.learning_rate_schedule import deserialize
+from keras.src.optimizers.schedules.learning_rate_schedule import serialize
+
+# File: keras-master/keras/api/_tf_keras/keras/preprocessing/__init__.py
+""""""
+from keras.api._tf_keras.keras.preprocessing import image
+from keras.api._tf_keras.keras.preprocessing import sequence
+from keras.api._tf_keras.keras.preprocessing import text
+from keras.src.utils.image_dataset_utils import image_dataset_from_directory
+from keras.src.utils.text_dataset_utils import text_dataset_from_directory
+from keras.src.utils.timeseries_dataset_utils import timeseries_dataset_from_array
+
+# File: keras-master/keras/api/_tf_keras/keras/preprocessing/image/__init__.py
+""""""
+from keras.src.legacy.preprocessing.image import DirectoryIterator
+from keras.src.legacy.preprocessing.image import ImageDataGenerator
+from keras.src.legacy.preprocessing.image import Iterator
+from keras.src.legacy.preprocessing.image import NumpyArrayIterator
+from keras.src.legacy.preprocessing.image import apply_affine_transform
+from keras.src.legacy.preprocessing.image import apply_brightness_shift
+from keras.src.legacy.preprocessing.image import apply_channel_shift
+from keras.src.legacy.preprocessing.image import random_brightness
+from keras.src.legacy.preprocessing.image import random_channel_shift
+from keras.src.legacy.preprocessing.image import random_rotation
+from keras.src.legacy.preprocessing.image import random_shear
+from keras.src.legacy.preprocessing.image import random_shift
+from keras.src.legacy.preprocessing.image import random_zoom
+from keras.src.utils.image_utils import array_to_img
+from keras.src.utils.image_utils import img_to_array
+from keras.src.utils.image_utils import load_img
+from keras.src.utils.image_utils import save_img
+from keras.src.utils.image_utils import smart_resize
+
+# File: keras-master/keras/api/_tf_keras/keras/quantizers/__init__.py
+""""""
+from keras.src.quantizers import deserialize
+from keras.src.quantizers import get
+from keras.src.quantizers import serialize
+from keras.src.quantizers.quantizers import AbsMaxQuantizer
+from keras.src.quantizers.quantizers import Quantizer
+from keras.src.quantizers.quantizers import abs_max_quantize
+from keras.src.quantizers.quantizers import compute_float8_amax_history
+from keras.src.quantizers.quantizers import compute_float8_scale
+from keras.src.quantizers.quantizers import quantize_and_dequantize
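The schedule classes under optimizers/schedules are accepted anywhere a float learning rate is, and are themselves callables over the step counter. A sketch with exponential decay:

import keras

lr = keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=1e-2,
    decay_steps=1000,
    decay_rate=0.96,
)
optimizer = keras.optimizers.SGD(learning_rate=lr, momentum=0.9)

print(float(lr(0)), float(lr(1000)))  # 0.01, then 0.01 * 0.96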
+
+# File: keras-master/keras/api/_tf_keras/keras/random/__init__.py
+""""""
+from keras.src.random.random import beta
+from keras.src.random.random import binomial
+from keras.src.random.random import categorical
+from keras.src.random.random import dropout
+from keras.src.random.random import gamma
+from keras.src.random.random import normal
+from keras.src.random.random import randint
+from keras.src.random.random import shuffle
+from keras.src.random.random import truncated_normal
+from keras.src.random.random import uniform
+from keras.src.random.seed_generator import SeedGenerator
+
+# File: keras-master/keras/api/_tf_keras/keras/regularizers/__init__.py
+""""""
+from keras.src.regularizers import deserialize
+from keras.src.regularizers import get
+from keras.src.regularizers import serialize
+from keras.src.regularizers.regularizers import L1
+from keras.src.regularizers.regularizers import L1 as l1
+from keras.src.regularizers.regularizers import L1L2
+from keras.src.regularizers.regularizers import L1L2 as l1_l2
+from keras.src.regularizers.regularizers import L2
+from keras.src.regularizers.regularizers import L2 as l2
+from keras.src.regularizers.regularizers import OrthogonalRegularizer
+from keras.src.regularizers.regularizers import OrthogonalRegularizer as orthogonal_regularizer
+from keras.src.regularizers.regularizers import Regularizer
+
+# File: keras-master/keras/api/_tf_keras/keras/saving/__init__.py
+""""""
+from keras.src.saving.object_registration import CustomObjectScope
+from keras.src.saving.object_registration import CustomObjectScope as custom_object_scope
+from keras.src.saving.object_registration import get_custom_objects
+from keras.src.saving.object_registration import get_registered_name
+from keras.src.saving.object_registration import get_registered_object
+from keras.src.saving.object_registration import register_keras_serializable
+from keras.src.saving.saving_api import load_model
+from keras.src.saving.saving_api import load_weights
+from keras.src.saving.saving_api import save_model
+from keras.src.saving.saving_api import save_weights
+from keras.src.saving.serialization_lib import deserialize_keras_object
+from keras.src.saving.serialization_lib import serialize_keras_object
+
+# File: keras-master/keras/api/_tf_keras/keras/tree/__init__.py
+""""""
+from keras.src.tree.tree_api import assert_same_structure
+from keras.src.tree.tree_api import flatten
+from keras.src.tree.tree_api import is_nested
+from keras.src.tree.tree_api import lists_to_tuples
+from keras.src.tree.tree_api import map_shape_structure
+from keras.src.tree.tree_api import map_structure
+from keras.src.tree.tree_api import map_structure_up_to
+from keras.src.tree.tree_api import pack_sequence_as
+from keras.src.tree.tree_api import traverse
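keras.tree manipulates arbitrarily nested structures (dicts, lists, tuples) of tensors or plain values, the same machinery the RNN and saving code uses internally. A sketch (dict leaves are flattened in sorted-key order):

import keras

nested = {"a": 1, "b": (2, 3), "c": [4]}

print(keras.tree.flatten(nested))        # [1, 2, 3, 4]
print(keras.tree.map_structure(lambda v: v * 2, nested))
print(keras.tree.is_nested(nested))      # True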
import mish +from keras.src.activations.activations import relu +from keras.src.activations.activations import relu6 +from keras.src.activations.activations import selu +from keras.src.activations.activations import sigmoid +from keras.src.activations.activations import silu +from keras.src.activations.activations import silu as swish +from keras.src.activations.activations import softmax +from keras.src.activations.activations import softplus +from keras.src.activations.activations import softsign +from keras.src.activations.activations import tanh + +# File: keras-master/keras/api/applications/__init__.py +"""""" +from keras.api.applications import convnext +from keras.api.applications import densenet +from keras.api.applications import efficientnet +from keras.api.applications import efficientnet_v2 +from keras.api.applications import imagenet_utils +from keras.api.applications import inception_resnet_v2 +from keras.api.applications import inception_v3 +from keras.api.applications import mobilenet +from keras.api.applications import mobilenet_v2 +from keras.api.applications import mobilenet_v3 +from keras.api.applications import nasnet +from keras.api.applications import resnet +from keras.api.applications import resnet50 +from keras.api.applications import resnet_v2 +from keras.api.applications import vgg16 +from keras.api.applications import vgg19 +from keras.api.applications import xception +from keras.src.applications.convnext import ConvNeXtBase +from keras.src.applications.convnext import ConvNeXtLarge +from keras.src.applications.convnext import ConvNeXtSmall +from keras.src.applications.convnext import ConvNeXtTiny +from keras.src.applications.convnext import ConvNeXtXLarge +from keras.src.applications.densenet import DenseNet121 +from keras.src.applications.densenet import DenseNet169 +from keras.src.applications.densenet import DenseNet201 +from keras.src.applications.efficientnet import EfficientNetB0 +from keras.src.applications.efficientnet import EfficientNetB1 +from keras.src.applications.efficientnet import EfficientNetB2 +from keras.src.applications.efficientnet import EfficientNetB3 +from keras.src.applications.efficientnet import EfficientNetB4 +from keras.src.applications.efficientnet import EfficientNetB5 +from keras.src.applications.efficientnet import EfficientNetB6 +from keras.src.applications.efficientnet import EfficientNetB7 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B0 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B1 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B2 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B3 +from keras.src.applications.efficientnet_v2 import EfficientNetV2L +from keras.src.applications.efficientnet_v2 import EfficientNetV2M +from keras.src.applications.efficientnet_v2 import EfficientNetV2S +from keras.src.applications.inception_resnet_v2 import InceptionResNetV2 +from keras.src.applications.inception_v3 import InceptionV3 +from keras.src.applications.mobilenet import MobileNet +from keras.src.applications.mobilenet_v2 import MobileNetV2 +from keras.src.applications.mobilenet_v3 import MobileNetV3Large +from keras.src.applications.mobilenet_v3 import MobileNetV3Small +from keras.src.applications.nasnet import NASNetLarge +from keras.src.applications.nasnet import NASNetMobile +from keras.src.applications.resnet import ResNet50 +from keras.src.applications.resnet import ResNet101 +from keras.src.applications.resnet import ResNet152
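
# Usage sketch (annotation, not part of the dumped file): the factory
# functions collected in keras/api/applications build ready-made vision
# architectures; `weights="imagenet"` would download pretrained weights, so
# this illustrative snippet sticks to random weights and random input.
import numpy as np
import keras

model = keras.applications.ResNet50(weights=None)
x = np.random.rand(1, 224, 224, 3).astype("float32") * 255.0
preds = model.predict(keras.applications.resnet.preprocess_input(x))
print(preds.shape)  # (1, 1000): one score per ImageNet class
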
+from keras.src.applications.resnet_v2 import ResNet50V2 +from keras.src.applications.resnet_v2 import ResNet101V2 +from keras.src.applications.resnet_v2 import ResNet152V2 +from keras.src.applications.vgg16 import VGG16 +from keras.src.applications.vgg19 import VGG19 +from keras.src.applications.xception import Xception + +# File: keras-master/keras/api/applications/convnext/__init__.py +"""""" +from keras.src.applications.convnext import ConvNeXtBase +from keras.src.applications.convnext import ConvNeXtLarge +from keras.src.applications.convnext import ConvNeXtSmall +from keras.src.applications.convnext import ConvNeXtTiny +from keras.src.applications.convnext import ConvNeXtXLarge +from keras.src.applications.convnext import decode_predictions +from keras.src.applications.convnext import preprocess_input + +# File: keras-master/keras/api/applications/efficientnet/__init__.py +"""""" +from keras.src.applications.efficientnet import EfficientNetB0 +from keras.src.applications.efficientnet import EfficientNetB1 +from keras.src.applications.efficientnet import EfficientNetB2 +from keras.src.applications.efficientnet import EfficientNetB3 +from keras.src.applications.efficientnet import EfficientNetB4 +from keras.src.applications.efficientnet import EfficientNetB5 +from keras.src.applications.efficientnet import EfficientNetB6 +from keras.src.applications.efficientnet import EfficientNetB7 +from keras.src.applications.efficientnet import decode_predictions +from keras.src.applications.efficientnet import preprocess_input + +# File: keras-master/keras/api/applications/efficientnet_v2/__init__.py +"""""" +from keras.src.applications.efficientnet_v2 import EfficientNetV2B0 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B1 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B2 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B3 +from keras.src.applications.efficientnet_v2 import EfficientNetV2L +from keras.src.applications.efficientnet_v2 import EfficientNetV2M +from keras.src.applications.efficientnet_v2 import EfficientNetV2S +from keras.src.applications.efficientnet_v2 import decode_predictions +from keras.src.applications.efficientnet_v2 import preprocess_input + +# File: keras-master/keras/api/backend/__init__.py +"""""" +from keras.src.backend.common.dtypes import result_type +from keras.src.backend.common.global_state import clear_session +from keras.src.backend.common.keras_tensor import is_keras_tensor +from keras.src.backend.common.variables import is_float_dtype +from keras.src.backend.common.variables import is_int_dtype +from keras.src.backend.common.variables import standardize_dtype +from keras.src.backend.config import backend +from keras.src.backend.config import epsilon +from keras.src.backend.config import floatx +from keras.src.backend.config import image_data_format +from keras.src.backend.config import set_epsilon +from keras.src.backend.config import set_floatx +from keras.src.backend.config import set_image_data_format +from keras.src.utils.naming import get_uid + +# File: keras-master/keras/api/callbacks/__init__.py +"""""" +from keras.src.callbacks.backup_and_restore import BackupAndRestore +from keras.src.callbacks.callback import Callback +from keras.src.callbacks.callback_list import CallbackList +from keras.src.callbacks.csv_logger import CSVLogger +from keras.src.callbacks.early_stopping import EarlyStopping +from keras.src.callbacks.history import History +from keras.src.callbacks.lambda_callback import LambdaCallback
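
# Usage sketch (annotation, not part of the dumped file): the callback
# classes collected in keras/api/callbacks plug into `model.fit`; the toy
# model, data, and checkpoint filename below are illustrative.
import numpy as np
import keras

model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(1)])
model.compile(optimizer="adam", loss="mse")
x = np.random.rand(64, 4).astype("float32")
y = np.random.rand(64, 1).astype("float32")
model.fit(
    x, y, validation_split=0.25, epochs=5,
    callbacks=[
        keras.callbacks.EarlyStopping(monitor="val_loss", patience=2),
        keras.callbacks.ModelCheckpoint("best.keras", save_best_only=True),
    ],
)
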
+from keras.src.callbacks.learning_rate_scheduler import LearningRateScheduler +from keras.src.callbacks.model_checkpoint import ModelCheckpoint +from keras.src.callbacks.progbar_logger import ProgbarLogger +from keras.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau +from keras.src.callbacks.remote_monitor import RemoteMonitor +from keras.src.callbacks.swap_ema_weights import SwapEMAWeights +from keras.src.callbacks.tensorboard import TensorBoard +from keras.src.callbacks.terminate_on_nan import TerminateOnNaN + +# File: keras-master/keras/api/config/__init__.py +"""""" +from keras.src.backend.config import backend +from keras.src.backend.config import epsilon +from keras.src.backend.config import floatx +from keras.src.backend.config import image_data_format +from keras.src.backend.config import set_epsilon +from keras.src.backend.config import set_floatx +from keras.src.backend.config import set_image_data_format +from keras.src.dtype_policies.dtype_policy import dtype_policy +from keras.src.dtype_policies.dtype_policy import set_dtype_policy +from keras.src.saving.serialization_lib import enable_unsafe_deserialization +from keras.src.utils.backend_utils import set_backend +from keras.src.utils.io_utils import disable_interactive_logging +from keras.src.utils.io_utils import enable_interactive_logging +from keras.src.utils.io_utils import is_interactive_logging_enabled +from keras.src.utils.traceback_utils import disable_traceback_filtering +from keras.src.utils.traceback_utils import enable_traceback_filtering +from keras.src.utils.traceback_utils import is_traceback_filtering_enabled + +# File: keras-master/keras/api/constraints/__init__.py +"""""" +from keras.src.constraints import deserialize +from keras.src.constraints import get +from keras.src.constraints import serialize +from keras.src.constraints.constraints import Constraint +from keras.src.constraints.constraints import MaxNorm +from keras.src.constraints.constraints import MaxNorm as max_norm +from keras.src.constraints.constraints import MinMaxNorm +from keras.src.constraints.constraints import MinMaxNorm as min_max_norm +from keras.src.constraints.constraints import NonNeg +from keras.src.constraints.constraints import NonNeg as non_neg +from keras.src.constraints.constraints import UnitNorm +from keras.src.constraints.constraints import UnitNorm as unit_norm + +# File: keras-master/keras/api/datasets/__init__.py +"""""" +from keras.api.datasets import boston_housing +from keras.api.datasets import california_housing +from keras.api.datasets import cifar10 +from keras.api.datasets import cifar100 +from keras.api.datasets import fashion_mnist +from keras.api.datasets import imdb +from keras.api.datasets import mnist +from keras.api.datasets import reuters + +# File: keras-master/keras/api/distribution/__init__.py +"""""" +from keras.src.distribution.distribution_lib import DataParallel +from keras.src.distribution.distribution_lib import DeviceMesh +from keras.src.distribution.distribution_lib import LayoutMap +from keras.src.distribution.distribution_lib import ModelParallel +from keras.src.distribution.distribution_lib import TensorLayout +from keras.src.distribution.distribution_lib import distribute_tensor +from keras.src.distribution.distribution_lib import distribution +from keras.src.distribution.distribution_lib import initialize +from keras.src.distribution.distribution_lib import list_devices +from keras.src.distribution.distribution_lib import set_distribution + +# File: 
keras-master/keras/api/dtype_policies/__init__.py +"""""" +from keras.src.dtype_policies import deserialize +from keras.src.dtype_policies import get +from keras.src.dtype_policies import serialize +from keras.src.dtype_policies.dtype_policy import DTypePolicy +from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy +from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy +from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy +from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap + +# File: keras-master/keras/api/initializers/__init__.py +"""""" +from keras.src.initializers import deserialize +from keras.src.initializers import get +from keras.src.initializers import serialize +from keras.src.initializers.constant_initializers import Constant +from keras.src.initializers.constant_initializers import Constant as constant +from keras.src.initializers.constant_initializers import Identity +from keras.src.initializers.constant_initializers import Identity as IdentityInitializer +from keras.src.initializers.constant_initializers import Identity as identity +from keras.src.initializers.constant_initializers import Ones +from keras.src.initializers.constant_initializers import Ones as ones +from keras.src.initializers.constant_initializers import Zeros +from keras.src.initializers.constant_initializers import Zeros as zeros +from keras.src.initializers.initializer import Initializer +from keras.src.initializers.random_initializers import GlorotNormal +from keras.src.initializers.random_initializers import GlorotNormal as glorot_normal +from keras.src.initializers.random_initializers import GlorotUniform +from keras.src.initializers.random_initializers import GlorotUniform as glorot_uniform +from keras.src.initializers.random_initializers import HeNormal +from keras.src.initializers.random_initializers import HeNormal as he_normal +from keras.src.initializers.random_initializers import HeUniform +from keras.src.initializers.random_initializers import HeUniform as he_uniform +from keras.src.initializers.random_initializers import LecunNormal +from keras.src.initializers.random_initializers import LecunNormal as lecun_normal +from keras.src.initializers.random_initializers import LecunUniform +from keras.src.initializers.random_initializers import LecunUniform as lecun_uniform +from keras.src.initializers.random_initializers import OrthogonalInitializer +from keras.src.initializers.random_initializers import OrthogonalInitializer as Orthogonal +from keras.src.initializers.random_initializers import OrthogonalInitializer as orthogonal +from keras.src.initializers.random_initializers import RandomNormal +from keras.src.initializers.random_initializers import RandomNormal as random_normal +from keras.src.initializers.random_initializers import RandomUniform +from keras.src.initializers.random_initializers import RandomUniform as random_uniform +from keras.src.initializers.random_initializers import TruncatedNormal +from keras.src.initializers.random_initializers import TruncatedNormal as truncated_normal +from keras.src.initializers.random_initializers import VarianceScaling +from keras.src.initializers.random_initializers import VarianceScaling as variance_scaling + +# File: keras-master/keras/api/layers/__init__.py +"""""" +from keras.src.export.export_lib import TFSMLayer +from keras.src.layers import deserialize +from keras.src.layers import serialize +from keras.src.layers.activations.activation import Activation +from 
keras.src.layers.activations.elu import ELU +from keras.src.layers.activations.leaky_relu import LeakyReLU +from keras.src.layers.activations.prelu import PReLU +from keras.src.layers.activations.relu import ReLU +from keras.src.layers.activations.softmax import Softmax +from keras.src.layers.attention.additive_attention import AdditiveAttention +from keras.src.layers.attention.attention import Attention +from keras.src.layers.attention.grouped_query_attention import GroupedQueryAttention as GroupQueryAttention +from keras.src.layers.attention.multi_head_attention import MultiHeadAttention +from keras.src.layers.convolutional.conv1d import Conv1D +from keras.src.layers.convolutional.conv1d import Conv1D as Convolution1D +from keras.src.layers.convolutional.conv1d_transpose import Conv1DTranspose +from keras.src.layers.convolutional.conv1d_transpose import Conv1DTranspose as Convolution1DTranspose +from keras.src.layers.convolutional.conv2d import Conv2D +from keras.src.layers.convolutional.conv2d import Conv2D as Convolution2D +from keras.src.layers.convolutional.conv2d_transpose import Conv2DTranspose +from keras.src.layers.convolutional.conv2d_transpose import Conv2DTranspose as Convolution2DTranspose +from keras.src.layers.convolutional.conv3d import Conv3D +from keras.src.layers.convolutional.conv3d import Conv3D as Convolution3D +from keras.src.layers.convolutional.conv3d_transpose import Conv3DTranspose +from keras.src.layers.convolutional.conv3d_transpose import Conv3DTranspose as Convolution3DTranspose +from keras.src.layers.convolutional.depthwise_conv1d import DepthwiseConv1D +from keras.src.layers.convolutional.depthwise_conv2d import DepthwiseConv2D +from keras.src.layers.convolutional.separable_conv1d import SeparableConv1D +from keras.src.layers.convolutional.separable_conv1d import SeparableConv1D as SeparableConvolution1D +from keras.src.layers.convolutional.separable_conv2d import SeparableConv2D +from keras.src.layers.convolutional.separable_conv2d import SeparableConv2D as SeparableConvolution2D +from keras.src.layers.core.dense import Dense +from keras.src.layers.core.einsum_dense import EinsumDense +from keras.src.layers.core.embedding import Embedding +from keras.src.layers.core.identity import Identity +from keras.src.layers.core.input_layer import Input +from keras.src.layers.core.input_layer import InputLayer +from keras.src.layers.core.lambda_layer import Lambda +from keras.src.layers.core.masking import Masking +from keras.src.layers.core.wrapper import Wrapper +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.layers.merging.add import Add +from keras.src.layers.merging.add import add +from keras.src.layers.merging.average import Average +from keras.src.layers.merging.average import average +from keras.src.layers.merging.concatenate import Concatenate +from keras.src.layers.merging.concatenate import concatenate +from keras.src.layers.merging.dot import Dot +from keras.src.layers.merging.dot import dot +from keras.src.layers.merging.maximum import Maximum +from keras.src.layers.merging.maximum import maximum +from keras.src.layers.merging.minimum import Minimum +from keras.src.layers.merging.minimum import minimum +from keras.src.layers.merging.multiply import Multiply +from keras.src.layers.merging.multiply import multiply +from keras.src.layers.merging.subtract import Subtract +from keras.src.layers.merging.subtract import subtract +from keras.src.layers.normalization.batch_normalization import 
BatchNormalization +from keras.src.layers.normalization.group_normalization import GroupNormalization +from keras.src.layers.normalization.layer_normalization import LayerNormalization +from keras.src.layers.normalization.spectral_normalization import SpectralNormalization +from keras.src.layers.normalization.unit_normalization import UnitNormalization +from keras.src.layers.pooling.average_pooling1d import AveragePooling1D +from keras.src.layers.pooling.average_pooling1d import AveragePooling1D as AvgPool1D +from keras.src.layers.pooling.average_pooling2d import AveragePooling2D +from keras.src.layers.pooling.average_pooling2d import AveragePooling2D as AvgPool2D +from keras.src.layers.pooling.average_pooling3d import AveragePooling3D +from keras.src.layers.pooling.average_pooling3d import AveragePooling3D as AvgPool3D +from keras.src.layers.pooling.global_average_pooling1d import GlobalAveragePooling1D +from keras.src.layers.pooling.global_average_pooling1d import GlobalAveragePooling1D as GlobalAvgPool1D +from keras.src.layers.pooling.global_average_pooling2d import GlobalAveragePooling2D +from keras.src.layers.pooling.global_average_pooling2d import GlobalAveragePooling2D as GlobalAvgPool2D +from keras.src.layers.pooling.global_average_pooling3d import GlobalAveragePooling3D +from keras.src.layers.pooling.global_average_pooling3d import GlobalAveragePooling3D as GlobalAvgPool3D +from keras.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D +from keras.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D as GlobalMaxPool1D +from keras.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D +from keras.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D as GlobalMaxPool2D +from keras.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D +from keras.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D as GlobalMaxPool3D +from keras.src.layers.pooling.max_pooling1d import MaxPooling1D +from keras.src.layers.pooling.max_pooling1d import MaxPooling1D as MaxPool1D +from keras.src.layers.pooling.max_pooling2d import MaxPooling2D +from keras.src.layers.pooling.max_pooling2d import MaxPooling2D as MaxPool2D +from keras.src.layers.pooling.max_pooling3d import MaxPooling3D +from keras.src.layers.pooling.max_pooling3d import MaxPooling3D as MaxPool3D +from keras.src.layers.preprocessing.category_encoding import CategoryEncoding +from keras.src.layers.preprocessing.discretization import Discretization +from keras.src.layers.preprocessing.hashed_crossing import HashedCrossing +from keras.src.layers.preprocessing.hashing import Hashing +from keras.src.layers.preprocessing.image_preprocessing.auto_contrast import AutoContrast +from keras.src.layers.preprocessing.image_preprocessing.center_crop import CenterCrop +from keras.src.layers.preprocessing.image_preprocessing.random_brightness import RandomBrightness +from keras.src.layers.preprocessing.image_preprocessing.random_contrast import RandomContrast +from keras.src.layers.preprocessing.image_preprocessing.random_crop import RandomCrop +from keras.src.layers.preprocessing.image_preprocessing.random_flip import RandomFlip +from keras.src.layers.preprocessing.image_preprocessing.random_rotation import RandomRotation +from keras.src.layers.preprocessing.image_preprocessing.random_translation import RandomTranslation +from keras.src.layers.preprocessing.image_preprocessing.random_zoom import RandomZoom +from keras.src.layers.preprocessing.image_preprocessing.resizing import 
Resizing +from keras.src.layers.preprocessing.image_preprocessing.solarization import Solarization +from keras.src.layers.preprocessing.integer_lookup import IntegerLookup +from keras.src.layers.preprocessing.mel_spectrogram import MelSpectrogram +from keras.src.layers.preprocessing.normalization import Normalization +from keras.src.layers.preprocessing.rescaling import Rescaling +from keras.src.layers.preprocessing.string_lookup import StringLookup +from keras.src.layers.preprocessing.text_vectorization import TextVectorization +from keras.src.layers.regularization.activity_regularization import ActivityRegularization +from keras.src.layers.regularization.alpha_dropout import AlphaDropout +from keras.src.layers.regularization.dropout import Dropout +from keras.src.layers.regularization.gaussian_dropout import GaussianDropout +from keras.src.layers.regularization.gaussian_noise import GaussianNoise +from keras.src.layers.regularization.spatial_dropout import SpatialDropout1D +from keras.src.layers.regularization.spatial_dropout import SpatialDropout2D +from keras.src.layers.regularization.spatial_dropout import SpatialDropout3D +from keras.src.layers.reshaping.cropping1d import Cropping1D +from keras.src.layers.reshaping.cropping2d import Cropping2D +from keras.src.layers.reshaping.cropping3d import Cropping3D +from keras.src.layers.reshaping.flatten import Flatten +from keras.src.layers.reshaping.permute import Permute +from keras.src.layers.reshaping.repeat_vector import RepeatVector +from keras.src.layers.reshaping.reshape import Reshape +from keras.src.layers.reshaping.up_sampling1d import UpSampling1D +from keras.src.layers.reshaping.up_sampling2d import UpSampling2D +from keras.src.layers.reshaping.up_sampling3d import UpSampling3D +from keras.src.layers.reshaping.zero_padding1d import ZeroPadding1D +from keras.src.layers.reshaping.zero_padding2d import ZeroPadding2D +from keras.src.layers.reshaping.zero_padding3d import ZeroPadding3D +from keras.src.layers.rnn.bidirectional import Bidirectional +from keras.src.layers.rnn.conv_lstm1d import ConvLSTM1D +from keras.src.layers.rnn.conv_lstm2d import ConvLSTM2D +from keras.src.layers.rnn.conv_lstm3d import ConvLSTM3D +from keras.src.layers.rnn.gru import GRU +from keras.src.layers.rnn.gru import GRUCell +from keras.src.layers.rnn.lstm import LSTM +from keras.src.layers.rnn.lstm import LSTMCell +from keras.src.layers.rnn.rnn import RNN +from keras.src.layers.rnn.simple_rnn import SimpleRNN +from keras.src.layers.rnn.simple_rnn import SimpleRNNCell +from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells +from keras.src.layers.rnn.time_distributed import TimeDistributed +from keras.src.utils.jax_layer import FlaxLayer +from keras.src.utils.jax_layer import JaxLayer +from keras.src.utils.torch_utils import TorchModuleWrapper + +# File: keras-master/keras/api/losses/__init__.py +"""""" +from keras.src.losses import deserialize +from keras.src.losses import get +from keras.src.losses import serialize +from keras.src.losses.loss import Loss +from keras.src.losses.losses import CTC +from keras.src.losses.losses import BinaryCrossentropy +from keras.src.losses.losses import BinaryFocalCrossentropy +from keras.src.losses.losses import CategoricalCrossentropy +from keras.src.losses.losses import CategoricalFocalCrossentropy +from keras.src.losses.losses import CategoricalHinge +from keras.src.losses.losses import CosineSimilarity +from keras.src.losses.losses import Dice +from keras.src.losses.losses import Hinge +from 
keras.src.losses.losses import Huber +from keras.src.losses.losses import KLDivergence +from keras.src.losses.losses import LogCosh +from keras.src.losses.losses import MeanAbsoluteError +from keras.src.losses.losses import MeanAbsolutePercentageError +from keras.src.losses.losses import MeanSquaredError +from keras.src.losses.losses import MeanSquaredLogarithmicError +from keras.src.losses.losses import Poisson +from keras.src.losses.losses import SparseCategoricalCrossentropy +from keras.src.losses.losses import SquaredHinge +from keras.src.losses.losses import Tversky +from keras.src.losses.losses import binary_crossentropy +from keras.src.losses.losses import binary_focal_crossentropy +from keras.src.losses.losses import categorical_crossentropy +from keras.src.losses.losses import categorical_focal_crossentropy +from keras.src.losses.losses import categorical_hinge +from keras.src.losses.losses import cosine_similarity +from keras.src.losses.losses import ctc +from keras.src.losses.losses import dice +from keras.src.losses.losses import hinge +from keras.src.losses.losses import huber +from keras.src.losses.losses import kl_divergence +from keras.src.losses.losses import log_cosh +from keras.src.losses.losses import mean_absolute_error +from keras.src.losses.losses import mean_absolute_percentage_error +from keras.src.losses.losses import mean_squared_error +from keras.src.losses.losses import mean_squared_logarithmic_error +from keras.src.losses.losses import poisson +from keras.src.losses.losses import sparse_categorical_crossentropy +from keras.src.losses.losses import squared_hinge +from keras.src.losses.losses import tversky + +# File: keras-master/keras/api/metrics/__init__.py +"""""" +from keras.src.losses.losses import binary_crossentropy +from keras.src.losses.losses import binary_focal_crossentropy +from keras.src.losses.losses import categorical_crossentropy +from keras.src.losses.losses import categorical_focal_crossentropy +from keras.src.losses.losses import categorical_hinge +from keras.src.losses.losses import hinge +from keras.src.losses.losses import huber +from keras.src.losses.losses import kl_divergence +from keras.src.losses.losses import log_cosh +from keras.src.losses.losses import mean_absolute_error +from keras.src.losses.losses import mean_absolute_percentage_error +from keras.src.losses.losses import mean_squared_error +from keras.src.losses.losses import mean_squared_logarithmic_error +from keras.src.losses.losses import poisson +from keras.src.losses.losses import sparse_categorical_crossentropy +from keras.src.losses.losses import squared_hinge +from keras.src.metrics import deserialize +from keras.src.metrics import get +from keras.src.metrics import serialize +from keras.src.metrics.accuracy_metrics import Accuracy +from keras.src.metrics.accuracy_metrics import BinaryAccuracy +from keras.src.metrics.accuracy_metrics import CategoricalAccuracy +from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy +from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy +from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy +from keras.src.metrics.accuracy_metrics import binary_accuracy +from keras.src.metrics.accuracy_metrics import categorical_accuracy +from keras.src.metrics.accuracy_metrics import sparse_categorical_accuracy +from keras.src.metrics.accuracy_metrics import sparse_top_k_categorical_accuracy +from keras.src.metrics.accuracy_metrics import top_k_categorical_accuracy +from 
keras.src.metrics.confusion_metrics import AUC +from keras.src.metrics.confusion_metrics import FalseNegatives +from keras.src.metrics.confusion_metrics import FalsePositives +from keras.src.metrics.confusion_metrics import Precision +from keras.src.metrics.confusion_metrics import PrecisionAtRecall +from keras.src.metrics.confusion_metrics import Recall +from keras.src.metrics.confusion_metrics import RecallAtPrecision +from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity +from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity +from keras.src.metrics.confusion_metrics import TrueNegatives +from keras.src.metrics.confusion_metrics import TruePositives +from keras.src.metrics.f_score_metrics import F1Score +from keras.src.metrics.f_score_metrics import FBetaScore +from keras.src.metrics.hinge_metrics import CategoricalHinge +from keras.src.metrics.hinge_metrics import Hinge +from keras.src.metrics.hinge_metrics import SquaredHinge +from keras.src.metrics.iou_metrics import BinaryIoU +from keras.src.metrics.iou_metrics import IoU +from keras.src.metrics.iou_metrics import MeanIoU +from keras.src.metrics.iou_metrics import OneHotIoU +from keras.src.metrics.iou_metrics import OneHotMeanIoU +from keras.src.metrics.metric import Metric +from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy +from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy +from keras.src.metrics.probabilistic_metrics import KLDivergence +from keras.src.metrics.probabilistic_metrics import Poisson +from keras.src.metrics.probabilistic_metrics import SparseCategoricalCrossentropy +from keras.src.metrics.reduction_metrics import Mean +from keras.src.metrics.reduction_metrics import MeanMetricWrapper +from keras.src.metrics.reduction_metrics import Sum +from keras.src.metrics.regression_metrics import CosineSimilarity +from keras.src.metrics.regression_metrics import LogCoshError +from keras.src.metrics.regression_metrics import MeanAbsoluteError +from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError +from keras.src.metrics.regression_metrics import MeanSquaredError +from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError +from keras.src.metrics.regression_metrics import R2Score +from keras.src.metrics.regression_metrics import RootMeanSquaredError + +# File: keras-master/keras/api/mixed_precision/__init__.py +"""""" +from keras.src.dtype_policies.dtype_policy import DTypePolicy +from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy +from keras.src.dtype_policies.dtype_policy import dtype_policy +from keras.src.dtype_policies.dtype_policy import dtype_policy as global_policy +from keras.src.dtype_policies.dtype_policy import set_dtype_policy +from keras.src.dtype_policies.dtype_policy import set_dtype_policy as set_global_policy +from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer + +# File: keras-master/keras/api/models/__init__.py +"""""" +from keras.src.models.cloning import clone_model +from keras.src.models.model import Model +from keras.src.models.model import model_from_json +from keras.src.models.sequential import Sequential +from keras.src.saving.saving_api import load_model +from keras.src.saving.saving_api import save_model + +# File: keras-master/keras/api/ops/__init__.py +"""""" +from keras.api.ops import image +from keras.api.ops import linalg +from keras.api.ops import nn +from keras.api.ops import numpy +from keras.src.ops.core import associative_scan 
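
# Usage sketch (annotation, not part of the dumped file): keras.ops exposes a
# backend-agnostic array API; the same calls run on TensorFlow, JAX, PyTorch,
# or NumPy depending on the configured backend. Values are illustrative.
import keras

a = keras.ops.reshape(keras.ops.arange(6, dtype="float32"), (2, 3))
b = keras.ops.ones((3, 2))
logits = keras.ops.matmul(a, b)             # shape (2, 2)
probs = keras.ops.softmax(logits, axis=-1)  # each row sums to 1
print(keras.ops.convert_to_numpy(probs))
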
+from keras.src.ops.core import cast +from keras.src.ops.core import cond +from keras.src.ops.core import convert_to_numpy +from keras.src.ops.core import convert_to_tensor +from keras.src.ops.core import custom_gradient +from keras.src.ops.core import dtype +from keras.src.ops.core import fori_loop +from keras.src.ops.core import is_tensor +from keras.src.ops.core import map +from keras.src.ops.core import saturate_cast +from keras.src.ops.core import scan +from keras.src.ops.core import scatter +from keras.src.ops.core import scatter_update +from keras.src.ops.core import shape +from keras.src.ops.core import slice +from keras.src.ops.core import slice_update +from keras.src.ops.core import stop_gradient +from keras.src.ops.core import switch +from keras.src.ops.core import unstack +from keras.src.ops.core import vectorized_map +from keras.src.ops.core import while_loop +from keras.src.ops.linalg import cholesky +from keras.src.ops.linalg import det +from keras.src.ops.linalg import eig +from keras.src.ops.linalg import eigh +from keras.src.ops.linalg import inv +from keras.src.ops.linalg import lstsq +from keras.src.ops.linalg import lu_factor +from keras.src.ops.linalg import norm +from keras.src.ops.linalg import qr +from keras.src.ops.linalg import solve +from keras.src.ops.linalg import solve_triangular +from keras.src.ops.linalg import svd +from keras.src.ops.math import erf +from keras.src.ops.math import erfinv +from keras.src.ops.math import extract_sequences +from keras.src.ops.math import fft +from keras.src.ops.math import fft2 +from keras.src.ops.math import in_top_k +from keras.src.ops.math import irfft +from keras.src.ops.math import istft +from keras.src.ops.math import logdet +from keras.src.ops.math import logsumexp +from keras.src.ops.math import rfft +from keras.src.ops.math import rsqrt +from keras.src.ops.math import segment_max +from keras.src.ops.math import segment_sum +from keras.src.ops.math import stft +from keras.src.ops.math import top_k +from keras.src.ops.nn import average_pool +from keras.src.ops.nn import batch_normalization +from keras.src.ops.nn import binary_crossentropy +from keras.src.ops.nn import categorical_crossentropy +from keras.src.ops.nn import conv +from keras.src.ops.nn import conv_transpose +from keras.src.ops.nn import ctc_decode +from keras.src.ops.nn import ctc_loss +from keras.src.ops.nn import depthwise_conv +from keras.src.ops.nn import elu +from keras.src.ops.nn import gelu +from keras.src.ops.nn import hard_sigmoid +from keras.src.ops.nn import hard_silu +from keras.src.ops.nn import hard_silu as hard_swish +from keras.src.ops.nn import leaky_relu +from keras.src.ops.nn import log_sigmoid +from keras.src.ops.nn import log_softmax +from keras.src.ops.nn import max_pool +from keras.src.ops.nn import moments +from keras.src.ops.nn import multi_hot +from keras.src.ops.nn import normalize +from keras.src.ops.nn import one_hot +from keras.src.ops.nn import psnr +from keras.src.ops.nn import relu +from keras.src.ops.nn import relu6 +from keras.src.ops.nn import selu +from keras.src.ops.nn import separable_conv +from keras.src.ops.nn import sigmoid +from keras.src.ops.nn import silu +from keras.src.ops.nn import silu as swish +from keras.src.ops.nn import softmax +from keras.src.ops.nn import softplus +from keras.src.ops.nn import softsign +from keras.src.ops.nn import sparse_categorical_crossentropy +from keras.src.ops.numpy import abs +from keras.src.ops.numpy import absolute +from keras.src.ops.numpy import add +from 
keras.src.ops.numpy import all +from keras.src.ops.numpy import amax +from keras.src.ops.numpy import amin +from keras.src.ops.numpy import any +from keras.src.ops.numpy import append +from keras.src.ops.numpy import arange +from keras.src.ops.numpy import arccos +from keras.src.ops.numpy import arccosh +from keras.src.ops.numpy import arcsin +from keras.src.ops.numpy import arcsinh +from keras.src.ops.numpy import arctan +from keras.src.ops.numpy import arctan2 +from keras.src.ops.numpy import arctanh +from keras.src.ops.numpy import argmax +from keras.src.ops.numpy import argmin +from keras.src.ops.numpy import argpartition +from keras.src.ops.numpy import argsort +from keras.src.ops.numpy import array +from keras.src.ops.numpy import average +from keras.src.ops.numpy import bincount +from keras.src.ops.numpy import bitwise_and +from keras.src.ops.numpy import bitwise_invert +from keras.src.ops.numpy import bitwise_left_shift +from keras.src.ops.numpy import bitwise_not +from keras.src.ops.numpy import bitwise_or +from keras.src.ops.numpy import bitwise_right_shift +from keras.src.ops.numpy import bitwise_xor +from keras.src.ops.numpy import broadcast_to +from keras.src.ops.numpy import ceil +from keras.src.ops.numpy import clip +from keras.src.ops.numpy import concatenate +from keras.src.ops.numpy import conj +from keras.src.ops.numpy import conjugate +from keras.src.ops.numpy import copy +from keras.src.ops.numpy import correlate +from keras.src.ops.numpy import cos +from keras.src.ops.numpy import cosh +from keras.src.ops.numpy import count_nonzero +from keras.src.ops.numpy import cross +from keras.src.ops.numpy import cumprod +from keras.src.ops.numpy import cumsum +from keras.src.ops.numpy import diag +from keras.src.ops.numpy import diagonal +from keras.src.ops.numpy import diff +from keras.src.ops.numpy import digitize +from keras.src.ops.numpy import divide +from keras.src.ops.numpy import divide_no_nan +from keras.src.ops.numpy import dot +from keras.src.ops.numpy import einsum +from keras.src.ops.numpy import empty +from keras.src.ops.numpy import equal +from keras.src.ops.numpy import exp +from keras.src.ops.numpy import expand_dims +from keras.src.ops.numpy import expm1 +from keras.src.ops.numpy import eye +from keras.src.ops.numpy import flip +from keras.src.ops.numpy import floor +from keras.src.ops.numpy import floor_divide +from keras.src.ops.numpy import full +from keras.src.ops.numpy import full_like +from keras.src.ops.numpy import get_item +from keras.src.ops.numpy import greater +from keras.src.ops.numpy import greater_equal +from keras.src.ops.numpy import hstack +from keras.src.ops.numpy import identity +from keras.src.ops.numpy import imag +from keras.src.ops.numpy import isclose +from keras.src.ops.numpy import isfinite +from keras.src.ops.numpy import isinf +from keras.src.ops.numpy import isnan +from keras.src.ops.numpy import left_shift +from keras.src.ops.numpy import less +from keras.src.ops.numpy import less_equal +from keras.src.ops.numpy import linspace +from keras.src.ops.numpy import log +from keras.src.ops.numpy import log1p +from keras.src.ops.numpy import log2 +from keras.src.ops.numpy import log10 +from keras.src.ops.numpy import logaddexp +from keras.src.ops.numpy import logical_and +from keras.src.ops.numpy import logical_not +from keras.src.ops.numpy import logical_or +from keras.src.ops.numpy import logical_xor +from keras.src.ops.numpy import logspace +from keras.src.ops.numpy import matmul +from keras.src.ops.numpy import max +from 
keras.src.ops.numpy import maximum +from keras.src.ops.numpy import mean +from keras.src.ops.numpy import median +from keras.src.ops.numpy import meshgrid +from keras.src.ops.numpy import min +from keras.src.ops.numpy import minimum +from keras.src.ops.numpy import mod +from keras.src.ops.numpy import moveaxis +from keras.src.ops.numpy import multiply +from keras.src.ops.numpy import nan_to_num +from keras.src.ops.numpy import ndim +from keras.src.ops.numpy import negative +from keras.src.ops.numpy import nonzero +from keras.src.ops.numpy import not_equal +from keras.src.ops.numpy import ones +from keras.src.ops.numpy import ones_like +from keras.src.ops.numpy import outer +from keras.src.ops.numpy import pad +from keras.src.ops.numpy import power +from keras.src.ops.numpy import prod +from keras.src.ops.numpy import quantile +from keras.src.ops.numpy import ravel +from keras.src.ops.numpy import real +from keras.src.ops.numpy import reciprocal +from keras.src.ops.numpy import repeat +from keras.src.ops.numpy import reshape +from keras.src.ops.numpy import right_shift +from keras.src.ops.numpy import roll +from keras.src.ops.numpy import round +from keras.src.ops.numpy import searchsorted +from keras.src.ops.numpy import select +from keras.src.ops.numpy import sign +from keras.src.ops.numpy import sin +from keras.src.ops.numpy import sinh +from keras.src.ops.numpy import size +from keras.src.ops.numpy import slogdet +from keras.src.ops.numpy import sort +from keras.src.ops.numpy import split +from keras.src.ops.numpy import sqrt +from keras.src.ops.numpy import square +from keras.src.ops.numpy import squeeze +from keras.src.ops.numpy import stack +from keras.src.ops.numpy import std +from keras.src.ops.numpy import subtract +from keras.src.ops.numpy import sum +from keras.src.ops.numpy import swapaxes +from keras.src.ops.numpy import take +from keras.src.ops.numpy import take_along_axis +from keras.src.ops.numpy import tan +from keras.src.ops.numpy import tanh +from keras.src.ops.numpy import tensordot +from keras.src.ops.numpy import tile +from keras.src.ops.numpy import trace +from keras.src.ops.numpy import transpose +from keras.src.ops.numpy import tri +from keras.src.ops.numpy import tril +from keras.src.ops.numpy import triu +from keras.src.ops.numpy import true_divide +from keras.src.ops.numpy import trunc +from keras.src.ops.numpy import var +from keras.src.ops.numpy import vdot +from keras.src.ops.numpy import vectorize +from keras.src.ops.numpy import vstack +from keras.src.ops.numpy import where +from keras.src.ops.numpy import zeros +from keras.src.ops.numpy import zeros_like + +# File: keras-master/keras/api/ops/image/__init__.py +"""""" +from keras.src.ops.image import affine_transform +from keras.src.ops.image import crop_images +from keras.src.ops.image import extract_patches +from keras.src.ops.image import hsv_to_rgb +from keras.src.ops.image import map_coordinates +from keras.src.ops.image import pad_images +from keras.src.ops.image import resize +from keras.src.ops.image import rgb_to_grayscale +from keras.src.ops.image import rgb_to_hsv + +# File: keras-master/keras/api/ops/linalg/__init__.py +"""""" +from keras.src.ops.linalg import cholesky +from keras.src.ops.linalg import det +from keras.src.ops.linalg import eig +from keras.src.ops.linalg import eigh +from keras.src.ops.linalg import inv +from keras.src.ops.linalg import lstsq +from keras.src.ops.linalg import lu_factor +from keras.src.ops.linalg import norm +from keras.src.ops.linalg import qr +from 
keras.src.ops.linalg import solve +from keras.src.ops.linalg import solve_triangular +from keras.src.ops.linalg import svd + +# File: keras-master/keras/api/ops/nn/__init__.py +"""""" +from keras.src.ops.nn import average_pool +from keras.src.ops.nn import batch_normalization +from keras.src.ops.nn import binary_crossentropy +from keras.src.ops.nn import categorical_crossentropy +from keras.src.ops.nn import conv +from keras.src.ops.nn import conv_transpose +from keras.src.ops.nn import ctc_decode +from keras.src.ops.nn import ctc_loss +from keras.src.ops.nn import depthwise_conv +from keras.src.ops.nn import elu +from keras.src.ops.nn import gelu +from keras.src.ops.nn import hard_sigmoid +from keras.src.ops.nn import hard_silu +from keras.src.ops.nn import hard_silu as hard_swish +from keras.src.ops.nn import leaky_relu +from keras.src.ops.nn import log_sigmoid +from keras.src.ops.nn import log_softmax +from keras.src.ops.nn import max_pool +from keras.src.ops.nn import moments +from keras.src.ops.nn import multi_hot +from keras.src.ops.nn import normalize +from keras.src.ops.nn import one_hot +from keras.src.ops.nn import psnr +from keras.src.ops.nn import relu +from keras.src.ops.nn import relu6 +from keras.src.ops.nn import selu +from keras.src.ops.nn import separable_conv +from keras.src.ops.nn import sigmoid +from keras.src.ops.nn import silu +from keras.src.ops.nn import silu as swish +from keras.src.ops.nn import softmax +from keras.src.ops.nn import softplus +from keras.src.ops.nn import softsign +from keras.src.ops.nn import sparse_categorical_crossentropy + +# File: keras-master/keras/api/ops/numpy/__init__.py +"""""" +from keras.src.ops.numpy import abs +from keras.src.ops.numpy import absolute +from keras.src.ops.numpy import add +from keras.src.ops.numpy import all +from keras.src.ops.numpy import amax +from keras.src.ops.numpy import amin +from keras.src.ops.numpy import any +from keras.src.ops.numpy import append +from keras.src.ops.numpy import arange +from keras.src.ops.numpy import arccos +from keras.src.ops.numpy import arccosh +from keras.src.ops.numpy import arcsin +from keras.src.ops.numpy import arcsinh +from keras.src.ops.numpy import arctan +from keras.src.ops.numpy import arctan2 +from keras.src.ops.numpy import arctanh +from keras.src.ops.numpy import argmax +from keras.src.ops.numpy import argmin +from keras.src.ops.numpy import argpartition +from keras.src.ops.numpy import argsort +from keras.src.ops.numpy import array +from keras.src.ops.numpy import average +from keras.src.ops.numpy import bincount +from keras.src.ops.numpy import bitwise_and +from keras.src.ops.numpy import bitwise_invert +from keras.src.ops.numpy import bitwise_left_shift +from keras.src.ops.numpy import bitwise_not +from keras.src.ops.numpy import bitwise_or +from keras.src.ops.numpy import bitwise_right_shift +from keras.src.ops.numpy import bitwise_xor +from keras.src.ops.numpy import broadcast_to +from keras.src.ops.numpy import ceil +from keras.src.ops.numpy import clip +from keras.src.ops.numpy import concatenate +from keras.src.ops.numpy import conj +from keras.src.ops.numpy import conjugate +from keras.src.ops.numpy import copy +from keras.src.ops.numpy import correlate +from keras.src.ops.numpy import cos +from keras.src.ops.numpy import cosh +from keras.src.ops.numpy import count_nonzero +from keras.src.ops.numpy import cross +from keras.src.ops.numpy import cumprod +from keras.src.ops.numpy import cumsum +from keras.src.ops.numpy import diag +from keras.src.ops.numpy import 
diagonal +from keras.src.ops.numpy import diff +from keras.src.ops.numpy import digitize +from keras.src.ops.numpy import divide +from keras.src.ops.numpy import divide_no_nan +from keras.src.ops.numpy import dot +from keras.src.ops.numpy import einsum +from keras.src.ops.numpy import empty +from keras.src.ops.numpy import equal +from keras.src.ops.numpy import exp +from keras.src.ops.numpy import expand_dims +from keras.src.ops.numpy import expm1 +from keras.src.ops.numpy import eye +from keras.src.ops.numpy import flip +from keras.src.ops.numpy import floor +from keras.src.ops.numpy import floor_divide +from keras.src.ops.numpy import full +from keras.src.ops.numpy import full_like +from keras.src.ops.numpy import get_item +from keras.src.ops.numpy import greater +from keras.src.ops.numpy import greater_equal +from keras.src.ops.numpy import hstack +from keras.src.ops.numpy import identity +from keras.src.ops.numpy import imag +from keras.src.ops.numpy import isclose +from keras.src.ops.numpy import isfinite +from keras.src.ops.numpy import isinf +from keras.src.ops.numpy import isnan +from keras.src.ops.numpy import left_shift +from keras.src.ops.numpy import less +from keras.src.ops.numpy import less_equal +from keras.src.ops.numpy import linspace +from keras.src.ops.numpy import log +from keras.src.ops.numpy import log1p +from keras.src.ops.numpy import log2 +from keras.src.ops.numpy import log10 +from keras.src.ops.numpy import logaddexp +from keras.src.ops.numpy import logical_and +from keras.src.ops.numpy import logical_not +from keras.src.ops.numpy import logical_or +from keras.src.ops.numpy import logical_xor +from keras.src.ops.numpy import logspace +from keras.src.ops.numpy import matmul +from keras.src.ops.numpy import max +from keras.src.ops.numpy import maximum +from keras.src.ops.numpy import mean +from keras.src.ops.numpy import median +from keras.src.ops.numpy import meshgrid +from keras.src.ops.numpy import min +from keras.src.ops.numpy import minimum +from keras.src.ops.numpy import mod +from keras.src.ops.numpy import moveaxis +from keras.src.ops.numpy import multiply +from keras.src.ops.numpy import nan_to_num +from keras.src.ops.numpy import ndim +from keras.src.ops.numpy import negative +from keras.src.ops.numpy import nonzero +from keras.src.ops.numpy import not_equal +from keras.src.ops.numpy import ones +from keras.src.ops.numpy import ones_like +from keras.src.ops.numpy import outer +from keras.src.ops.numpy import pad +from keras.src.ops.numpy import power +from keras.src.ops.numpy import prod +from keras.src.ops.numpy import quantile +from keras.src.ops.numpy import ravel +from keras.src.ops.numpy import real +from keras.src.ops.numpy import reciprocal +from keras.src.ops.numpy import repeat +from keras.src.ops.numpy import reshape +from keras.src.ops.numpy import right_shift +from keras.src.ops.numpy import roll +from keras.src.ops.numpy import round +from keras.src.ops.numpy import select +from keras.src.ops.numpy import sign +from keras.src.ops.numpy import sin +from keras.src.ops.numpy import sinh +from keras.src.ops.numpy import size +from keras.src.ops.numpy import slogdet +from keras.src.ops.numpy import sort +from keras.src.ops.numpy import split +from keras.src.ops.numpy import sqrt +from keras.src.ops.numpy import square +from keras.src.ops.numpy import squeeze +from keras.src.ops.numpy import stack +from keras.src.ops.numpy import std +from keras.src.ops.numpy import subtract +from keras.src.ops.numpy import sum +from keras.src.ops.numpy import 
swapaxes +from keras.src.ops.numpy import take +from keras.src.ops.numpy import take_along_axis +from keras.src.ops.numpy import tan +from keras.src.ops.numpy import tanh +from keras.src.ops.numpy import tensordot +from keras.src.ops.numpy import tile +from keras.src.ops.numpy import trace +from keras.src.ops.numpy import transpose +from keras.src.ops.numpy import tri +from keras.src.ops.numpy import tril +from keras.src.ops.numpy import triu +from keras.src.ops.numpy import true_divide +from keras.src.ops.numpy import trunc +from keras.src.ops.numpy import var +from keras.src.ops.numpy import vdot +from keras.src.ops.numpy import vectorize +from keras.src.ops.numpy import vstack +from keras.src.ops.numpy import where +from keras.src.ops.numpy import zeros +from keras.src.ops.numpy import zeros_like + +# File: keras-master/keras/api/optimizers/__init__.py +"""""" +from keras.api.optimizers import legacy +from keras.api.optimizers import schedules +from keras.src.optimizers import deserialize +from keras.src.optimizers import get +from keras.src.optimizers import serialize +from keras.src.optimizers.adadelta import Adadelta +from keras.src.optimizers.adafactor import Adafactor +from keras.src.optimizers.adagrad import Adagrad +from keras.src.optimizers.adam import Adam +from keras.src.optimizers.adamax import Adamax +from keras.src.optimizers.adamw import AdamW +from keras.src.optimizers.ftrl import Ftrl +from keras.src.optimizers.lamb import Lamb +from keras.src.optimizers.lion import Lion +from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer +from keras.src.optimizers.nadam import Nadam +from keras.src.optimizers.optimizer import Optimizer +from keras.src.optimizers.rmsprop import RMSprop +from keras.src.optimizers.sgd import SGD + +# File: keras-master/keras/api/optimizers/legacy/__init__.py +"""""" +from keras.src.optimizers import LegacyOptimizerWarning as Adagrad +from keras.src.optimizers import LegacyOptimizerWarning as Adam +from keras.src.optimizers import LegacyOptimizerWarning as Ftrl +from keras.src.optimizers import LegacyOptimizerWarning as Optimizer +from keras.src.optimizers import LegacyOptimizerWarning as RMSprop +from keras.src.optimizers import LegacyOptimizerWarning as SGD + +# File: keras-master/keras/api/optimizers/schedules/__init__.py +"""""" +from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay +from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecayRestarts +from keras.src.optimizers.schedules.learning_rate_schedule import ExponentialDecay +from keras.src.optimizers.schedules.learning_rate_schedule import InverseTimeDecay +from keras.src.optimizers.schedules.learning_rate_schedule import LearningRateSchedule +from keras.src.optimizers.schedules.learning_rate_schedule import PiecewiseConstantDecay +from keras.src.optimizers.schedules.learning_rate_schedule import PolynomialDecay +from keras.src.optimizers.schedules.learning_rate_schedule import deserialize +from keras.src.optimizers.schedules.learning_rate_schedule import serialize + +# File: keras-master/keras/api/preprocessing/__init__.py +"""""" +from keras.api.preprocessing import image +from keras.api.preprocessing import sequence +from keras.src.utils.image_dataset_utils import image_dataset_from_directory +from keras.src.utils.text_dataset_utils import text_dataset_from_directory +from keras.src.utils.timeseries_dataset_utils import timeseries_dataset_from_array + +# File: keras-master/keras/api/quantizers/__init__.py +"""""" +from 
keras.src.quantizers import deserialize +from keras.src.quantizers import get +from keras.src.quantizers import serialize +from keras.src.quantizers.quantizers import AbsMaxQuantizer +from keras.src.quantizers.quantizers import Quantizer +from keras.src.quantizers.quantizers import abs_max_quantize +from keras.src.quantizers.quantizers import compute_float8_amax_history +from keras.src.quantizers.quantizers import compute_float8_scale +from keras.src.quantizers.quantizers import quantize_and_dequantize + +# File: keras-master/keras/api/random/__init__.py +"""""" +from keras.src.random.random import beta +from keras.src.random.random import binomial +from keras.src.random.random import categorical +from keras.src.random.random import dropout +from keras.src.random.random import gamma +from keras.src.random.random import normal +from keras.src.random.random import randint +from keras.src.random.random import shuffle +from keras.src.random.random import truncated_normal +from keras.src.random.random import uniform +from keras.src.random.seed_generator import SeedGenerator + +# File: keras-master/keras/api/regularizers/__init__.py +"""""" +from keras.src.regularizers import deserialize +from keras.src.regularizers import get +from keras.src.regularizers import serialize +from keras.src.regularizers.regularizers import L1 +from keras.src.regularizers.regularizers import L1 as l1 +from keras.src.regularizers.regularizers import L1L2 +from keras.src.regularizers.regularizers import L1L2 as l1_l2 +from keras.src.regularizers.regularizers import L2 +from keras.src.regularizers.regularizers import L2 as l2 +from keras.src.regularizers.regularizers import OrthogonalRegularizer +from keras.src.regularizers.regularizers import OrthogonalRegularizer as orthogonal_regularizer +from keras.src.regularizers.regularizers import Regularizer + +# File: keras-master/keras/api/saving/__init__.py +"""""" +from keras.src.saving.object_registration import CustomObjectScope +from keras.src.saving.object_registration import CustomObjectScope as custom_object_scope +from keras.src.saving.object_registration import get_custom_objects +from keras.src.saving.object_registration import get_registered_name +from keras.src.saving.object_registration import get_registered_object +from keras.src.saving.object_registration import register_keras_serializable +from keras.src.saving.saving_api import load_model +from keras.src.saving.saving_api import load_weights +from keras.src.saving.saving_api import save_model +from keras.src.saving.saving_api import save_weights +from keras.src.saving.serialization_lib import deserialize_keras_object +from keras.src.saving.serialization_lib import serialize_keras_object + +# File: keras-master/keras/api/tree/__init__.py +"""""" +from keras.src.tree.tree_api import assert_same_structure +from keras.src.tree.tree_api import flatten +from keras.src.tree.tree_api import is_nested +from keras.src.tree.tree_api import lists_to_tuples +from keras.src.tree.tree_api import map_shape_structure +from keras.src.tree.tree_api import map_structure +from keras.src.tree.tree_api import map_structure_up_to +from keras.src.tree.tree_api import pack_sequence_as +from keras.src.tree.tree_api import traverse + +# File: keras-master/keras/src/__init__.py +from keras.src import activations +from keras.src import applications +from keras.src import backend +from keras.src import constraints +from keras.src import datasets +from keras.src import initializers +from keras.src import layers +from keras.src import 
models +from keras.src import ops +from keras.src import optimizers +from keras.src import regularizers +from keras.src import utils +from keras.src.backend import KerasTensor +from keras.src.layers import Input +from keras.src.layers import Layer +from keras.src.models import Functional +from keras.src.models import Model +from keras.src.models import Sequential +from keras.src.version import __version__ + +# File: keras-master/keras/src/activations/__init__.py +import types +from keras.src.activations.activations import elu +from keras.src.activations.activations import exponential +from keras.src.activations.activations import gelu +from keras.src.activations.activations import hard_sigmoid +from keras.src.activations.activations import hard_silu +from keras.src.activations.activations import leaky_relu +from keras.src.activations.activations import linear +from keras.src.activations.activations import log_softmax +from keras.src.activations.activations import mish +from keras.src.activations.activations import relu +from keras.src.activations.activations import relu6 +from keras.src.activations.activations import selu +from keras.src.activations.activations import sigmoid +from keras.src.activations.activations import silu +from keras.src.activations.activations import softmax +from keras.src.activations.activations import softplus +from keras.src.activations.activations import softsign +from keras.src.activations.activations import tanh +from keras.src.api_export import keras_export +from keras.src.saving import object_registration +from keras.src.saving import serialization_lib +ALL_OBJECTS = {relu, leaky_relu, relu6, softmax, elu, selu, softplus, softsign, silu, gelu, tanh, sigmoid, exponential, hard_sigmoid, hard_silu, linear, mish, log_softmax} +ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS} +ALL_OBJECTS_DICT['swish'] = silu +ALL_OBJECTS_DICT['hard_swish'] = hard_silu +
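
# Usage sketch (annotation, not part of the dumped file): ALL_OBJECTS_DICT
# above backs the `get`/`serialize`/`deserialize` helpers defined below;
# built-in activations round-trip through their string names.
import keras

fn = keras.activations.get("swish")    # alias resolved to silu via the dict
cfg = keras.activations.serialize(fn)  # built-ins serialize to a name: "silu"
fn_again = keras.activations.deserialize(cfg)
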
+# File: keras-master/keras/src/activations/activations.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export + +@keras_export('keras.activations.relu') +def relu(x, negative_slope=0.0, max_value=None, threshold=0.0): + if backend.any_symbolic_tensors((x,)): + return ReLU(negative_slope=negative_slope, max_value=max_value, threshold=threshold)(x) + return ReLU.static_call(x, negative_slope=negative_slope, max_value=max_value, threshold=threshold) + +class ReLU(ops.Operation): + + def __init__(self, negative_slope=0.0, max_value=None, threshold=0.0, name=None): + super().__init__(name=name) + self.negative_slope = negative_slope + self.max_value = max_value + self.threshold = threshold + + def call(self, x): + return self.static_call(x, negative_slope=self.negative_slope, max_value=self.max_value, threshold=self.threshold) + + def compute_output_spec(self, x): + return backend.KerasTensor(x.shape, x.dtype) + + @staticmethod + def static_call(x, negative_slope=0.0, max_value=None, threshold=0.0): + x = backend.convert_to_tensor(x) + if negative_slope != 0.0: + if max_value is None and threshold == 0: + return backend.nn.leaky_relu(x, negative_slope=negative_slope) + if threshold != 0: + negative_part = backend.nn.relu(-x + threshold) + else: + negative_part = backend.nn.relu(-x) + clip_max = max_value is not None + if threshold != 0: + threshold = ops.cast(threshold, dtype=x.dtype) + x = x * backend.cast(backend.numpy.greater(x, threshold), dtype=x.dtype) + elif max_value == 6: + x = backend.nn.relu6(x) + clip_max = False + else: + x = backend.nn.relu(x) + if clip_max: + min_value = ops.cast(0.0, dtype=x.dtype) + max_value = ops.cast(max_value, dtype=x.dtype) + x = backend.numpy.clip(x, min_value, max_value) + if negative_slope != 0.0: + x -= negative_slope * negative_part + return x + +@keras_export('keras.activations.leaky_relu') +def leaky_relu(x, negative_slope=0.2): + return ops.leaky_relu(x, negative_slope=negative_slope) + +@keras_export('keras.activations.relu6') +def relu6(x): + return ops.relu6(x) + +@keras_export('keras.activations.softmax') +def softmax(x, axis=-1): + output = ops.softmax(x, axis=axis) + try: + output._keras_logits = x + except AttributeError: + pass + return output + +@keras_export('keras.activations.elu') +def elu(x, alpha=1.0): + return ops.elu(x, alpha=alpha) + +@keras_export('keras.activations.selu') +def selu(x): + return ops.selu(x) + +@keras_export('keras.activations.softplus') +def softplus(x): + return ops.softplus(x) + +@keras_export('keras.activations.softsign') +def softsign(x): + return 
ops.softsign(x) + +@keras_export(['keras.activations.silu', 'keras.activations.swish']) +def silu(x): + return ops.silu(x) + +@keras_export('keras.activations.gelu') +def gelu(x, approximate=False): + return ops.gelu(x, approximate=approximate) + +@keras_export('keras.activations.tanh') +def tanh(x): + return ops.tanh(x) + +@keras_export('keras.activations.sigmoid') +def sigmoid(x): + output = ops.sigmoid(x) + try: + output._keras_logits = x + except AttributeError: + pass + return output + +@keras_export('keras.activations.exponential') +def exponential(x): + return ops.exp(x) + +@keras_export('keras.activations.hard_sigmoid') +def hard_sigmoid(x): + return ops.hard_sigmoid(x) + +@keras_export(['keras.activations.hard_silu', 'keras.activations.hard_swish']) +def hard_silu(x): + x = backend.convert_to_tensor(x) + return ops.hard_silu(x) + +@keras_export('keras.activations.linear') +def linear(x): + return x + +class Mish(ops.Operation): + + def call(self, x): + return self.static_call(x) + + def compute_output_spec(self, x): + return backend.KerasTensor(x.shape, x.dtype) + + @staticmethod + def static_call(x): + return x * backend.nn.tanh(backend.nn.softplus(x)) + +@keras_export('keras.activations.mish') +def mish(x): + x = backend.convert_to_tensor(x) + return Mish.static_call(x) + +@keras_export('keras.activations.log_softmax') +def log_softmax(x, axis=-1): + return ops.log_softmax(x, axis=axis) + +# File: keras-master/keras/src/api_export.py +try: + import namex +except ImportError: + namex = None +REGISTERED_NAMES_TO_OBJS = {} +REGISTERED_OBJS_TO_NAMES = {} + +def register_internal_serializable(path, symbol): + global REGISTERED_NAMES_TO_OBJS + if isinstance(path, (list, tuple)): + name = path[0] + else: + name = path + REGISTERED_NAMES_TO_OBJS[name] = symbol + REGISTERED_OBJS_TO_NAMES[symbol] = name + +def get_symbol_from_name(name): + return REGISTERED_NAMES_TO_OBJS.get(name, None) + +def get_name_from_symbol(symbol): + return REGISTERED_OBJS_TO_NAMES.get(symbol, None) +if namex: + + class keras_export(namex.export): + + def __init__(self, path): + super().__init__(package='keras', path=path) + + def __call__(self, symbol): + register_internal_serializable(self.path, symbol) + return super().__call__(symbol) +else: + + class keras_export: + + def __init__(self, path): + self.path = path + + def __call__(self, symbol): + register_internal_serializable(self.path, symbol) + return symbol + +# File: keras-master/keras/src/applications/convnext.py +import numpy as np +from keras.src import backend +from keras.src import initializers +from keras.src import layers +from keras.src import ops +from keras.src import random +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.layers.layer import Layer +from keras.src.models import Functional +from keras.src.models import Sequential +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +BASE_WEIGHTS_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/convnext/' +WEIGHTS_HASHES = {'convnext_tiny': ('8ae6e78ce2933352b1ef4008e6dd2f17bc40771563877d156bc6426c7cf503ff', 'd547c096cabd03329d7be5562c5e14798aa39ed24b474157cef5e85ab9e49ef1'), 'convnext_small': ('ce1277d8f1ee5a0ef0e171469089c18f5233860ceaf9b168049cb9263fd7483c', '6fc8009faa2f00c1c1dfce59feea9b0745eb260a7dd11bee65c8e20843da6eab'), 'convnext_base': ('52cbb006d3dadd03f6e095a8ca1aca47aecdd75acb4bc74bce1f5c695d0086e6', '40a20c5548a5e9202f69735ecc06c990e6b7c9d2de39f0361e27baeb24cb7c45'), 
'convnext_large': ('070c5ed9ed289581e477741d3b34beffa920db8cf590899d6d2c67fba2a198a6', '96f02b6f0753d4f543261bc9d09bed650f24dd6bc02ddde3066135b63d23a1cd'), 'convnext_xlarge': ('c1f5ccab661354fc3a79a10fa99af82f0fbf10ec65cb894a3ae0815f17a889ee', 'de3f8a54174130e0cecdc71583354753d557fcf1f4487331558e2a16ba0cfe05')} +MODEL_CONFIGS = {'tiny': {'depths': [3, 3, 9, 3], 'projection_dims': [96, 192, 384, 768], 'default_size': 224}, 'small': {'depths': [3, 3, 27, 3], 'projection_dims': [96, 192, 384, 768], 'default_size': 224}, 'base': {'depths': [3, 3, 27, 3], 'projection_dims': [128, 256, 512, 1024], 'default_size': 224}, 'large': {'depths': [3, 3, 27, 3], 'projection_dims': [192, 384, 768, 1536], 'default_size': 224}, 'xlarge': {'depths': [3, 3, 27, 3], 'projection_dims': [256, 512, 1024, 2048], 'default_size': 224}} +BASE_DOCSTRING = 'Instantiates the {name} architecture.\n\nReferences:\n- [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545)\n(CVPR 2022)\n\nFor image classification use cases, see\n[this page for detailed examples](\nhttps://keras.io/api/applications/#usage-examples-for-image-classification-models).\nFor transfer learning use cases, make sure to read the\n[guide to transfer learning & fine-tuning](\nhttps://keras.io/guides/transfer_learning/).\n\nThe `base`, `large`, and `xlarge` models were first pre-trained on the\nImageNet-21k dataset and then fine-tuned on the ImageNet-1k dataset. The\npre-trained parameters of the models were assembled from the\n[official repository](https://github.com/facebookresearch/ConvNeXt). To get a\nsense of how these parameters were converted to Keras compatible parameters,\nplease refer to\n[this repository](https://github.com/sayakpaul/keras-convnext-conversion).\n\nNote: Each Keras Application expects a specific kind of input preprocessing.\nFor ConvNeXt, preprocessing is included in the model using a `Normalization`\nlayer. ConvNeXt models expect their inputs to be float or uint8 tensors of\npixels with values in the [0-255] range.\n\nWhen calling the `summary()` method after instantiating a ConvNeXt model,\nprefer setting the `expand_nested` argument of `summary()` to `True` to better\ninvestigate the instantiated model.\n\nArgs:\n include_top: Whether to include the fully-connected\n layer at the top of the network. Defaults to `True`.\n weights: One of `None` (random initialization),\n `"imagenet"` (pre-training on ImageNet-1k), or the path to the weights\n file to be loaded. Defaults to `"imagenet"`.\n input_tensor: Optional Keras tensor\n (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: Optional shape tuple, only to be specified\n if `include_top` is `False`.\n It should have exactly 3 input channels.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`. Defaults to None.\n - `None` means that the output of the model will be\n the 4D tensor output of the last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: Optional number of classes to classify images\n into, only to be specified if `include_top` is `True`, and\n if no `weights` argument is specified. Defaults to 1000 (number of\n ImageNet classes).\n classifier_activation: A `str` or callable. The activation function to use\n on the "top" layer. Ignored unless `include_top=True`. 
Set\n `classifier_activation=None` to return the logits of the "top" layer.\n Defaults to `"softmax"`.\n When loading pretrained weights, `classifier_activation` can only\n be `None` or `"softmax"`.\n name: The name of the model (string).\n\nReturns:\n A model instance.\n' + +class StochasticDepth(Layer): + + def __init__(self, drop_path_rate, **kwargs): + super().__init__(**kwargs) + self.drop_path_rate = drop_path_rate + + def call(self, x, training=None): + if training: + keep_prob = 1 - self.drop_path_rate + shape = (ops.shape(x)[0],) + (1,) * (len(ops.shape(x)) - 1) + random_tensor = keep_prob + random.uniform(shape, 0, 1) + random_tensor = ops.floor(random_tensor) + return x / keep_prob * random_tensor + return x + + def get_config(self): + config = super().get_config() + config.update({'drop_path_rate': self.drop_path_rate}) + return config + +class LayerScale(Layer): + + def __init__(self, init_values, projection_dim, **kwargs): + super().__init__(**kwargs) + self.init_values = init_values + self.projection_dim = projection_dim + + def build(self, _): + self.gamma = self.add_weight(shape=(self.projection_dim,), initializer=initializers.Constant(self.init_values), trainable=True) + + def call(self, x): + return x * self.gamma + + def get_config(self): + config = super().get_config() + config.update({'init_values': self.init_values, 'projection_dim': self.projection_dim}) + return config + +def ConvNeXtBlock(projection_dim, drop_path_rate=0.0, layer_scale_init_value=1e-06, name=None): + if name is None: + name = 'prestem' + str(backend.get_uid('prestem')) + + def apply(inputs): + x = inputs + x = layers.Conv2D(filters=projection_dim, kernel_size=7, padding='same', groups=projection_dim, name=name + '_depthwise_conv')(x) + x = layers.LayerNormalization(epsilon=1e-06, name=name + '_layernorm')(x) + x = layers.Dense(4 * projection_dim, name=name + '_pointwise_conv_1')(x) + x = layers.Activation('gelu', name=name + '_gelu')(x) + x = layers.Dense(projection_dim, name=name + '_pointwise_conv_2')(x) + if layer_scale_init_value is not None: + x = LayerScale(layer_scale_init_value, projection_dim, name=name + '_layer_scale')(x) + if drop_path_rate: + layer = StochasticDepth(drop_path_rate, name=name + '_stochastic_depth') + else: + layer = layers.Activation('linear', name=name + '_identity') + return inputs + layer(x) + return apply + +def PreStem(name=None): + if name is None: + name = 'prestem' + str(backend.get_uid('prestem')) + + def apply(x): + x = layers.Normalization(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255], variance=[(0.229 * 255) ** 2, (0.224 * 255) ** 2, (0.225 * 255) ** 2], name=name + '_prestem_normalization')(x) + return x + return apply + +def Head(num_classes=1000, classifier_activation=None, name=None): + if name is None: + name = str(backend.get_uid('head')) + + def apply(x): + x = layers.GlobalAveragePooling2D(name=name + '_head_gap')(x) + x = layers.LayerNormalization(epsilon=1e-06, name=name + '_head_layernorm')(x) + x = layers.Dense(num_classes, activation=classifier_activation, name=name + '_head_dense')(x) + return x + return apply + +def ConvNeXt(depths, projection_dims, drop_path_rate=0.0, layer_scale_init_value=1e-06, default_size=224, name='convnext', include_preprocessing=True, include_top=True, weights=None, input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', weights_name=None): + if backend.image_data_format() == 'channels_first': + raise ValueError('ConvNeXt does not support the `channels_first` image data 
format. Switch to `channels_last` by editing your local config file at ~/.keras/keras.json') + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.') + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError(f'If using `weights="imagenet"` with `include_top=True`, `classes` should be 1000. Received classes={classes}') + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor)[0] + else: + inputs = img_input + x = inputs + if include_preprocessing: + channel_axis = 3 if backend.image_data_format() == 'channels_last' else 1 + num_channels = input_shape[channel_axis - 1] + if num_channels == 3: + x = PreStem(name=name)(x) + stem = Sequential([layers.Conv2D(projection_dims[0], kernel_size=4, strides=4, name=name + '_stem_conv'), layers.LayerNormalization(epsilon=1e-06, name=name + '_stem_layernorm')], name=name + '_stem') + downsample_layers = [] + downsample_layers.append(stem) + num_downsample_layers = 3 + for i in range(num_downsample_layers): + downsample_layer = Sequential([layers.LayerNormalization(epsilon=1e-06, name=name + '_downsampling_layernorm_' + str(i)), layers.Conv2D(projection_dims[i + 1], kernel_size=2, strides=2, name=name + '_downsampling_conv_' + str(i))], name=name + '_downsampling_block_' + str(i)) + downsample_layers.append(downsample_layer) + depth_drop_rates = [float(x) for x in np.linspace(0.0, drop_path_rate, sum(depths))] + cur = 0 + num_convnext_blocks = 4 + for i in range(num_convnext_blocks): + x = downsample_layers[i](x) + for j in range(depths[i]): + x = ConvNeXtBlock(projection_dim=projection_dims[i], drop_path_rate=depth_drop_rates[cur + j], layer_scale_init_value=layer_scale_init_value, name=name + f'_stage_{i}_block_{j}')(x) + cur += depths[i] + if include_top: + imagenet_utils.validate_activation(classifier_activation, weights) + x = Head(num_classes=classes, classifier_activation=classifier_activation, name=name)(x) + else: + if pooling == 'avg': + x = layers.GlobalAveragePooling2D()(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D()(x) + x = layers.LayerNormalization(epsilon=1e-06)(x) + model = Functional(inputs=inputs, outputs=x, name=name) + if weights == 'imagenet': + if include_top: + file_suffix = '.h5' + file_hash = WEIGHTS_HASHES[weights_name][0] + else: + file_suffix = '_notop.h5' + file_hash = WEIGHTS_HASHES[weights_name][1] + file_name = name + file_suffix + weights_path = file_utils.get_file(file_name, BASE_WEIGHTS_PATH + file_name, cache_subdir='models', file_hash=file_hash) + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +@keras_export(['keras.applications.convnext.ConvNeXtTiny', 'keras.applications.ConvNeXtTiny']) +def ConvNeXtTiny(include_top=True, include_preprocessing=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='convnext_tiny'): 
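+ # Editor's note (illustration, not upstream code): every argument below is
+ # forwarded to the shared ConvNeXt() builder, with the "tiny"
+ # depths/projection_dims looked up from MODEL_CONFIGS, e.g.:
+ # model = ConvNeXtTiny(weights='imagenet')  # 224x224 ImageNet classifier
+ # feats = ConvNeXtTiny(include_top=False, pooling='avg', weights=None)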
+ return ConvNeXt(weights_name='convnext_tiny', depths=MODEL_CONFIGS['tiny']['depths'], projection_dims=MODEL_CONFIGS['tiny']['projection_dims'], drop_path_rate=0.0, layer_scale_init_value=1e-06, default_size=MODEL_CONFIGS['tiny']['default_size'], name=name, include_top=include_top, include_preprocessing=include_preprocessing, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation) + +@keras_export(['keras.applications.convnext.ConvNeXtSmall', 'keras.applications.ConvNeXtSmall']) +def ConvNeXtSmall(include_top=True, include_preprocessing=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='convnext_small'): + return ConvNeXt(weights_name='convnext_small', depths=MODEL_CONFIGS['small']['depths'], projection_dims=MODEL_CONFIGS['small']['projection_dims'], drop_path_rate=0.0, layer_scale_init_value=1e-06, default_size=MODEL_CONFIGS['small']['default_size'], name=name, include_top=include_top, include_preprocessing=include_preprocessing, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation) + +@keras_export(['keras.applications.convnext.ConvNeXtBase', 'keras.applications.ConvNeXtBase']) +def ConvNeXtBase(include_top=True, include_preprocessing=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='convnext_base'): + return ConvNeXt(weights_name='convnext_base', depths=MODEL_CONFIGS['base']['depths'], projection_dims=MODEL_CONFIGS['base']['projection_dims'], drop_path_rate=0.0, layer_scale_init_value=1e-06, default_size=MODEL_CONFIGS['base']['default_size'], name=name, include_top=include_top, include_preprocessing=include_preprocessing, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation) + +@keras_export(['keras.applications.convnext.ConvNeXtLarge', 'keras.applications.ConvNeXtLarge']) +def ConvNeXtLarge(include_top=True, include_preprocessing=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='convnext_large'): + return ConvNeXt(weights_name='convnext_large', depths=MODEL_CONFIGS['large']['depths'], projection_dims=MODEL_CONFIGS['large']['projection_dims'], drop_path_rate=0.0, layer_scale_init_value=1e-06, default_size=MODEL_CONFIGS['large']['default_size'], name=name, include_top=include_top, include_preprocessing=include_preprocessing, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation) + +@keras_export(['keras.applications.convnext.ConvNeXtXLarge', 'keras.applications.ConvNeXtXLarge']) +def ConvNeXtXLarge(include_top=True, include_preprocessing=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='convnext_xlarge'): + return ConvNeXt(weights_name='convnext_xlarge', depths=MODEL_CONFIGS['xlarge']['depths'], projection_dims=MODEL_CONFIGS['xlarge']['projection_dims'], drop_path_rate=0.0, layer_scale_init_value=1e-06, default_size=MODEL_CONFIGS['xlarge']['default_size'], name=name, include_top=include_top, include_preprocessing=include_preprocessing, weights=weights, input_tensor=input_tensor, input_shape=input_shape, 
pooling=pooling, classes=classes, classifier_activation=classifier_activation) +ConvNeXtTiny.__doc__ = BASE_DOCSTRING.format(name='ConvNeXtTiny') +ConvNeXtSmall.__doc__ = BASE_DOCSTRING.format(name='ConvNeXtSmall') +ConvNeXtBase.__doc__ = BASE_DOCSTRING.format(name='ConvNeXtBase') +ConvNeXtLarge.__doc__ = BASE_DOCSTRING.format(name='ConvNeXtLarge') +ConvNeXtXLarge.__doc__ = BASE_DOCSTRING.format(name='ConvNeXtXLarge') + +@keras_export('keras.applications.convnext.preprocess_input') +def preprocess_input(x, data_format=None): + return x + +@keras_export('keras.applications.convnext.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ + +# File: keras-master/keras/src/applications/densenet.py +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +BASE_WEIGHTS_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/densenet/' +DENSENET121_WEIGHT_PATH = BASE_WEIGHTS_PATH + 'densenet121_weights_tf_dim_ordering_tf_kernels.h5' +DENSENET121_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + 'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5' +DENSENET169_WEIGHT_PATH = BASE_WEIGHTS_PATH + 'densenet169_weights_tf_dim_ordering_tf_kernels.h5' +DENSENET169_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + 'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5' +DENSENET201_WEIGHT_PATH = BASE_WEIGHTS_PATH + 'densenet201_weights_tf_dim_ordering_tf_kernels.h5' +DENSENET201_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + 'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5' + +def dense_block(x, blocks, name): + for i in range(blocks): + x = conv_block(x, 32, name=name + '_block' + str(i + 1)) + return x + +def transition_block(x, reduction, name): + bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1 + x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_bn')(x) + x = layers.Activation('relu', name=name + '_relu')(x) + x = layers.Conv2D(int(x.shape[bn_axis] * reduction), 1, use_bias=False, name=name + '_conv')(x) + x = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x) + return x + +def conv_block(x, growth_rate, name): + bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1 + x1 = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_0_bn')(x) + x1 = layers.Activation('relu', name=name + '_0_relu')(x1) + x1 = layers.Conv2D(4 * growth_rate, 1, use_bias=False, name=name + '_1_conv')(x1) + x1 = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_1_bn')(x1) + x1 = layers.Activation('relu', name=name + '_1_relu')(x1) + x1 = layers.Conv2D(growth_rate, 3, padding='same', use_bias=False, name=name + '_2_conv')(x1) + x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1]) + return x + +def DenseNet(blocks, include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='densenet'): + if backend.image_data_format() == 'channels_first': + raise ValueError('DenseNet does not support the `channels_first` image data format. 
Switch to `channels_last` by editing your local config file at ~/.keras/keras.json') + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.') + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError('If using `weights` as `"imagenet"` with `include_top` as true, `classes` should be 1000') + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=224, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1 + x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input) + x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name='conv1_bn')(x) + x = layers.Activation('relu', name='conv1_relu')(x) + x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x) + x = layers.MaxPooling2D(3, strides=2, name='pool1')(x) + x = dense_block(x, blocks[0], name='conv2') + x = transition_block(x, 0.5, name='pool2') + x = dense_block(x, blocks[1], name='conv3') + x = transition_block(x, 0.5, name='pool3') + x = dense_block(x, blocks[2], name='conv4') + x = transition_block(x, 0.5, name='pool4') + x = dense_block(x, blocks[3], name='conv5') + x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name='bn')(x) + x = layers.Activation('relu', name='relu')(x) + if include_top: + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D(name='max_pool')(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + if include_top: + if blocks == [6, 12, 24, 16]: + weights_path = file_utils.get_file('densenet121_weights_tf_dim_ordering_tf_kernels.h5', DENSENET121_WEIGHT_PATH, cache_subdir='models', file_hash='9d60b8095a5708f2dcce2bca79d332c7') + elif blocks == [6, 12, 32, 32]: + weights_path = file_utils.get_file('densenet169_weights_tf_dim_ordering_tf_kernels.h5', DENSENET169_WEIGHT_PATH, cache_subdir='models', file_hash='d699b8f76981ab1b30698df4c175e90b') + elif blocks == [6, 12, 48, 32]: + weights_path = file_utils.get_file('densenet201_weights_tf_dim_ordering_tf_kernels.h5', DENSENET201_WEIGHT_PATH, cache_subdir='models', file_hash='1ceb130c1ea1b78c3bf6114dbdfd8807') + else: + if blocks == [6, 12, 24, 16]: + weights_path = file_utils.get_file('densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5', DENSENET121_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='30ee3e1110167f948a6b9946edeeb738') + elif blocks == [6, 12, 32, 32]: + weights_path = file_utils.get_file('densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5', DENSENET169_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='b8c4d4c20dd625c148057b9ff1c1176b') + elif blocks == [6, 12, 48, 32]: + weights_path = file_utils.get_file('densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5', DENSENET201_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='c13680b51ded0fb44dff2d8f86ac8bb1') + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model +
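+# -- Editor's note: illustrative sketch, not part of the upstream file. The
+# block layout [6, 12, 24, 16] below is the one DenseNet121 passes in; with
+# weights=None nothing is downloaded. Guarded so it never runs on import.
+if __name__ == '__main__':
+ extractor = DenseNet([6, 12, 24, 16], include_top=False, weights=None, input_shape=(224, 224, 3), pooling='avg')
+ print(extractor.output_shape) # expected: (None, 1024) for the DenseNet121 layout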
+@keras_export(['keras.applications.densenet.DenseNet121', 'keras.applications.DenseNet121']) +def DenseNet121(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='densenet121'): + return DenseNet([6, 12, 24, 16], include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation, name=name) + +@keras_export(['keras.applications.densenet.DenseNet169', 'keras.applications.DenseNet169']) +def DenseNet169(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='densenet169'): + return DenseNet([6, 12, 32, 32], include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation, name=name) + +@keras_export(['keras.applications.densenet.DenseNet201', 'keras.applications.DenseNet201']) +def DenseNet201(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='densenet201'): + return DenseNet([6, 12, 48, 32], include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation, name=name) + +@keras_export('keras.applications.densenet.preprocess_input') +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input(x, data_format=data_format, mode='torch') + +@keras_export('keras.applications.densenet.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ +DOC = '\n\nReference:\n- [Densely Connected Convolutional Networks](\n https://arxiv.org/abs/1608.06993) (CVPR 2017)\n\nOptionally loads weights pre-trained on ImageNet.\nNote that the data format convention used by the model is\nthe one specified in your Keras config at `~/.keras/keras.json`.\n\nNote: each Keras Application expects a specific kind of input preprocessing.\nFor DenseNet, call `keras.applications.densenet.preprocess_input`\non your inputs before passing them to the model.\n\nArgs:\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n `"imagenet"` (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n input_tensor: optional Keras tensor\n (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(224, 224, 3)` (with `\'channels_last\'` data format)\n or `(3, 224, 224)` (with `\'channels_first\'` data format).\n It should have exactly 3 input channels,\n and width and height should be no smaller than 32.\n E.g. 
`(200, 200, 3)` would be one valid value.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional block.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional block, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is `True`, and\n if no `weights` argument is specified. Defaults to 1000.\n classifier_activation: A `str` or callable.\n The activation function to use\n on the "top" layer. Ignored unless `include_top=True`. Set\n `classifier_activation=None` to return the logits\n of the "top" layer. When loading pretrained weights,\n `classifier_activation` can only be `None` or `"softmax"`.\n name: The name of the model (string).\n\nReturns:\n A Keras model instance.\n' +setattr(DenseNet121, '__doc__', DenseNet121.__doc__ + DOC) +setattr(DenseNet169, '__doc__', DenseNet169.__doc__ + DOC) +setattr(DenseNet201, '__doc__', DenseNet201.__doc__ + DOC) + +# File: keras-master/keras/src/applications/efficientnet.py +import copy +import math +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +BASE_WEIGHTS_PATH = 'https://storage.googleapis.com/keras-applications/' +WEIGHTS_HASHES = {'b0': ('902e53a9f72be733fc0bcb005b3ebbac', '50bc09e76180e00e4465e1a485ddc09d'), 'b1': ('1d254153d4ab51201f1646940f018540', '74c4e6b3e1f6a1eea24c589628592432'), 'b2': ('b15cce36ff4dcbd00b6dd88e7857a6ad', '111f8e2ac8aa800a7a99e3239f7bfb39'), 'b3': ('ffd1fdc53d0ce67064dc6a9c7960ede0', 'af6d107764bb5b1abb91932881670226'), 'b4': ('18c95ad55216b8f92d7e70b3a046e2fc', 'ebc24e6d6c33eaebbd558eafbeedf1ba'), 'b5': ('ace28f2a6363774853a83a0b21b9421a', '38879255a25d3c92d5e44e04ae6cec6f'), 'b6': ('165f6e37dce68623721b423839de8be5', '9ecce42647a20130c1f39a5d4cb75743'), 'b7': ('8c03f828fec3ef71311cd463b6759d99', 'cbcfe4450ddf6f3ad90b1b398090fe4a')} +DEFAULT_BLOCKS_ARGS = [{'kernel_size': 3, 'repeats': 1, 'filters_in': 32, 'filters_out': 16, 'expand_ratio': 1, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25}, {'kernel_size': 3, 'repeats': 2, 'filters_in': 16, 'filters_out': 24, 'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25}, {'kernel_size': 5, 'repeats': 2, 'filters_in': 24, 'filters_out': 40, 'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25}, {'kernel_size': 3, 'repeats': 3, 'filters_in': 40, 'filters_out': 80, 'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25}, {'kernel_size': 5, 'repeats': 3, 'filters_in': 80, 'filters_out': 112, 'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25}, {'kernel_size': 5, 'repeats': 4, 'filters_in': 112, 'filters_out': 192, 'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25}, {'kernel_size': 3, 'repeats': 1, 'filters_in': 192, 'filters_out': 320, 'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25}] +CONV_KERNEL_INITIALIZER = {'class_name': 'VarianceScaling', 'config': {'scale': 2.0, 'mode': 'fan_out', 'distribution': 'truncated_normal'}} +DENSE_KERNEL_INITIALIZER = {'class_name': 'VarianceScaling', 'config': {'scale': 1.0 / 
3.0, 'mode': 'fan_out', 'distribution': 'uniform'}} +BASE_DOCSTRING = 'Instantiates the {name} architecture.\n\nReference:\n- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](\n https://arxiv.org/abs/1905.11946) (ICML 2019)\n\nThis function returns a Keras image classification model,\noptionally loaded with weights pre-trained on ImageNet.\n\nFor image classification use cases, see\n[this page for detailed examples](\nhttps://keras.io/api/applications/#usage-examples-for-image-classification-models).\n\nFor transfer learning use cases, make sure to read the\n[guide to transfer learning & fine-tuning](\nhttps://keras.io/guides/transfer_learning/).\n\nNote: each Keras Application expects a specific kind of input preprocessing.\nFor EfficientNet, input preprocessing is included as part of the model\n(as a `Rescaling` layer), and thus\n`keras.applications.efficientnet.preprocess_input` is actually a\npass-through function. EfficientNet models expect their inputs to be float\ntensors of pixels with values in the `[0-255]` range.\n\nArgs:\n include_top: Whether to include the fully-connected\n layer at the top of the network. Defaults to `True`.\n weights: One of `None` (random initialization),\n `"imagenet"` (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n Defaults to `"imagenet"`.\n input_tensor: Optional Keras tensor\n (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: Optional shape tuple, only to be specified\n if `include_top` is False.\n It should have exactly 3 input channels.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`. Defaults to `None`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: Optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified. 1000 is how many\n ImageNet classes there are. Defaults to `1000`.\n classifier_activation: A `str` or callable. The activation function to use\n on the "top" layer. Ignored unless `include_top=True`. 
Set\n `classifier_activation=None` to return the logits of the "top" layer.\n Defaults to `\'softmax\'`.\n When loading pretrained weights, `classifier_activation` can only\n be `None` or `"softmax"`.\n name: The name of the model (string).\n\nReturns:\n A model instance.\n' +IMAGENET_STDDEV_RGB = [0.229, 0.224, 0.225] + +def EfficientNet(width_coefficient, depth_coefficient, default_size, dropout_rate=0.2, drop_connect_rate=0.2, depth_divisor=8, activation='swish', blocks_args='default', name='efficientnet', include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', weights_name=None): + if blocks_args == 'default': + blocks_args = DEFAULT_BLOCKS_ARGS + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.') + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError('If using `weights="imagenet"` with `include_top` as true, `classes` should be 1000') + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1 + + def round_filters(filters, divisor=depth_divisor): + filters *= width_coefficient + new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor) + if new_filters < 0.9 * filters: + new_filters += divisor + return int(new_filters) + + def round_repeats(repeats): + return int(math.ceil(depth_coefficient * repeats)) + x = img_input + x = layers.Rescaling(1.0 / 255.0)(x) + x = layers.Normalization(axis=bn_axis)(x) + if weights == 'imagenet': + x = layers.Rescaling([1.0 / math.sqrt(stddev) for stddev in IMAGENET_STDDEV_RGB])(x) + x = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(x, 3), name='stem_conv_pad')(x) + x = layers.Conv2D(round_filters(32), 3, strides=2, padding='valid', use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name='stem_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x) + x = layers.Activation(activation, name='stem_activation')(x) + blocks_args = copy.deepcopy(blocks_args) + b = 0 + blocks = float(sum((round_repeats(args['repeats']) for args in blocks_args))) + for (i, args) in enumerate(blocks_args): + assert args['repeats'] > 0 + args['filters_in'] = round_filters(args['filters_in']) + args['filters_out'] = round_filters(args['filters_out']) + for j in range(round_repeats(args.pop('repeats'))): + if j > 0: + args['strides'] = 1 + args['filters_in'] = args['filters_out'] + x = block(x, activation, drop_connect_rate * b / blocks, name=f'block{i + 1}{chr(j + 97)}_', **args) + b += 1 + x = layers.Conv2D(round_filters(1280), 1, padding='same', use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name='top_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, name='top_bn')(x) + x = layers.Activation(activation, name='top_activation')(x) + if include_top: + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + if dropout_rate > 0: + x = layers.Dropout(dropout_rate, name='top_dropout')(x) + 
imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, kernel_initializer=DENSE_KERNEL_INITIALIZER, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D(name='max_pool')(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + if include_top: + file_suffix = '.h5' + file_hash = WEIGHTS_HASHES[weights_name][0] + else: + file_suffix = '_notop.h5' + file_hash = WEIGHTS_HASHES[weights_name][1] + file_name = name + file_suffix + weights_path = file_utils.get_file(file_name, BASE_WEIGHTS_PATH + file_name, cache_subdir='models', file_hash=file_hash) + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +def block(inputs, activation='swish', drop_rate=0.0, name='', filters_in=32, filters_out=16, kernel_size=3, strides=1, expand_ratio=1, se_ratio=0.0, id_skip=True): + bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1 + filters = filters_in * expand_ratio + if expand_ratio != 1: + x = layers.Conv2D(filters, 1, padding='same', use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'expand_conv')(inputs) + x = layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn')(x) + x = layers.Activation(activation, name=name + 'expand_activation')(x) + else: + x = inputs + if strides == 2: + x = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(x, kernel_size), name=name + 'dwconv_pad')(x) + conv_pad = 'valid' + else: + conv_pad = 'same' + x = layers.DepthwiseConv2D(kernel_size, strides=strides, padding=conv_pad, use_bias=False, depthwise_initializer=CONV_KERNEL_INITIALIZER, name=name + 'dwconv')(x) + x = layers.BatchNormalization(axis=bn_axis, name=name + 'bn')(x) + x = layers.Activation(activation, name=name + 'activation')(x) + if 0 < se_ratio <= 1: + filters_se = max(1, int(filters_in * se_ratio)) + se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x) + if bn_axis == 1: + se_shape = (filters, 1, 1) + else: + se_shape = (1, 1, filters) + se = layers.Reshape(se_shape, name=name + 'se_reshape')(se) + se = layers.Conv2D(filters_se, 1, padding='same', activation=activation, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'se_reduce')(se) + se = layers.Conv2D(filters, 1, padding='same', activation='sigmoid', kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'se_expand')(se) + x = layers.multiply([x, se], name=name + 'se_excite') + x = layers.Conv2D(filters_out, 1, padding='same', use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'project_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn')(x) + if id_skip and strides == 1 and (filters_in == filters_out): + if drop_rate > 0: + x = layers.Dropout(drop_rate, noise_shape=(None, 1, 1, 1), name=name + 'drop')(x) + x = layers.add([x, inputs], name=name + 'add') + return x + +@keras_export(['keras.applications.efficientnet.EfficientNetB0', 'keras.applications.EfficientNetB0']) +def EfficientNetB0(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='efficientnetb0'): + return EfficientNet(1.0, 1.0, 224, 0.2, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, 
input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, weights_name='b0') + +@keras_export(['keras.applications.efficientnet.EfficientNetB1', 'keras.applications.EfficientNetB1']) +def EfficientNetB1(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='efficientnetb1'): + return EfficientNet(1.0, 1.1, 240, 0.2, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, weights_name='b1') + +@keras_export(['keras.applications.efficientnet.EfficientNetB2', 'keras.applications.EfficientNetB2']) +def EfficientNetB2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='efficientnetb2'): + return EfficientNet(1.1, 1.2, 260, 0.3, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, weights_name='b2') + +@keras_export(['keras.applications.efficientnet.EfficientNetB3', 'keras.applications.EfficientNetB3']) +def EfficientNetB3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='efficientnetb3'): + return EfficientNet(1.2, 1.4, 300, 0.3, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, weights_name='b3') + +@keras_export(['keras.applications.efficientnet.EfficientNetB4', 'keras.applications.EfficientNetB4']) +def EfficientNetB4(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='efficientnetb4'): + return EfficientNet(1.4, 1.8, 380, 0.4, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, weights_name='b4') + +@keras_export(['keras.applications.efficientnet.EfficientNetB5', 'keras.applications.EfficientNetB5']) +def EfficientNetB5(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='efficientnetb5'): + return EfficientNet(1.6, 2.2, 456, 0.4, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, weights_name='b5') + +@keras_export(['keras.applications.efficientnet.EfficientNetB6', 'keras.applications.EfficientNetB6']) +def EfficientNetB6(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='efficientnetb6'): + return EfficientNet(1.8, 2.6, 528, 0.5, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, weights_name='b6') + +@keras_export(['keras.applications.efficientnet.EfficientNetB7', 'keras.applications.EfficientNetB7']) +def EfficientNetB7(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, 
classifier_activation='softmax', name='efficientnetb7'): + return EfficientNet(2.0, 3.1, 600, 0.5, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, weights_name='b7') +EfficientNetB0.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB0') +EfficientNetB1.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB1') +EfficientNetB2.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB2') +EfficientNetB3.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB3') +EfficientNetB4.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB4') +EfficientNetB5.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB5') +EfficientNetB6.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB6') +EfficientNetB7.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB7') + +@keras_export('keras.applications.efficientnet.preprocess_input') +def preprocess_input(x, data_format=None): + return x + +@keras_export('keras.applications.efficientnet.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ + +# File: keras-master/keras/src/applications/efficientnet_v2.py +import copy +import math +from keras.src import backend +from keras.src import initializers +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +BASE_WEIGHTS_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/efficientnet_v2/' +WEIGHTS_HASHES = {'b0': ('21ecbf6da12460d5c40bb2f29ceb2188', '893217f2bb855e2983157299931e43ff'), 'b1': ('069f0534ff22adf035c89e2d9547a9dc', '0e80663031ca32d657f9caa404b6ec37'), 'b2': ('424e49f28180edbde1e94797771950a7', '1dfe2e7a5d45b6632553a8961ea609eb'), 'b3': ('1f1fc43bd98a6e4fd8fdfd551e02c7a0', 'f6abf7b5849ac99a89b50dd3fd532856'), '-s': ('e1d88a8495beba45748fedd0cecbe016', 'af0682fb74e8c54910f2d4393339c070'), '-m': ('a3bf6aa3276309f4fc6a34aa114c95cd', '1b8dc055df72dde80d614482840fe342'), '-l': ('27e6d408b53c7ebc868fefa357689935', 'b0b66b5c863aef5b46e8608fe1711615')} +DEFAULT_BLOCKS_ARGS = {'efficientnetv2-s': [{'kernel_size': 3, 'num_repeat': 2, 'input_filters': 24, 'output_filters': 24, 'expand_ratio': 1, 'se_ratio': 0.0, 'strides': 1, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 4, 'input_filters': 24, 'output_filters': 48, 'expand_ratio': 4, 'se_ratio': 0.0, 'strides': 2, 'conv_type': 1}, {'conv_type': 1, 'expand_ratio': 4, 'input_filters': 48, 'kernel_size': 3, 'num_repeat': 4, 'output_filters': 64, 'se_ratio': 0, 'strides': 2}, {'conv_type': 0, 'expand_ratio': 4, 'input_filters': 64, 'kernel_size': 3, 'num_repeat': 6, 'output_filters': 128, 'se_ratio': 0.25, 'strides': 2}, {'conv_type': 0, 'expand_ratio': 6, 'input_filters': 128, 'kernel_size': 3, 'num_repeat': 9, 'output_filters': 160, 'se_ratio': 0.25, 'strides': 1}, {'conv_type': 0, 'expand_ratio': 6, 'input_filters': 160, 'kernel_size': 3, 'num_repeat': 15, 'output_filters': 256, 'se_ratio': 0.25, 'strides': 2}], 'efficientnetv2-m': [{'kernel_size': 3, 'num_repeat': 3, 'input_filters': 24, 'output_filters': 24, 'expand_ratio': 1, 'se_ratio': 0, 'strides': 1, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 5, 'input_filters': 24, 'output_filters': 48, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 
'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 5, 'input_filters': 48, 'output_filters': 80, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 7, 'input_filters': 80, 'output_filters': 160, 'expand_ratio': 4, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 14, 'input_filters': 160, 'output_filters': 176, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 1, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 18, 'input_filters': 176, 'output_filters': 304, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 5, 'input_filters': 304, 'output_filters': 512, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 1, 'conv_type': 0}], 'efficientnetv2-l': [{'kernel_size': 3, 'num_repeat': 4, 'input_filters': 32, 'output_filters': 32, 'expand_ratio': 1, 'se_ratio': 0, 'strides': 1, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 7, 'input_filters': 32, 'output_filters': 64, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 7, 'input_filters': 64, 'output_filters': 96, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 10, 'input_filters': 96, 'output_filters': 192, 'expand_ratio': 4, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 19, 'input_filters': 192, 'output_filters': 224, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 1, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 25, 'input_filters': 224, 'output_filters': 384, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 7, 'input_filters': 384, 'output_filters': 640, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 1, 'conv_type': 0}], 'efficientnetv2-b0': [{'kernel_size': 3, 'num_repeat': 1, 'input_filters': 32, 'output_filters': 16, 'expand_ratio': 1, 'se_ratio': 0, 'strides': 1, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 2, 'input_filters': 16, 'output_filters': 32, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 2, 'input_filters': 32, 'output_filters': 48, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 3, 'input_filters': 48, 'output_filters': 96, 'expand_ratio': 4, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 5, 'input_filters': 96, 'output_filters': 112, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 1, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 8, 'input_filters': 112, 'output_filters': 192, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}], 'efficientnetv2-b1': [{'kernel_size': 3, 'num_repeat': 1, 'input_filters': 32, 'output_filters': 16, 'expand_ratio': 1, 'se_ratio': 0, 'strides': 1, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 2, 'input_filters': 16, 'output_filters': 32, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 2, 'input_filters': 32, 'output_filters': 48, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 3, 'input_filters': 48, 'output_filters': 96, 'expand_ratio': 4, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 5, 'input_filters': 96, 'output_filters': 112, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 1, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 8, 'input_filters': 112, 'output_filters': 192, 
'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}], 'efficientnetv2-b2': [{'kernel_size': 3, 'num_repeat': 1, 'input_filters': 32, 'output_filters': 16, 'expand_ratio': 1, 'se_ratio': 0, 'strides': 1, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 2, 'input_filters': 16, 'output_filters': 32, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 2, 'input_filters': 32, 'output_filters': 48, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 3, 'input_filters': 48, 'output_filters': 96, 'expand_ratio': 4, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 5, 'input_filters': 96, 'output_filters': 112, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 1, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 8, 'input_filters': 112, 'output_filters': 192, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}], 'efficientnetv2-b3': [{'kernel_size': 3, 'num_repeat': 1, 'input_filters': 32, 'output_filters': 16, 'expand_ratio': 1, 'se_ratio': 0, 'strides': 1, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 2, 'input_filters': 16, 'output_filters': 32, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 2, 'input_filters': 32, 'output_filters': 48, 'expand_ratio': 4, 'se_ratio': 0, 'strides': 2, 'conv_type': 1}, {'kernel_size': 3, 'num_repeat': 3, 'input_filters': 48, 'output_filters': 96, 'expand_ratio': 4, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 5, 'input_filters': 96, 'output_filters': 112, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 1, 'conv_type': 0}, {'kernel_size': 3, 'num_repeat': 8, 'input_filters': 112, 'output_filters': 192, 'expand_ratio': 6, 'se_ratio': 0.25, 'strides': 2, 'conv_type': 0}]} +CONV_KERNEL_INITIALIZER = {'class_name': 'VarianceScaling', 'config': {'scale': 2.0, 'mode': 'fan_out', 'distribution': 'truncated_normal'}} +DENSE_KERNEL_INITIALIZER = {'class_name': 'VarianceScaling', 'config': {'scale': 1.0 / 3.0, 'mode': 'fan_out', 'distribution': 'uniform'}} +BASE_DOCSTRING = 'Instantiates the {name} architecture.\n\nReference:\n- [EfficientNetV2: Smaller Models and Faster Training](\n https://arxiv.org/abs/2104.00298) (ICML 2021)\n\nThis function returns a Keras image classification model,\noptionally loaded with weights pre-trained on ImageNet.\n\nFor image classification use cases, see\n[this page for detailed examples](\nhttps://keras.io/api/applications/#usage-examples-for-image-classification-models).\n\nFor transfer learning use cases, make sure to read the\n[guide to transfer learning & fine-tuning](\nhttps://keras.io/guides/transfer_learning/).\n\nNote: each Keras Application expects a specific kind of input preprocessing.\nFor EfficientNetV2, by default input preprocessing is included as a part of\nthe model (as a `Rescaling` layer), and thus\n`keras.applications.efficientnet_v2.preprocess_input` is actually a\npass-through function. In this use case, EfficientNetV2 models expect their\ninputs to be float tensors of pixels with values in the `[0, 255]` range.\nAt the same time, preprocessing as a part of the model (i.e. 
`Rescaling`\nlayer) can be disabled by setting `include_preprocessing` argument to `False`.\nWith preprocessing disabled EfficientNetV2 models expect their inputs to be\nfloat tensors of pixels with values in the `[-1, 1]` range.\n\nArgs:\n include_top: Boolean, whether to include the fully-connected\n layer at the top of the network. Defaults to `True`.\n weights: One of `None` (random initialization),\n `"imagenet"` (pre-training on ImageNet),\n or the path to the weights file to be loaded. Defaults to `"imagenet"`.\n input_tensor: Optional Keras tensor\n (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: Optional shape tuple, only to be specified\n if `include_top` is `False`.\n It should have exactly 3 inputs channels.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`. Defaults to None.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - `"avg"` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `"max"` means that global max pooling will\n be applied.\n classes: Optional number of classes to classify images\n into, only to be specified if `include_top` is `True`, and\n if no `weights` argument is specified. Defaults to 1000 (number of\n ImageNet classes).\n classifier_activation: A string or callable. The activation function to use\n on the "top" layer. Ignored unless `include_top=True`. Set\n `classifier_activation=None` to return the logits of the "top" layer.\n Defaults to `"softmax"`.\n When loading pretrained weights, `classifier_activation` can only\n be `None` or `"softmax"`.\n name: The name of the model (string).\n\nReturns:\n A model instance.\n' + +def round_filters(filters, width_coefficient, min_depth, depth_divisor): + filters *= width_coefficient + minimum_depth = min_depth or depth_divisor + new_filters = max(minimum_depth, int(filters + depth_divisor / 2) // depth_divisor * depth_divisor) + return int(new_filters) + +def round_repeats(repeats, depth_coefficient): + return int(math.ceil(depth_coefficient * repeats)) + +def MBConvBlock(input_filters, output_filters, expand_ratio=1, kernel_size=3, strides=1, se_ratio=0.0, bn_momentum=0.9, activation='swish', survival_probability=0.8, name=None): + bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1 + if name is None: + name = backend.get_uid('block0') + + def apply(inputs): + filters = input_filters * expand_ratio + if expand_ratio != 1: + x = layers.Conv2D(filters=filters, kernel_size=1, strides=1, kernel_initializer=CONV_KERNEL_INITIALIZER, padding='same', data_format=backend.image_data_format(), use_bias=False, name=name + 'expand_conv')(inputs) + x = layers.BatchNormalization(axis=bn_axis, momentum=bn_momentum, name=name + 'expand_bn')(x) + x = layers.Activation(activation, name=name + 'expand_activation')(x) + else: + x = inputs + x = layers.DepthwiseConv2D(kernel_size=kernel_size, strides=strides, depthwise_initializer=CONV_KERNEL_INITIALIZER, padding='same', data_format=backend.image_data_format(), use_bias=False, name=name + 'dwconv2')(x) + x = layers.BatchNormalization(axis=bn_axis, momentum=bn_momentum, name=name + 'bn')(x) + x = layers.Activation(activation, name=name + 'activation')(x) + if 0 < se_ratio <= 1: + filters_se = max(1, int(input_filters * se_ratio)) + se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x) + if bn_axis == 1: + 
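+ # In the channels_first case (bn_axis == 1) the globally pooled SE vector is
+ # reshaped to (filters, 1, 1); in the channels_last case to (1, 1, filters).
+ # Either way it broadcasts against the full feature map in the 'se_excite'
+ # multiply below. E.g. with input_filters=64 and se_ratio=0.25, the squeeze
+ # conv uses filters_se = max(1, int(64 * 0.25)) = 16 channels.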
se_shape = (filters, 1, 1) + else: + se_shape = (1, 1, filters) + se = layers.Reshape(se_shape, name=name + 'se_reshape')(se) + se = layers.Conv2D(filters_se, 1, padding='same', activation=activation, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'se_reduce')(se) + se = layers.Conv2D(filters, 1, padding='same', activation='sigmoid', kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'se_expand')(se) + x = layers.multiply([x, se], name=name + 'se_excite') + x = layers.Conv2D(filters=output_filters, kernel_size=1, strides=1, kernel_initializer=CONV_KERNEL_INITIALIZER, padding='same', data_format=backend.image_data_format(), use_bias=False, name=name + 'project_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, momentum=bn_momentum, name=name + 'project_bn')(x) + if strides == 1 and input_filters == output_filters: + if survival_probability: + x = layers.Dropout(survival_probability, noise_shape=(None, 1, 1, 1), name=name + 'drop')(x) + x = layers.add([x, inputs], name=name + 'add') + return x + return apply + +def FusedMBConvBlock(input_filters, output_filters, expand_ratio=1, kernel_size=3, strides=1, se_ratio=0.0, bn_momentum=0.9, activation='swish', survival_probability=0.8, name=None): + bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1 + if name is None: + name = backend.get_uid('block0') + + def apply(inputs): + filters = input_filters * expand_ratio + if expand_ratio != 1: + x = layers.Conv2D(filters, kernel_size=kernel_size, strides=strides, kernel_initializer=CONV_KERNEL_INITIALIZER, data_format=backend.image_data_format(), padding='same', use_bias=False, name=name + 'expand_conv')(inputs) + x = layers.BatchNormalization(axis=bn_axis, momentum=bn_momentum, name=name + 'expand_bn')(x) + x = layers.Activation(activation=activation, name=name + 'expand_activation')(x) + else: + x = inputs + if 0 < se_ratio <= 1: + filters_se = max(1, int(input_filters * se_ratio)) + se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x) + if bn_axis == 1: + se_shape = (filters, 1, 1) + else: + se_shape = (1, 1, filters) + se = layers.Reshape(se_shape, name=name + 'se_reshape')(se) + se = layers.Conv2D(filters_se, 1, padding='same', activation=activation, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'se_reduce')(se) + se = layers.Conv2D(filters, 1, padding='same', activation='sigmoid', kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'se_expand')(se) + x = layers.multiply([x, se], name=name + 'se_excite') + x = layers.Conv2D(output_filters, kernel_size=1 if expand_ratio != 1 else kernel_size, strides=1 if expand_ratio != 1 else strides, kernel_initializer=CONV_KERNEL_INITIALIZER, padding='same', use_bias=False, name=name + 'project_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, momentum=bn_momentum, name=name + 'project_bn')(x) + if expand_ratio == 1: + x = layers.Activation(activation=activation, name=name + 'project_activation')(x) + if strides == 1 and input_filters == output_filters: + if survival_probability: + x = layers.Dropout(survival_probability, noise_shape=(None, 1, 1, 1), name=name + 'drop')(x) + x = layers.add([x, inputs], name=name + 'add') + return x + return apply + +def EfficientNetV2(width_coefficient, depth_coefficient, default_size, dropout_rate=0.2, drop_connect_rate=0.2, depth_divisor=8, min_depth=8, bn_momentum=0.9, activation='swish', blocks_args='default', name='efficientnetv2', include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, 
classifier_activation='softmax', include_preprocessing=True, weights_name=None): + if blocks_args == 'default': + blocks_args = DEFAULT_BLOCKS_ARGS[name] + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError(f'The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded. Received: weights={weights}') + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError('If using `weights="imagenet"` with `include_top` as true, `classes` should be 1000') + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1 + x = img_input + if include_preprocessing: + num_channels = input_shape[bn_axis - 1] + if name.split('-')[-1].startswith('b') and num_channels == 3: + x = layers.Rescaling(scale=1.0 / 255)(x) + x = layers.Normalization(mean=[0.485, 0.456, 0.406], variance=[0.229 ** 2, 0.224 ** 2, 0.225 ** 2], axis=bn_axis)(x) + else: + x = layers.Rescaling(scale=1.0 / 128.0, offset=-1)(x) + stem_filters = round_filters(filters=blocks_args[0]['input_filters'], width_coefficient=width_coefficient, min_depth=min_depth, depth_divisor=depth_divisor) + x = layers.Conv2D(filters=stem_filters, kernel_size=3, strides=2, kernel_initializer=CONV_KERNEL_INITIALIZER, padding='same', use_bias=False, name='stem_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, momentum=bn_momentum, name='stem_bn')(x) + x = layers.Activation(activation, name='stem_activation')(x) + blocks_args = copy.deepcopy(blocks_args) + b = 0 + blocks = float(sum((args['num_repeat'] for args in blocks_args))) + for (i, args) in enumerate(blocks_args): + assert args['num_repeat'] > 0 + args['input_filters'] = round_filters(filters=args['input_filters'], width_coefficient=width_coefficient, min_depth=min_depth, depth_divisor=depth_divisor) + args['output_filters'] = round_filters(filters=args['output_filters'], width_coefficient=width_coefficient, min_depth=min_depth, depth_divisor=depth_divisor) + block = {0: MBConvBlock, 1: FusedMBConvBlock}[args.pop('conv_type')] + repeats = round_repeats(repeats=args.pop('num_repeat'), depth_coefficient=depth_coefficient) + for j in range(repeats): + if j > 0: + args['strides'] = 1 + args['input_filters'] = args['output_filters'] + x = block(activation=activation, bn_momentum=bn_momentum, survival_probability=drop_connect_rate * b / blocks, name=f'block{i + 1}{chr(j + 97)}_', **args)(x) + b += 1 + top_filters = round_filters(filters=1280, width_coefficient=width_coefficient, min_depth=min_depth, depth_divisor=depth_divisor) + x = layers.Conv2D(filters=top_filters, kernel_size=1, strides=1, kernel_initializer=CONV_KERNEL_INITIALIZER, padding='same', data_format=backend.image_data_format(), use_bias=False, name='top_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, momentum=bn_momentum, name='top_bn')(x) + x = layers.Activation(activation=activation, name='top_activation')(x) + if include_top: + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + if dropout_rate > 0: + x = layers.Dropout(dropout_rate, name='top_dropout')(x) + 
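+ # Classifier head: validate_activation() below rejects anything other than
+ # None or 'softmax' whenever weights are being loaded, since the shipped
+ # 'predictions' layer was trained with a softmax output (see BASE_DOCSTRING).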
imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, kernel_initializer=DENSE_KERNEL_INITIALIZER, bias_initializer=initializers.Constant(0.0), name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D(name='max_pool')(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + if include_top: + file_suffix = '.h5' + file_hash = WEIGHTS_HASHES[weights_name][0] + else: + file_suffix = '_notop.h5' + file_hash = WEIGHTS_HASHES[weights_name][1] + file_name = name + file_suffix + weights_path = file_utils.get_file(file_name, BASE_WEIGHTS_PATH + file_name, cache_subdir='models', file_hash=file_hash) + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +@keras_export(['keras.applications.efficientnet_v2.EfficientNetV2B0', 'keras.applications.EfficientNetV2B0']) +def EfficientNetV2B0(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', include_preprocessing=True, name='efficientnetv2-b0'): + return EfficientNetV2(width_coefficient=1.0, depth_coefficient=1.0, default_size=224, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, include_preprocessing=include_preprocessing, weights_name='b0') + +@keras_export(['keras.applications.efficientnet_v2.EfficientNetV2B1', 'keras.applications.EfficientNetV2B1']) +def EfficientNetV2B1(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', include_preprocessing=True, name='efficientnetv2-b1'): + return EfficientNetV2(width_coefficient=1.0, depth_coefficient=1.1, default_size=240, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, include_preprocessing=include_preprocessing, weights_name='b1') + +@keras_export(['keras.applications.efficientnet_v2.EfficientNetV2B2', 'keras.applications.EfficientNetV2B2']) +def EfficientNetV2B2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', include_preprocessing=True, name='efficientnetv2-b2'): + return EfficientNetV2(width_coefficient=1.1, depth_coefficient=1.2, default_size=260, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, include_preprocessing=include_preprocessing, weights_name='b2') + +@keras_export(['keras.applications.efficientnet_v2.EfficientNetV2B3', 'keras.applications.EfficientNetV2B3']) +def EfficientNetV2B3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', include_preprocessing=True, name='efficientnetv2-b3'): + return EfficientNetV2(width_coefficient=1.2, depth_coefficient=1.4, default_size=300, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, 
classes=classes, classifier_activation=classifier_activation, include_preprocessing=include_preprocessing, weights_name='b3') + +@keras_export(['keras.applications.efficientnet_v2.EfficientNetV2S', 'keras.applications.EfficientNetV2S']) +def EfficientNetV2S(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', include_preprocessing=True, name='efficientnetv2-s'): + return EfficientNetV2(width_coefficient=1.0, depth_coefficient=1.0, default_size=384, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, include_preprocessing=include_preprocessing, weights_name='-s') + +@keras_export(['keras.applications.efficientnet_v2.EfficientNetV2M', 'keras.applications.EfficientNetV2M']) +def EfficientNetV2M(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', include_preprocessing=True, name='efficientnetv2-m'): + return EfficientNetV2(width_coefficient=1.0, depth_coefficient=1.0, default_size=480, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, include_preprocessing=include_preprocessing, weights_name='-m') + +@keras_export(['keras.applications.efficientnet_v2.EfficientNetV2L', 'keras.applications.EfficientNetV2L']) +def EfficientNetV2L(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', include_preprocessing=True, name='efficientnetv2-l'): + return EfficientNetV2(width_coefficient=1.0, depth_coefficient=1.0, default_size=480, name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation, include_preprocessing=include_preprocessing, weights_name='-l') +EfficientNetV2B0.__doc__ = BASE_DOCSTRING.format(name='EfficientNetV2B0') +EfficientNetV2B1.__doc__ = BASE_DOCSTRING.format(name='EfficientNetV2B1') +EfficientNetV2B2.__doc__ = BASE_DOCSTRING.format(name='EfficientNetV2B2') +EfficientNetV2B3.__doc__ = BASE_DOCSTRING.format(name='EfficientNetV2B3') +EfficientNetV2S.__doc__ = BASE_DOCSTRING.format(name='EfficientNetV2S') +EfficientNetV2M.__doc__ = BASE_DOCSTRING.format(name='EfficientNetV2M') +EfficientNetV2L.__doc__ = BASE_DOCSTRING.format(name='EfficientNetV2L') + +@keras_export('keras.applications.efficientnet_v2.preprocess_input') +def preprocess_input(x, data_format=None): + return x + +@keras_export('keras.applications.efficientnet_v2.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ + +# File: keras-master/keras/src/applications/imagenet_utils.py +import json +import warnings +import numpy as np +from keras.src import activations +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.utils import file_utils +CLASS_INDEX = None +CLASS_INDEX_PATH = 'https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json' +PREPROCESS_INPUT_DOC = '\n Preprocesses a tensor or Numpy array encoding a batch of images.\n\n Usage example with 
`applications.MobileNet`:\n\n ```python\n i = keras.layers.Input([None, None, 3], dtype="uint8")\n x = ops.cast(i, "float32")\n x = keras.applications.mobilenet.preprocess_input(x)\n core = keras.applications.MobileNet()\n x = core(x)\n model = keras.Model(inputs=[i], outputs=[x])\n result = model(image)\n ```\n\n Args:\n x: A floating point `numpy.array` or a backend-native tensor,\n 3D or 4D with 3 color\n channels, with values in the range [0, 255].\n The preprocessed data are written over the input data\n if the data types are compatible. To avoid this\n behaviour, `numpy.copy(x)` can be used.\n data_format: Optional data format of the image tensor/array. None, means\n the global setting `keras.backend.image_data_format()` is used\n (unless you changed it, it uses "channels_last").{mode}\n Defaults to `None`.\n\n Returns:\n Preprocessed array with type `float32`.\n {ret}\n\n Raises:\n {error}\n ' +PREPROCESS_INPUT_MODE_DOC = '\n mode: One of "caffe", "tf" or "torch".\n - caffe: will convert the images from RGB to BGR,\n then will zero-center each color channel with\n respect to the ImageNet dataset,\n without scaling.\n - tf: will scale pixels between -1 and 1,\n sample-wise.\n - torch: will scale pixels between 0 and 1 and then\n will normalize each channel with respect to the\n ImageNet dataset.\n Defaults to `"caffe"`.\n ' +PREPROCESS_INPUT_DEFAULT_ERROR_DOC = '\n ValueError: In case of unknown `mode` or `data_format` argument.' +PREPROCESS_INPUT_ERROR_DOC = '\n ValueError: In case of unknown `data_format` argument.' +PREPROCESS_INPUT_RET_DOC_TF = '\n The inputs pixel values are scaled between -1 and 1, sample-wise.' +PREPROCESS_INPUT_RET_DOC_TORCH = '\n The input pixels values are scaled between 0 and 1 and each channel is\n normalized with respect to the ImageNet dataset.' +PREPROCESS_INPUT_RET_DOC_CAFFE = '\n The images are converted from RGB to BGR, then each color channel is\n zero-centered with respect to the ImageNet dataset, without scaling.' + +@keras_export('keras.applications.imagenet_utils.preprocess_input') +def preprocess_input(x, data_format=None, mode='caffe'): + if mode not in {'caffe', 'tf', 'torch'}: + raise ValueError(f'Expected mode to be one of `caffe`, `tf` or `torch`. Received: mode={mode}') + if data_format is None: + data_format = backend.image_data_format() + elif data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f'Expected data_format to be one of `channels_first` or `channels_last`. Received: data_format={data_format}') + if isinstance(x, np.ndarray): + return _preprocess_numpy_input(x, data_format=data_format, mode=mode) + else: + return _preprocess_tensor_input(x, data_format=data_format, mode=mode) +preprocess_input.__doc__ = PREPROCESS_INPUT_DOC.format(mode=PREPROCESS_INPUT_MODE_DOC, ret='', error=PREPROCESS_INPUT_DEFAULT_ERROR_DOC) + +@keras_export('keras.applications.imagenet_utils.decode_predictions') +def decode_predictions(preds, top=5): + global CLASS_INDEX + if len(preds.shape) != 2 or preds.shape[1] != 1000: + raise ValueError(f'`decode_predictions` expects a batch of predictions (i.e. a 2D array of shape (samples, 1000)). 
Received array with shape: {preds.shape}') + if CLASS_INDEX is None: + fpath = file_utils.get_file('imagenet_class_index.json', CLASS_INDEX_PATH, cache_subdir='models', file_hash='c2c37ea517e94d9795004a39431a14cb') + with open(fpath) as f: + CLASS_INDEX = json.load(f) + results = [] + preds = ops.convert_to_numpy(preds) + for pred in preds: + top_indices = pred.argsort()[-top:][::-1] + result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices] + result.sort(key=lambda x: x[2], reverse=True) + results.append(result) + return results + +def _preprocess_numpy_input(x, data_format, mode): + if not issubclass(x.dtype.type, np.floating): + x = x.astype(backend.floatx(), copy=False) + if mode == 'tf': + x /= 127.5 + x -= 1.0 + return x + elif mode == 'torch': + x /= 255.0 + mean = [0.485, 0.456, 0.406] + std = [0.229, 0.224, 0.225] + else: + if data_format == 'channels_first': + if len(x.shape) == 3: + x = x[::-1, ...] + else: + x = x[:, ::-1, ...] + else: + x = x[..., ::-1] + mean = [103.939, 116.779, 123.68] + std = None + if data_format == 'channels_first': + if len(x.shape) == 3: + x[0, :, :] -= mean[0] + x[1, :, :] -= mean[1] + x[2, :, :] -= mean[2] + if std is not None: + x[0, :, :] /= std[0] + x[1, :, :] /= std[1] + x[2, :, :] /= std[2] + else: + x[:, 0, :, :] -= mean[0] + x[:, 1, :, :] -= mean[1] + x[:, 2, :, :] -= mean[2] + if std is not None: + x[:, 0, :, :] /= std[0] + x[:, 1, :, :] /= std[1] + x[:, 2, :, :] /= std[2] + else: + x[..., 0] -= mean[0] + x[..., 1] -= mean[1] + x[..., 2] -= mean[2] + if std is not None: + x[..., 0] /= std[0] + x[..., 1] /= std[1] + x[..., 2] /= std[2] + return x + +def _preprocess_tensor_input(x, data_format, mode): + ndim = len(x.shape) + if mode == 'tf': + x /= 127.5 + x -= 1.0 + return x + elif mode == 'torch': + x /= 255.0 + mean = [0.485, 0.456, 0.406] + std = [0.229, 0.224, 0.225] + else: + if data_format == 'channels_first': + if len(x.shape) == 3: + x = ops.stack([x[i, ...] for i in (2, 1, 0)], axis=0) + else: + x = ops.stack([x[:, i, :] for i in (2, 1, 0)], axis=1) + else: + x = ops.stack([x[..., i] for i in (2, 1, 0)], axis=-1) + mean = [103.939, 116.779, 123.68] + std = None + mean_tensor = ops.convert_to_tensor(-np.array(mean), dtype=x.dtype) + if data_format == 'channels_first': + mean_tensor = ops.reshape(mean_tensor, (1, 3) + (1,) * (ndim - 2)) + else: + mean_tensor = ops.reshape(mean_tensor, (1,) * (ndim - 1) + (3,)) + x += mean_tensor + if std is not None: + std_tensor = ops.convert_to_tensor(np.array(std), dtype=x.dtype) + if data_format == 'channels_first': + std_tensor = ops.reshape(std_tensor, (-1, 1, 1)) + x /= std_tensor + return x + +def obtain_input_shape(input_shape, default_size, min_size, data_format, require_flatten, weights=None): + if weights != 'imagenet' and input_shape and (len(input_shape) == 3): + if data_format == 'channels_first': + correct_channel_axis = 1 if len(input_shape) == 4 else 0 + if input_shape[correct_channel_axis] not in {1, 3}: + warnings.warn(f'This model usually expects 1 or 3 input channels. However, it was passed an input_shape with {input_shape[0]} input channels.', stacklevel=2) + default_shape = (input_shape[0], default_size, default_size) + else: + if input_shape[-1] not in {1, 3}: + warnings.warn(f'This model usually expects 1 or 3 input channels. 
However, it was passed an input_shape with {input_shape[-1]} input channels.', stacklevel=2) + default_shape = (default_size, default_size, input_shape[-1]) + elif data_format == 'channels_first': + default_shape = (3, default_size, default_size) + else: + default_shape = (default_size, default_size, 3) + if weights == 'imagenet' and require_flatten: + if input_shape is not None: + if input_shape != default_shape: + raise ValueError(f'When setting `include_top=True` and loading `imagenet` weights, `input_shape` should be {default_shape}. Received: input_shape={input_shape}') + return default_shape + if input_shape: + if data_format == 'channels_first': + if input_shape is not None: + if len(input_shape) != 3: + raise ValueError('`input_shape` must be a tuple of three integers.') + if input_shape[0] != 3 and weights == 'imagenet': + raise ValueError(f'The input must have 3 channels; Received `input_shape={input_shape}`') + if input_shape[1] is not None and input_shape[1] < min_size or (input_shape[2] is not None and input_shape[2] < min_size): + raise ValueError(f'Input size must be at least {min_size}x{min_size}; Received: input_shape={input_shape}') + elif input_shape is not None: + if len(input_shape) != 3: + raise ValueError('`input_shape` must be a tuple of three integers.') + if input_shape[-1] != 3 and weights == 'imagenet': + raise ValueError(f'The input must have 3 channels; Received `input_shape={input_shape}`') + if input_shape[0] is not None and input_shape[0] < min_size or (input_shape[1] is not None and input_shape[1] < min_size): + raise ValueError(f'Input size must be at least {min_size}x{min_size}; Received: input_shape={input_shape}') + elif require_flatten: + input_shape = default_shape + elif data_format == 'channels_first': + input_shape = (3, None, None) + else: + input_shape = (None, None, 3) + if require_flatten: + if None in input_shape: + raise ValueError(f'If `include_top` is True, you should specify a static `input_shape`. 
Received: input_shape={input_shape}') + return input_shape + +def correct_pad(inputs, kernel_size): + img_dim = 2 if backend.image_data_format() == 'channels_first' else 1 + input_size = inputs.shape[img_dim:img_dim + 2] + if isinstance(kernel_size, int): + kernel_size = (kernel_size, kernel_size) + if input_size[0] is None: + adjust = (1, 1) + else: + adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2) + correct = (kernel_size[0] // 2, kernel_size[1] // 2) + return ((correct[0] - adjust[0], correct[0]), (correct[1] - adjust[1], correct[1])) + +def validate_activation(classifier_activation, weights): + if weights is None: + return + classifier_activation = activations.get(classifier_activation) + if classifier_activation not in {activations.get('softmax'), activations.get(None)}: + raise ValueError(f'Only `None` and `softmax` activations are allowed for the `classifier_activation` argument when using pretrained weights, with `include_top=True`; Received: classifier_activation={classifier_activation}') + +# File: keras-master/keras/src/applications/inception_resnet_v2.py +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.layers.layer import Layer +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +BASE_WEIGHT_URL = 'https://storage.googleapis.com/tensorflow/keras-applications/inception_resnet_v2/' + +@keras_export(['keras.applications.inception_resnet_v2.InceptionResNetV2', 'keras.applications.InceptionResNetV2']) +def InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='inception_resnet_v2'): + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.') + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError(f'If using `weights="imagenet"` with `include_top=True`, `classes` should be 1000. 
Received classes={classes}') + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=299, min_size=75, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid') + x = conv2d_bn(x, 32, 3, padding='valid') + x = conv2d_bn(x, 64, 3) + x = layers.MaxPooling2D(3, strides=2)(x) + x = conv2d_bn(x, 80, 1, padding='valid') + x = conv2d_bn(x, 192, 3, padding='valid') + x = layers.MaxPooling2D(3, strides=2)(x) + branch_0 = conv2d_bn(x, 96, 1) + branch_1 = conv2d_bn(x, 48, 1) + branch_1 = conv2d_bn(branch_1, 64, 5) + branch_2 = conv2d_bn(x, 64, 1) + branch_2 = conv2d_bn(branch_2, 96, 3) + branch_2 = conv2d_bn(branch_2, 96, 3) + branch_pool = layers.AveragePooling2D(3, strides=1, padding='same')(x) + branch_pool = conv2d_bn(branch_pool, 64, 1) + branches = [branch_0, branch_1, branch_2, branch_pool] + channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3 + x = layers.Concatenate(axis=channel_axis, name='mixed_5b')(branches) + for block_idx in range(1, 11): + x = inception_resnet_block(x, scale=0.17, block_type='block35', block_idx=block_idx) + branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid') + branch_1 = conv2d_bn(x, 256, 1) + branch_1 = conv2d_bn(branch_1, 256, 3) + branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid') + branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x) + branches = [branch_0, branch_1, branch_pool] + x = layers.Concatenate(axis=channel_axis, name='mixed_6a')(branches) + for block_idx in range(1, 21): + x = inception_resnet_block(x, scale=0.1, block_type='block17', block_idx=block_idx) + branch_0 = conv2d_bn(x, 256, 1) + branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid') + branch_1 = conv2d_bn(x, 256, 1) + branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid') + branch_2 = conv2d_bn(x, 256, 1) + branch_2 = conv2d_bn(branch_2, 288, 3) + branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid') + branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x) + branches = [branch_0, branch_1, branch_2, branch_pool] + x = layers.Concatenate(axis=channel_axis, name='mixed_7a')(branches) + for block_idx in range(1, 10): + x = inception_resnet_block(x, scale=0.2, block_type='block8', block_idx=block_idx) + x = inception_resnet_block(x, scale=1.0, activation=None, block_type='block8', block_idx=10) + x = conv2d_bn(x, 1536, 1, name='conv_7b') + if include_top: + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D()(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D()(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + if include_top: + fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5' + weights_path = file_utils.get_file(fname, BASE_WEIGHT_URL + fname, cache_subdir='models', file_hash='e693bd0210a403b3192acc6073ad2e96') + else: + fname = 
'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5' + weights_path = file_utils.get_file(fname, BASE_WEIGHT_URL + fname, cache_subdir='models', file_hash='d19885ff4a710c122648d3b5c3b684e4') + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +def conv2d_bn(x, filters, kernel_size, strides=1, padding='same', activation='relu', use_bias=False, name=None): + x = layers.Conv2D(filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias, name=name)(x) + if not use_bias: + bn_axis = 1 if backend.image_data_format() == 'channels_first' else 3 + bn_name = None if name is None else name + '_bn' + x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x) + if activation is not None: + ac_name = None if name is None else name + '_ac' + x = layers.Activation(activation, name=ac_name)(x) + return x + +class CustomScaleLayer(Layer): + + def __init__(self, scale, **kwargs): + super().__init__(**kwargs) + self.scale = scale + + def get_config(self): + config = super().get_config() + config.update({'scale': self.scale}) + return config + + def call(self, inputs): + return inputs[0] + inputs[1] * self.scale + +def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'): + if block_type == 'block35': + branch_0 = conv2d_bn(x, 32, 1) + branch_1 = conv2d_bn(x, 32, 1) + branch_1 = conv2d_bn(branch_1, 32, 3) + branch_2 = conv2d_bn(x, 32, 1) + branch_2 = conv2d_bn(branch_2, 48, 3) + branch_2 = conv2d_bn(branch_2, 64, 3) + branches = [branch_0, branch_1, branch_2] + elif block_type == 'block17': + branch_0 = conv2d_bn(x, 192, 1) + branch_1 = conv2d_bn(x, 128, 1) + branch_1 = conv2d_bn(branch_1, 160, [1, 7]) + branch_1 = conv2d_bn(branch_1, 192, [7, 1]) + branches = [branch_0, branch_1] + elif block_type == 'block8': + branch_0 = conv2d_bn(x, 192, 1) + branch_1 = conv2d_bn(x, 192, 1) + branch_1 = conv2d_bn(branch_1, 224, [1, 3]) + branch_1 = conv2d_bn(branch_1, 256, [3, 1]) + branches = [branch_0, branch_1] + else: + raise ValueError('Unknown Inception-ResNet block type. 
Expects "block35", "block17" or "block8", but got: ' + str(block_type)) + block_name = block_type + '_' + str(block_idx) + channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3 + mixed = layers.Concatenate(axis=channel_axis, name=block_name + '_mixed')(branches) + up = conv2d_bn(mixed, x.shape[channel_axis], 1, activation=None, use_bias=True, name=block_name + '_conv') + x = CustomScaleLayer(scale)([x, up]) + if activation is not None: + x = layers.Activation(activation, name=block_name + '_ac')(x) + return x + +@keras_export('keras.applications.inception_resnet_v2.preprocess_input') +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf') + +@keras_export('keras.applications.inception_resnet_v2.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ + +# File: keras-master/keras/src/applications/inception_v3.py +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +WEIGHTS_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels.h5' +WEIGHTS_PATH_NO_TOP = 'https://storage.googleapis.com/tensorflow/keras-applications/inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5' + +@keras_export(['keras.applications.inception_v3.InceptionV3', 'keras.applications.InceptionV3']) +def InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='inception_v3'): + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError(f'The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded; Received: weights={weights}') + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError(f'If using `weights="imagenet"` with `include_top=True`, `classes` should be 1000. 
Received classes={classes}') + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=299, min_size=75, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + if backend.image_data_format() == 'channels_first': + channel_axis = 1 + else: + channel_axis = 3 + x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid') + x = conv2d_bn(x, 32, 3, 3, padding='valid') + x = conv2d_bn(x, 64, 3, 3) + x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x) + x = conv2d_bn(x, 80, 1, 1, padding='valid') + x = conv2d_bn(x, 192, 3, 3, padding='valid') + x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x) + branch1x1 = conv2d_bn(x, 64, 1, 1) + branch5x5 = conv2d_bn(x, 48, 1, 1) + branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) + branch3x3dbl = conv2d_bn(x, 64, 1, 1) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) + branch_pool = conv2d_bn(branch_pool, 32, 1, 1) + x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed0') + branch1x1 = conv2d_bn(x, 64, 1, 1) + branch5x5 = conv2d_bn(x, 48, 1, 1) + branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) + branch3x3dbl = conv2d_bn(x, 64, 1, 1) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) + branch_pool = conv2d_bn(branch_pool, 64, 1, 1) + x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed1') + branch1x1 = conv2d_bn(x, 64, 1, 1) + branch5x5 = conv2d_bn(x, 48, 1, 1) + branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) + branch3x3dbl = conv2d_bn(x, 64, 1, 1) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) + branch_pool = conv2d_bn(branch_pool, 64, 1, 1) + x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed2') + branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid') + branch3x3dbl = conv2d_bn(x, 64, 1, 1) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) + branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid') + branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x) + x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3') + branch1x1 = conv2d_bn(x, 192, 1, 1) + branch7x7 = conv2d_bn(x, 128, 1, 1) + branch7x7 = conv2d_bn(branch7x7, 128, 1, 7) + branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) + branch7x7dbl = conv2d_bn(x, 128, 1, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7) + branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) + branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) + branch_pool = conv2d_bn(branch_pool, 192, 1, 1) + x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name='mixed4') + for i in range(2): + branch1x1 = conv2d_bn(x, 192, 1, 1) + branch7x7 = conv2d_bn(x, 160, 1, 1) + 
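+ # The 1x7 convolution below and the 7x1 convolution after it factorize a
+ # single 7x7 convolution into two asymmetric ones, the spatial factorization
+ # Inception v3 uses to cut parameters while keeping the receptive field.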
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7) + branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) + branch7x7dbl = conv2d_bn(x, 160, 1, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7) + branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) + branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) + branch_pool = conv2d_bn(branch_pool, 192, 1, 1) + x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name='mixed' + str(5 + i)) + branch1x1 = conv2d_bn(x, 192, 1, 1) + branch7x7 = conv2d_bn(x, 192, 1, 1) + branch7x7 = conv2d_bn(branch7x7, 192, 1, 7) + branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) + branch7x7dbl = conv2d_bn(x, 192, 1, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1) + branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) + branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) + branch_pool = conv2d_bn(branch_pool, 192, 1, 1) + x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name='mixed7') + branch3x3 = conv2d_bn(x, 192, 1, 1) + branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, strides=(2, 2), padding='valid') + branch7x7x3 = conv2d_bn(x, 192, 1, 1) + branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7) + branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1) + branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid') + branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x) + x = layers.concatenate([branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8') + for i in range(2): + branch1x1 = conv2d_bn(x, 320, 1, 1) + branch3x3 = conv2d_bn(x, 384, 1, 1) + branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3) + branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1) + branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i)) + branch3x3dbl = conv2d_bn(x, 448, 1, 1) + branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3) + branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3) + branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1) + branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis) + branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) + branch_pool = conv2d_bn(branch_pool, 192, 1, 1) + x = layers.concatenate([branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed' + str(9 + i)) + if include_top: + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D()(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D()(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + if include_top: + weights_path = file_utils.get_file('inception_v3_weights_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH, cache_subdir='models', file_hash='9a0d58056eeedaa3f26cb7ebd46da564') + else: + weights_path = file_utils.get_file('inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models', file_hash='bcbd6486424b2319ff4ef7d526e38f63') + 
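+ # get_file() downloads the weights once into the Keras cache (~/.keras/models
+ # by default) and verifies file_hash on reuse; load_weights() then restores
+ # the pretrained parameters. A usage sketch (not part of the library source):
+ # InceptionV3(weights='imagenet') triggers exactly this download path, while
+ # InceptionV3(weights=None) skips it and keeps the random initialization.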
model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1), name=None): + if name is not None: + bn_name = name + '_bn' + conv_name = name + '_conv' + else: + bn_name = None + conv_name = None + if backend.image_data_format() == 'channels_first': + bn_axis = 1 + else: + bn_axis = 3 + x = layers.Conv2D(filters, (num_row, num_col), strides=strides, padding=padding, use_bias=False, name=conv_name)(x) + x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x) + x = layers.Activation('relu', name=name)(x) + return x + +@keras_export('keras.applications.inception_v3.preprocess_input') +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf') + +@keras_export('keras.applications.inception_v3.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ + +# File: keras-master/keras/src/applications/mobilenet.py +import warnings +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +BASE_WEIGHT_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/' + +@keras_export(['keras.applications.mobilenet.MobileNet', 'keras.applications.MobileNet']) +def MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=0.001, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000, classifier_activation='softmax', name=None): + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError(f"The `weights` argument should be either `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. Received weights={weights}") + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError(f"If using `weights='imagenet'` with `include_top=True`, `classes` should be 1000. Received classes={classes}") + if input_shape is None: + default_size = 224 + else: + if backend.image_data_format() == 'channels_first': + rows = input_shape[1] + cols = input_shape[2] + else: + rows = input_shape[0] + cols = input_shape[1] + if rows == cols and rows in [128, 160, 192, 224]: + default_size = rows + else: + default_size = 224 + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if backend.image_data_format() == 'channels_last': + (row_axis, col_axis) = (0, 1) + else: + (row_axis, col_axis) = (1, 2) + rows = input_shape[row_axis] + cols = input_shape[col_axis] + if weights == 'imagenet': + if depth_multiplier != 1: + raise ValueError(f'If imagenet weights are being loaded, depth multiplier must be 1. Received depth_multiplier={depth_multiplier}') + if alpha not in [0.25, 0.5, 0.75, 1.0]: + raise ValueError(f'If imagenet weights are being loaded, alpha can be one of `0.25`, `0.50`, `0.75` or `1.0` only. 
Received alpha={alpha}') + if rows != cols or rows not in [128, 160, 192, 224]: + rows = 224 + warnings.warn('`input_shape` is undefined or non-square, or `rows` is not in [128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.', stacklevel=2) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + x = _conv_block(img_input, 32, alpha, strides=(2, 2)) + x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1) + x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2) + x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3) + x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4) + x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5) + x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6) + x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7) + x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8) + x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9) + x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10) + x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11) + x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12) + x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13) + if include_top: + x = layers.GlobalAveragePooling2D(keepdims=True)(x) + x = layers.Dropout(dropout, name='dropout')(x) + x = layers.Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x) + x = layers.Reshape((classes,), name='reshape_2')(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Activation(activation=classifier_activation, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D()(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D()(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + if name is None: + name = f'mobilenet_{alpha:0.2f}_{rows}' + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + if alpha == 1.0: + alpha_text = '1_0' + elif alpha == 0.75: + alpha_text = '7_5' + elif alpha == 0.5: + alpha_text = '5_0' + else: + alpha_text = '2_5' + if include_top: + model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows) + weight_path = BASE_WEIGHT_PATH + model_name + weights_path = file_utils.get_file(model_name, weight_path, cache_subdir='models') + else: + model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows) + weight_path = BASE_WEIGHT_PATH + model_name + weights_path = file_utils.get_file(model_name, weight_path, cache_subdir='models') + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)): + channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1 + filters = int(filters * alpha) + x = layers.Conv2D(filters, kernel, padding='same', use_bias=False, strides=strides, name='conv1')(inputs) + x = layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x) + return layers.ReLU(6.0, name='conv1_relu')(x) + +def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplier=1, strides=(1, 1), 
block_id=1): + channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1 + pointwise_conv_filters = int(pointwise_conv_filters * alpha) + if strides == (1, 1): + x = inputs + else: + x = layers.ZeroPadding2D(((0, 1), (0, 1)), name='conv_pad_%d' % block_id)(inputs) + x = layers.DepthwiseConv2D((3, 3), padding='same' if strides == (1, 1) else 'valid', depth_multiplier=depth_multiplier, strides=strides, use_bias=False, name='conv_dw_%d' % block_id)(x) + x = layers.BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x) + x = layers.ReLU(6.0, name='conv_dw_%d_relu' % block_id)(x) + x = layers.Conv2D(pointwise_conv_filters, (1, 1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%d' % block_id)(x) + x = layers.BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x) + return layers.ReLU(6.0, name='conv_pw_%d_relu' % block_id)(x) + +@keras_export('keras.applications.mobilenet.preprocess_input') +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf') + +@keras_export('keras.applications.mobilenet.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ + +# File: keras-master/keras/src/applications/mobilenet_v2.py +import warnings +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +BASE_WEIGHT_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/' + +@keras_export(['keras.applications.mobilenet_v2.MobileNetV2', 'keras.applications.MobileNetV2']) +def MobileNetV2(input_shape=None, alpha=1.0, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000, classifier_activation='softmax', name=None): + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError(f'The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded. Received `weights={weights}`') + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError(f'If using `weights="imagenet"` with `include_top` as true, `classes` should be 1000. Received `classes={classes}`') + if input_shape is not None and input_tensor is not None: + try: + is_input_t_tensor = backend.is_keras_tensor(input_tensor) + except ValueError: + try: + is_input_t_tensor = backend.is_keras_tensor(operation_utils.get_source_inputs(input_tensor)) + except ValueError: + raise ValueError(f'input_tensor: {input_tensor} is not type input_tensor. 
Received `type(input_tensor)={type(input_tensor)}`') + if is_input_t_tensor: + if backend.image_data_format() == 'channels_first': + if input_tensor.shape[1] != input_shape[1]: + raise ValueError(f'input_shape[1] must equal shape(input_tensor)[1] when `image_data_format` is `channels_first`; Received `input_tensor.shape={input_tensor.shape}`, `input_shape={input_shape}`') + elif input_tensor.shape[2] != input_shape[1]: + raise ValueError(f'input_tensor.shape[2] must equal input_shape[1]; Received `input_tensor.shape={input_tensor.shape}`, `input_shape={input_shape}`') + else: + raise ValueError(f'input_tensor is not a Keras tensor; Received `input_tensor={input_tensor}`') + if input_shape is None and input_tensor is not None: + try: + backend.is_keras_tensor(input_tensor) + except ValueError: + raise ValueError(f'input_tensor must be a valid Keras tensor type; Received {input_tensor} of type {type(input_tensor)}') + if input_shape is None and (not backend.is_keras_tensor(input_tensor)): + default_size = 224 + elif input_shape is None and backend.is_keras_tensor(input_tensor): + if backend.image_data_format() == 'channels_first': + rows = input_tensor.shape[2] + cols = input_tensor.shape[3] + else: + rows = input_tensor.shape[1] + cols = input_tensor.shape[2] + if rows == cols and rows in [96, 128, 160, 192, 224]: + default_size = rows + else: + default_size = 224 + elif input_shape is None: + default_size = 224 + else: + if backend.image_data_format() == 'channels_first': + rows = input_shape[1] + cols = input_shape[2] + else: + rows = input_shape[0] + cols = input_shape[1] + if rows == cols and rows in [96, 128, 160, 192, 224]: + default_size = rows + else: + default_size = 224 + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if backend.image_data_format() == 'channels_last': + (row_axis, col_axis) = (0, 1) + else: + (row_axis, col_axis) = (1, 2) + rows = input_shape[row_axis] + cols = input_shape[col_axis] + if weights == 'imagenet': + if alpha not in [0.35, 0.5, 0.75, 1.0, 1.3, 1.4]: + raise ValueError(f'If imagenet weights are being loaded, alpha must be one of `0.35`, `0.50`, `0.75`, `1.0`, `1.3` or `1.4` only; Received `alpha={alpha}`') + if rows != cols or rows not in [96, 128, 160, 192, 224]: + rows = 224 + warnings.warn('`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. 
Weights for input shape (224, 224) will be loaded as the default.', stacklevel=2) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1 + first_block_filters = _make_divisible(32 * alpha, 8) + x = layers.Conv2D(first_block_filters, kernel_size=3, strides=(2, 2), padding='same', use_bias=False, name='Conv1')(img_input) + x = layers.BatchNormalization(axis=channel_axis, epsilon=0.001, momentum=0.999, name='bn_Conv1')(x) + x = layers.ReLU(6.0, name='Conv1_relu')(x) + x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1, expansion=1, block_id=0) + x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2, expansion=6, block_id=1) + x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=2) + x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2, expansion=6, block_id=3) + x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=4) + x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=5) + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2, expansion=6, block_id=6) + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=7) + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=8) + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=9) + x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=10) + x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=11) + x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=12) + x = _inverted_res_block(x, filters=160, alpha=alpha, stride=2, expansion=6, block_id=13) + x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=14) + x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=15) + x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, expansion=6, block_id=16) + if alpha > 1.0: + last_block_filters = _make_divisible(1280 * alpha, 8) + else: + last_block_filters = 1280 + x = layers.Conv2D(last_block_filters, kernel_size=1, use_bias=False, name='Conv_1')(x) + x = layers.BatchNormalization(axis=channel_axis, epsilon=0.001, momentum=0.999, name='Conv_1_bn')(x) + x = layers.ReLU(6.0, name='out_relu')(x) + if include_top: + x = layers.GlobalAveragePooling2D()(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D()(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D()(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + if name is None: + name = f'mobilenetv2_{alpha:0.2f}_{rows}' + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + if include_top: + model_name = 'mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' + str(float(alpha)) + '_' + str(rows) + '.h5' + weight_path = BASE_WEIGHT_PATH + model_name + weights_path = file_utils.get_file(model_name, weight_path, cache_subdir='models') + else: + model_name = 'mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' + str(float(alpha)) + '_' + str(rows) + 
'_no_top' + '.h5' + weight_path = BASE_WEIGHT_PATH + model_name + weights_path = file_utils.get_file(model_name, weight_path, cache_subdir='models') + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id): + channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1 + in_channels = inputs.shape[channel_axis] + pointwise_conv_filters = int(filters * alpha) + pointwise_filters = _make_divisible(pointwise_conv_filters, 8) + x = inputs + prefix = f'block_{block_id}_' + if block_id: + x = layers.Conv2D(expansion * in_channels, kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'expand')(x) + x = layers.BatchNormalization(axis=channel_axis, epsilon=0.001, momentum=0.999, name=prefix + 'expand_BN')(x) + x = layers.ReLU(6.0, name=prefix + 'expand_relu')(x) + else: + prefix = 'expanded_conv_' + if stride == 2: + x = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(x, 3), name=prefix + 'pad')(x) + x = layers.DepthwiseConv2D(kernel_size=3, strides=stride, activation=None, use_bias=False, padding='same' if stride == 1 else 'valid', name=prefix + 'depthwise')(x) + x = layers.BatchNormalization(axis=channel_axis, epsilon=0.001, momentum=0.999, name=prefix + 'depthwise_BN')(x) + x = layers.ReLU(6.0, name=prefix + 'depthwise_relu')(x) + x = layers.Conv2D(pointwise_filters, kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'project')(x) + x = layers.BatchNormalization(axis=channel_axis, epsilon=0.001, momentum=0.999, name=prefix + 'project_BN')(x) + if in_channels == pointwise_filters and stride == 1: + return layers.Add(name=prefix + 'add')([inputs, x]) + return x + +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + +@keras_export('keras.applications.mobilenet_v2.preprocess_input') +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf') + +@keras_export('keras.applications.mobilenet_v2.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ + +# File: keras-master/keras/src/applications/mobilenet_v3.py +import warnings +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +BASE_WEIGHT_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v3/' +WEIGHTS_HASHES = {'large_224_0.75_float': ('765b44a33ad4005b3ac83185abf1d0eb', '40af19a13ebea4e2ee0c676887f69a2e'), 'large_224_1.0_float': ('59e551e166be033d707958cf9e29a6a7', '07fb09a5933dd0c8eaafa16978110389'), 'large_minimalistic_224_1.0_float': ('675e7b876c45c57e9e63e6d90a36599c', 'ec5221f64a2f6d1ef965a614bdae7973'), 'small_224_0.75_float': ('cb65d4e5be93758266aa0a7f2c6708b7', 'ebdb5cc8e0b497cd13a7c275d475c819'), 'small_224_1.0_float': 
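+# Illustrative sketch (not part of the upstream file): how `_make_divisible`
+# above rounds channel counts. It snaps `v` to the nearest multiple of
+# `divisor` (8 here) and bumps up one step if rounding would lose more than
+# 10% of the requested width.
+def make_divisible(v, divisor=8, min_value=None):  # local copy of _make_divisible
+    if min_value is None:
+        min_value = divisor
+    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+    if new_v < 0.9 * v:
+        new_v += divisor
+    return new_v
+assert make_divisible(32 * 1.0) == 32   # exact multiples pass through
+assert make_divisible(32 * 0.35) == 16  # 11.2 -> 8 loses >10%, bump to 16
+assert make_divisible(24 * 0.75) == 24  # 18.0 -> 16 loses >10%, bump to 24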
('8768d4c2e7dee89b9d02b2d03d65d862', 'd3e8ec802a04aa4fc771ee12a9a9b836'), 'small_minimalistic_224_1.0_float': ('99cd97fb2fcdad2bf028eb838de69e37', 'cde8136e733e811080d9fcd8a252f7e4')} +BASE_DOCSTRING = 'Instantiates the {name} architecture.\n\nReference:\n- [Searching for MobileNetV3](\n https://arxiv.org/pdf/1905.02244.pdf) (ICCV 2019)\n\nThe following table describes the performance of MobileNets v3:\n------------------------------------------------------------------------\nMACs stands for Multiply Adds\n\n|Classification Checkpoint|MACs(M)|Parameters(M)|Top1 Accuracy|Pixel1 CPU(ms)|\n|---|---|---|---|---|\n| mobilenet_v3_large_1.0_224 | 217 | 5.4 | 75.6 | 51.2 |\n| mobilenet_v3_large_0.75_224 | 155 | 4.0 | 73.3 | 39.8 |\n| mobilenet_v3_large_minimalistic_1.0_224 | 209 | 3.9 | 72.3 | 44.1 |\n| mobilenet_v3_small_1.0_224 | 66 | 2.9 | 68.1 | 15.8 |\n| mobilenet_v3_small_0.75_224 | 44 | 2.4 | 65.4 | 12.8 |\n| mobilenet_v3_small_minimalistic_1.0_224 | 65 | 2.0 | 61.9 | 12.2 |\n\nFor image classification use cases, see\n[this page for detailed examples](\nhttps://keras.io/api/applications/#usage-examples-for-image-classification-models).\n\nFor transfer learning use cases, make sure to read the\n[guide to transfer learning & fine-tuning](\nhttps://keras.io/guides/transfer_learning/).\n\nNote: each Keras Application expects a specific kind of input preprocessing.\nFor MobileNetV3, by default input preprocessing is included as a part of the\nmodel (as a `Rescaling` layer), and thus\n`keras.applications.mobilenet_v3.preprocess_input` is actually a\npass-through function. In this use case, MobileNetV3 models expect their\ninputs to be float tensors of pixels with values in the `[0-255]` range.\nAt the same time, preprocessing as a part of the model (i.e. `Rescaling`\nlayer) can be disabled by setting `include_preprocessing` argument to `False`.\nWith preprocessing disabled MobileNetV3 models expect their inputs to be float\ntensors of pixels with values in the `[-1, 1]` range.\n\nArgs:\n input_shape: Optional shape tuple, to be specified if you would\n like to use a model with an input image resolution that is not\n `(224, 224, 3)`.\n It should have exactly 3 inputs channels.\n You can also omit this option if you would like\n to infer input_shape from an input_tensor.\n If you choose to include both input_tensor and input_shape then\n input_shape will be used if they match, if the shapes\n do not match then we will throw an error.\n E.g. `(160, 160, 3)` would be one valid value.\n alpha: controls the width of the network. This is known as the\n depth multiplier in the MobileNetV3 paper, but the name is kept for\n consistency with MobileNetV1 in Keras.\n - If `alpha < 1.0`, proportionally decreases the number\n of filters in each layer.\n - If `alpha > 1.0`, proportionally increases the number\n of filters in each layer.\n - If `alpha == 1`, default number of filters from the paper\n are used at each layer.\n minimalistic: In addition to large and small models this module also\n contains so-called minimalistic models, these models have the same\n per-layer dimensions characteristic as MobilenetV3 however, they don\'t\n utilize any of the advanced blocks (squeeze-and-excite units,\n hard-swish, and 5x5 convolutions).\n While these models are less efficient on CPU, they\n are much more performant on GPU/DSP.\n include_top: Boolean, whether to include the fully-connected\n layer at the top of the network. 
Defaults to `True`.\n weights: String, one of `None` (random initialization),\n `"imagenet"` (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n input_tensor: Optional Keras tensor (i.e. output of\n `layers.Input()`)\n to use as image input for the model.\n pooling: String, optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model\n will be the 4D tensor output of the\n last convolutional block.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional block, and thus\n the output of the model will be a\n 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: Integer, optional number of classes to classify images\n into, only to be specified if `include_top` is `True`, and\n if no `weights` argument is specified.\n dropout_rate: fraction of the input units to drop on the last layer.\n classifier_activation: A `str` or callable. The activation function to use\n on the "top" layer. Ignored unless `include_top=True`. Set\n `classifier_activation=None` to return the logits of the "top" layer.\n When loading pretrained weights, `classifier_activation` can only\n be `None` or `"softmax"`.\n include_preprocessing: Boolean, whether to include the preprocessing\n layer (`Rescaling`) at the bottom of the network. Defaults to `True`.\n name: String, the name of the model.\n\nCall arguments:\n inputs: A floating point `numpy.array` or backend-native tensor,\n 4D with 3 color channels, with values in the range `[0, 255]`\n if `include_preprocessing` is `True` and in the range `[-1, 1]`\n otherwise.\n\nReturns:\n A model instance.\n' + +def MobileNetV3(stack_fn, last_point_ch, input_shape=None, alpha=1.0, model_type='large', minimalistic=False, include_top=True, weights='imagenet', input_tensor=None, classes=1000, pooling=None, dropout_rate=0.2, classifier_activation='softmax', include_preprocessing=True, name=None): + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError(f'The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded. Received weights={weights}') + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError(f'If using `weights="imagenet"` with `include_top` as true, `classes` should be 1000. Received classes={classes}') + if input_shape is not None and input_tensor is not None: + try: + is_input_t_tensor = backend.is_keras_tensor(input_tensor) + except ValueError: + try: + is_input_t_tensor = backend.is_keras_tensor(operation_utils.get_source_inputs(input_tensor)) + except ValueError: + raise ValueError('input_tensor: ', input_tensor, f'is not type input_tensor. Received type(input_tensor)={type(input_tensor)}') + if is_input_t_tensor: + if backend.image_data_format() == 'channels_first': + if input_tensor.shape[1] != input_shape[1]: + raise ValueError(f'When backend.image_data_format()=channels_first, input_shape[1] must equal input_tensor.shape[1]. Received input_shape={input_shape}, input_tensor.shape={input_tensor.shape}') + elif input_tensor.shape[2] != input_shape[1]: + raise ValueError(f'input_shape[1] must equal input_tensor.shape[2]. 
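+# Illustrative sketch (not part of the upstream file): the `minimalistic`
+# flag documented above trades the 5x5 kernels, squeeze-and-excite blocks,
+# and hard-swish for 3x3 convolutions and ReLU (see the kernel/activation/
+# se_ratio selection further down). Quick instantiation with random weights:
+import keras
+small_min = keras.applications.MobileNetV3Small(
+    minimalistic=True,  # 3x3 kernels, ReLU, no squeeze-and-excite
+    weights=None,       # note: ImageNet weights for minimalistic need alpha=1.0
+)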
Received input_shape={input_shape}, input_tensor.shape={input_tensor.shape}') + else: + raise ValueError('input_tensor specified: ', input_tensor, 'is not a keras tensor') + if input_shape is None and input_tensor is not None: + try: + backend.is_keras_tensor(input_tensor) + except ValueError: + raise ValueError('input_tensor: ', input_tensor, 'is type: ', type(input_tensor), 'which is not a valid type') + if backend.is_keras_tensor(input_tensor): + if backend.image_data_format() == 'channels_first': + rows = input_tensor.shape[2] + cols = input_tensor.shape[3] + input_shape = (3, cols, rows) + else: + rows = input_tensor.shape[1] + cols = input_tensor.shape[2] + input_shape = (cols, rows, 3) + if input_shape is None and input_tensor is None: + if backend.image_data_format() == 'channels_last': + input_shape = (None, None, 3) + else: + input_shape = (3, None, None) + if backend.image_data_format() == 'channels_last': + (row_axis, col_axis) = (0, 1) + else: + (row_axis, col_axis) = (1, 2) + rows = input_shape[row_axis] + cols = input_shape[col_axis] + if rows and cols and (rows < 32 or cols < 32): + raise ValueError(f'Input size must be at least 32x32; Received `input_shape={input_shape}`') + if weights == 'imagenet': + if not minimalistic and alpha not in [0.75, 1.0] or (minimalistic and alpha != 1.0): + raise ValueError('If imagenet weights are being loaded, alpha can be one of `0.75`, `1.0` for non minimalistic or `1.0` for minimalistic only.') + if rows != cols or rows != 224: + warnings.warn('`input_shape` is undefined or non-square, or `rows` is not 224. Weights for input shape (224, 224) will be loaded as the default.', stacklevel=2) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1 + if minimalistic: + kernel = 3 + activation = relu + se_ratio = None + else: + kernel = 5 + activation = hard_swish + se_ratio = 0.25 + x = img_input + if include_preprocessing: + x = layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)(x) + x = layers.Conv2D(16, kernel_size=3, strides=(2, 2), padding='same', use_bias=False, name='conv')(x) + x = layers.BatchNormalization(axis=channel_axis, epsilon=0.001, momentum=0.999, name='conv_bn')(x) + x = activation(x) + x = stack_fn(x, kernel, activation, se_ratio) + last_conv_ch = _depth(x.shape[channel_axis] * 6) + if alpha > 1.0: + last_point_ch = _depth(last_point_ch * alpha) + x = layers.Conv2D(last_conv_ch, kernel_size=1, padding='same', use_bias=False, name='conv_1')(x) + x = layers.BatchNormalization(axis=channel_axis, epsilon=0.001, momentum=0.999, name='conv_1_bn')(x) + x = activation(x) + if include_top: + x = layers.GlobalAveragePooling2D(keepdims=True)(x) + x = layers.Conv2D(last_point_ch, kernel_size=1, padding='same', use_bias=True, name='conv_2')(x) + x = activation(x) + if dropout_rate > 0: + x = layers.Dropout(dropout_rate)(x) + x = layers.Conv2D(classes, kernel_size=1, padding='same', name='logits')(x) + x = layers.Flatten()(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Activation(activation=classifier_activation, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D(name='max_pool')(x) + if input_tensor is not None: + inputs = 
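+# Illustrative sketch (not part of the upstream file): the
+# `Rescaling(scale=1/127.5, offset=-1)` layer above maps [0, 255] pixels to
+# [-1, 1], which is why `mobilenet_v3.preprocess_input` can be a
+# pass-through when preprocessing stays inside the model.
+import numpy as np
+pixels = np.array([0.0, 127.5, 255.0])
+rescaled = pixels / 127.5 - 1.0  # same as Rescaling(scale=1/127.5, offset=-1)
+assert np.allclose(rescaled, [-1.0, 0.0, 1.0])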
operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + model_name = '{}{}_224_{}_float'.format(model_type, '_minimalistic' if minimalistic else '', str(alpha)) + if include_top: + file_name = 'weights_mobilenet_v3_' + model_name + '.h5' + file_hash = WEIGHTS_HASHES[model_name][0] + else: + file_name = 'weights_mobilenet_v3_' + model_name + '_no_top_v2.h5' + file_hash = WEIGHTS_HASHES[model_name][1] + weights_path = file_utils.get_file(file_name, BASE_WEIGHT_PATH + file_name, cache_subdir='models', file_hash=file_hash) + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +@keras_export('keras.applications.MobileNetV3Small') +def MobileNetV3Small(input_shape=None, alpha=1.0, minimalistic=False, include_top=True, weights='imagenet', input_tensor=None, classes=1000, pooling=None, dropout_rate=0.2, classifier_activation='softmax', include_preprocessing=True, name='MobileNetV3Small'): + + def stack_fn(x, kernel, activation, se_ratio): + + def depth(d): + return _depth(d * alpha) + x = _inverted_res_block(x, 1, depth(16), 3, 2, se_ratio, relu, 0) + x = _inverted_res_block(x, 72.0 / 16, depth(24), 3, 2, None, relu, 1) + x = _inverted_res_block(x, 88.0 / 24, depth(24), 3, 1, None, relu, 2) + x = _inverted_res_block(x, 4, depth(40), kernel, 2, se_ratio, activation, 3) + x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 4) + x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 5) + x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 6) + x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 7) + x = _inverted_res_block(x, 6, depth(96), kernel, 2, se_ratio, activation, 8) + x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation, 9) + x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation, 10) + return x + return MobileNetV3(stack_fn, 1024, input_shape, alpha, 'small', minimalistic, include_top, weights, input_tensor, classes, pooling, dropout_rate, classifier_activation, include_preprocessing, name=name) + +@keras_export('keras.applications.MobileNetV3Large') +def MobileNetV3Large(input_shape=None, alpha=1.0, minimalistic=False, include_top=True, weights='imagenet', input_tensor=None, classes=1000, pooling=None, dropout_rate=0.2, classifier_activation='softmax', include_preprocessing=True, name='MobileNetV3Large'): + + def stack_fn(x, kernel, activation, se_ratio): + + def depth(d): + return _depth(d * alpha) + x = _inverted_res_block(x, 1, depth(16), 3, 1, None, relu, 0) + x = _inverted_res_block(x, 4, depth(24), 3, 2, None, relu, 1) + x = _inverted_res_block(x, 3, depth(24), 3, 1, None, relu, 2) + x = _inverted_res_block(x, 3, depth(40), kernel, 2, se_ratio, relu, 3) + x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 4) + x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 5) + x = _inverted_res_block(x, 6, depth(80), 3, 2, None, activation, 6) + x = _inverted_res_block(x, 2.5, depth(80), 3, 1, None, activation, 7) + x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 8) + x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 9) + x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 10) + x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 11) + x = _inverted_res_block(x, 6, depth(160), kernel, 2, se_ratio, activation, 12) + x = 
_inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation, 13) + x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation, 14) + return x + return MobileNetV3(stack_fn, 1280, input_shape, alpha, 'large', minimalistic, include_top, weights, input_tensor, classes, pooling, dropout_rate, classifier_activation, include_preprocessing, name=name) +MobileNetV3Small.__doc__ = BASE_DOCSTRING.format(name='MobileNetV3Small') +MobileNetV3Large.__doc__ = BASE_DOCSTRING.format(name='MobileNetV3Large') + +def relu(x): + return layers.ReLU()(x) + +def hard_sigmoid(x): + return layers.ReLU(6.0)(x + 3.0) * (1.0 / 6.0) + +def hard_swish(x): + return layers.Activation('hard_swish')(x) + +def _depth(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + +def _se_block(inputs, filters, se_ratio, prefix): + x = layers.GlobalAveragePooling2D(keepdims=True, name=prefix + 'squeeze_excite_avg_pool')(inputs) + x = layers.Conv2D(_depth(filters * se_ratio), kernel_size=1, padding='same', name=prefix + 'squeeze_excite_conv')(x) + x = layers.ReLU(name=prefix + 'squeeze_excite_relu')(x) + x = layers.Conv2D(filters, kernel_size=1, padding='same', name=prefix + 'squeeze_excite_conv_1')(x) + x = hard_sigmoid(x) + x = layers.Multiply(name=prefix + 'squeeze_excite_mul')([inputs, x]) + return x + +def _inverted_res_block(x, expansion, filters, kernel_size, stride, se_ratio, activation, block_id): + channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1 + shortcut = x + prefix = 'expanded_conv_' + infilters = x.shape[channel_axis] + if block_id: + prefix = f'expanded_conv_{block_id}_' + x = layers.Conv2D(_depth(infilters * expansion), kernel_size=1, padding='same', use_bias=False, name=prefix + 'expand')(x) + x = layers.BatchNormalization(axis=channel_axis, epsilon=0.001, momentum=0.999, name=prefix + 'expand_bn')(x) + x = activation(x) + if stride == 2: + x = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(x, kernel_size), name=prefix + 'depthwise_pad')(x) + x = layers.DepthwiseConv2D(kernel_size, strides=stride, padding='same' if stride == 1 else 'valid', use_bias=False, name=prefix + 'depthwise')(x) + x = layers.BatchNormalization(axis=channel_axis, epsilon=0.001, momentum=0.999, name=prefix + 'depthwise_bn')(x) + x = activation(x) + if se_ratio: + x = _se_block(x, _depth(infilters * expansion), se_ratio, prefix) + x = layers.Conv2D(filters, kernel_size=1, padding='same', use_bias=False, name=prefix + 'project')(x) + x = layers.BatchNormalization(axis=channel_axis, epsilon=0.001, momentum=0.999, name=prefix + 'project_bn')(x) + if stride == 1 and infilters == filters: + x = layers.Add(name=prefix + 'add')([shortcut, x]) + return x + +@keras_export('keras.applications.mobilenet_v3.preprocess_input') +def preprocess_input(x, data_format=None): + return x + +@keras_export('keras.applications.mobilenet_v3.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ + +# File: keras-master/keras/src/applications/nasnet.py +import warnings +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils 
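+# Illustrative sketch (not part of the upstream file): the MobileNetV3
+# activations defined above, in NumPy. hard_sigmoid(x) = relu6(x + 3) / 6
+# and hard_swish(x) = x * hard_sigmoid(x): piecewise-linear stand-ins for
+# sigmoid/swish that avoid exponentials on mobile hardware.
+import numpy as np
+def np_relu6(x):
+    return np.minimum(np.maximum(x, 0.0), 6.0)
+def np_hard_sigmoid(x):
+    return np_relu6(x + 3.0) / 6.0
+def np_hard_swish(x):
+    return x * np_hard_sigmoid(x)
+x = np.array([-4.0, -1.0, 0.0, 1.0, 4.0])
+assert np.allclose(np_hard_swish(x), [0.0, -1.0 / 3.0, 0.0, 2.0 / 3.0, 4.0])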
import file_utils +BASE_WEIGHTS_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/nasnet/' +NASNET_MOBILE_WEIGHT_PATH = BASE_WEIGHTS_PATH + 'NASNet-mobile.h5' +NASNET_MOBILE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + 'NASNet-mobile-no-top.h5' +NASNET_LARGE_WEIGHT_PATH = BASE_WEIGHTS_PATH + 'NASNet-large.h5' +NASNET_LARGE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + 'NASNet-large-no-top.h5' + +def NASNet(input_shape=None, penultimate_filters=4032, num_blocks=6, stem_block_filters=96, skip_reduction=True, filter_multiplier=2, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000, default_size=None, classifier_activation='softmax', name='NASNet'): + if backend.image_data_format() == 'channels_first': + raise ValueError('NASNet does not support the `channels_first` image data format. Switch to `channels_last` by editing your local config file at ~/.keras/keras.json') + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.') + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError('If using `weights` as `"imagenet"` with `include_top` as true, `classes` should be 1000') + if isinstance(input_shape, tuple) and None in input_shape and (weights == 'imagenet'): + raise ValueError('When specifying the input shape of a NASNet and loading `ImageNet` weights, the input_shape argument must be static (no None entries). Got: `input_shape=' + str(input_shape) + '`.') + if default_size is None: + default_size = 331 + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if backend.image_data_format() != 'channels_last': + warnings.warn('The NASNet family of models is only available for the input data format "channels_last" (width, height, channels). However your settings specify the default data format "channels_first" (channels, width, height). You should set `image_data_format="channels_last"` in your Keras config located at ~/.keras/keras.json. The model being returned right now will expect inputs to follow the "channels_last" data format.', stacklevel=2) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + if penultimate_filters % (24 * filter_multiplier ** 2) != 0: + raise ValueError(f'For NASNet-A models, the `penultimate_filters` must be a multiple of 24 * (`filter_multiplier` ** 2). 
Current value: {penultimate_filters}') + channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1 + filters = penultimate_filters // 24 + x = layers.Conv2D(stem_block_filters, (3, 3), strides=(2, 2), padding='valid', use_bias=False, name='stem_conv1', kernel_initializer='he_normal')(img_input) + x = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name='stem_bn1')(x) + p = None + (x, p) = _reduction_a_cell(x, p, filters // filter_multiplier ** 2, block_id='stem_1') + (x, p) = _reduction_a_cell(x, p, filters // filter_multiplier, block_id='stem_2') + for i in range(num_blocks): + (x, p) = _normal_a_cell(x, p, filters, block_id=f'{i}') + (x, p0) = _reduction_a_cell(x, p, filters * filter_multiplier, block_id=f'reduce_{num_blocks}') + p = p0 if not skip_reduction else p + for i in range(num_blocks): + (x, p) = _normal_a_cell(x, p, filters * filter_multiplier, block_id=f'{num_blocks + i + 1}') + (x, p0) = _reduction_a_cell(x, p, filters * filter_multiplier ** 2, block_id=f'reduce_{2 * num_blocks}') + p = p0 if not skip_reduction else p + for i in range(num_blocks): + (x, p) = _normal_a_cell(x, p, filters * filter_multiplier ** 2, block_id=f'{2 * num_blocks + i + 1}') + x = layers.Activation('relu')(x) + if include_top: + x = layers.GlobalAveragePooling2D()(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D()(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D()(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + if default_size == 224: + if include_top: + weights_path = file_utils.get_file('nasnet_mobile.h5', NASNET_MOBILE_WEIGHT_PATH, cache_subdir='models', file_hash='020fb642bf7360b370c678b08e0adf61') + else: + weights_path = file_utils.get_file('nasnet_mobile_no_top.h5', NASNET_MOBILE_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='1ed92395b5b598bdda52abe5c0dbfd63') + model.load_weights(weights_path) + elif default_size == 331: + if include_top: + weights_path = file_utils.get_file('nasnet_large.h5', NASNET_LARGE_WEIGHT_PATH, cache_subdir='models', file_hash='11577c9a518f0070763c2b964a382f17') + else: + weights_path = file_utils.get_file('nasnet_large_no_top.h5', NASNET_LARGE_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='d81d89dc07e6e56530c4e77faddd61b5') + model.load_weights(weights_path) + else: + raise ValueError('ImageNet weights can only be loaded with NASNetLarge or NASNetMobile') + elif weights is not None: + model.load_weights(weights) + return model + +@keras_export(['keras.applications.nasnet.NASNetMobile', 'keras.applications.NASNetMobile']) +def NASNetMobile(input_shape=None, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000, classifier_activation='softmax', name='nasnet_mobile'): + if backend.backend() == 'torch': + raise ValueError('NASNetMobile is not available with the torch backend at this time due to an outstanding bug. 
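+# Illustrative sketch (not part of the upstream file): NASNet's channel
+# bookkeeping from the code above. `penultimate_filters` must divide by
+# 24 * filter_multiplier**2 because the last stage's normal cells emit a
+# 6-way concat at `filters * filter_multiplier**2` channels each, while the
+# stem reduction cells run at 1/fm and 1/fm**2 of the base width.
+penultimate_filters, fm = 4032, 2        # NASNetLarge defaults
+assert penultimate_filters % (24 * fm**2) == 0
+filters = penultimate_filters // 24      # 168: base cell width
+widths = (filters // fm**2, filters // fm, filters, filters * fm, filters * fm**2)
+assert widths == (42, 84, 168, 336, 672) and 6 * widths[-1] == penultimate_filters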
If interested, please open a PR.') + if not include_top and input_shape is None: + input_shape = (224, 224, 3) + return NASNet(input_shape, penultimate_filters=1056, num_blocks=4, stem_block_filters=32, skip_reduction=False, filter_multiplier=2, include_top=include_top, weights=weights, input_tensor=input_tensor, pooling=pooling, classes=classes, default_size=224, classifier_activation=classifier_activation, name=name) + +@keras_export(['keras.applications.nasnet.NASNetLarge', 'keras.applications.NASNetLarge']) +def NASNetLarge(input_shape=None, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000, classifier_activation='softmax', name='nasnet_large'): + return NASNet(input_shape, penultimate_filters=4032, num_blocks=6, stem_block_filters=96, skip_reduction=True, filter_multiplier=2, include_top=include_top, weights=weights, input_tensor=input_tensor, pooling=pooling, classes=classes, default_size=331, classifier_activation=classifier_activation, name=name) + +def _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1), block_id=None): + channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1 + with backend.name_scope(f'separable_conv_block_{block_id}'): + x = layers.Activation('relu')(ip) + if strides == (2, 2): + x = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(x, kernel_size), name=f'separable_conv_1_pad_{block_id}')(x) + conv_pad = 'valid' + else: + conv_pad = 'same' + x = layers.SeparableConv2D(filters, kernel_size, strides=strides, name=f'separable_conv_1_{block_id}', padding=conv_pad, use_bias=False)(x) + x = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'separable_conv_1_bn_{block_id}')(x) + x = layers.Activation('relu')(x) + x = layers.SeparableConv2D(filters, kernel_size, name=f'separable_conv_2_{block_id}', padding='same', use_bias=False)(x) + x = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'separable_conv_2_bn_{block_id}')(x) + return x + +def _adjust_block(p, ip, filters, block_id=None): + channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1 + img_dim = 2 if backend.image_data_format() == 'channels_first' else -2 + with backend.name_scope('adjust_block'): + if p is None: + p = ip + elif p.shape[img_dim] != ip.shape[img_dim]: + with backend.name_scope(f'adjust_reduction_block_{block_id}'): + p = layers.Activation('relu', name=f'adjust_relu_1_{block_id}')(p) + p1 = layers.AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name=f'adjust_avg_pool_1_{block_id}')(p) + p1 = layers.Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, name=f'adjust_conv_1_{block_id}', kernel_initializer='he_normal')(p1) + p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p) + p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2) + p2 = layers.AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name=f'adjust_avg_pool_2_{block_id}')(p2) + p2 = layers.Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, name=f'adjust_conv_2_{block_id}', kernel_initializer='he_normal')(p2) + p = layers.concatenate([p1, p2], axis=channel_dim) + p = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'adjust_bn_{block_id}')(p) + elif p.shape[channel_dim] != filters: + with backend.name_scope(f'adjust_projection_block_{block_id}'): + p = layers.Activation('relu')(p) + p = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name=f'adjust_conv_projection_{block_id}', use_bias=False, 
kernel_initializer='he_normal')(p) + p = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'adjust_bn_{block_id}')(p) + return p + +def _normal_a_cell(ip, p, filters, block_id=None): + channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1 + with backend.name_scope(f'normal_A_block_{block_id}'): + p = _adjust_block(p, ip, filters, block_id) + h = layers.Activation('relu')(ip) + h = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name=f'normal_conv_1_{block_id}', use_bias=False, kernel_initializer='he_normal')(h) + h = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'normal_bn_1_{block_id}')(h) + with backend.name_scope('block_1'): + x1_1 = _separable_conv_block(h, filters, kernel_size=(5, 5), block_id=f'normal_left1_{block_id}') + x1_2 = _separable_conv_block(p, filters, block_id=f'normal_right1_{block_id}') + x1 = layers.add([x1_1, x1_2], name=f'normal_add_1_{block_id}') + with backend.name_scope('block_2'): + x2_1 = _separable_conv_block(p, filters, (5, 5), block_id=f'normal_left2_{block_id}') + x2_2 = _separable_conv_block(p, filters, (3, 3), block_id=f'normal_right2_{block_id}') + x2 = layers.add([x2_1, x2_2], name=f'normal_add_2_{block_id}') + with backend.name_scope('block_3'): + x3 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same', name=f'normal_left3_{block_id}')(h) + x3 = layers.add([x3, p], name=f'normal_add_3_{block_id}') + with backend.name_scope('block_4'): + x4_1 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same', name=f'normal_left4_{block_id}')(p) + x4_2 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same', name=f'normal_right4_{block_id}')(p) + x4 = layers.add([x4_1, x4_2], name=f'normal_add_4_{block_id}') + with backend.name_scope('block_5'): + x5 = _separable_conv_block(h, filters, block_id=f'normal_left5_{block_id}') + x5 = layers.add([x5, h], name=f'normal_add_5_{block_id}') + x = layers.concatenate([p, x1, x2, x3, x4, x5], axis=channel_dim, name=f'normal_concat_{block_id}') + return (x, ip) + +def _reduction_a_cell(ip, p, filters, block_id=None): + channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1 + with backend.name_scope(f'reduction_A_block_{block_id}'): + p = _adjust_block(p, ip, filters, block_id) + h = layers.Activation('relu')(ip) + h = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name=f'reduction_conv_1_{block_id}', use_bias=False, kernel_initializer='he_normal')(h) + h = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'reduction_bn_1_{block_id}')(h) + h3 = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(h, 3), name=f'reduction_pad_1_{block_id}')(h) + with backend.name_scope('block_1'): + x1_1 = _separable_conv_block(h, filters, (5, 5), strides=(2, 2), block_id=f'reduction_left1_{block_id}') + x1_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), block_id=f'reduction_right1_{block_id}') + x1 = layers.add([x1_1, x1_2], name=f'reduction_add_1_{block_id}') + with backend.name_scope('block_2'): + x2_1 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name=f'reduction_left2_{block_id}')(h3) + x2_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), block_id=f'reduction_right2_{block_id}') + x2 = layers.add([x2_1, x2_2], name=f'reduction_add_2_{block_id}') + with backend.name_scope('block_3'): + x3_1 = layers.AveragePooling2D((3, 3), strides=(2, 2), padding='valid', name=f'reduction_left3_{block_id}')(h3) + x3_2 
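+# Illustrative sketch (not part of the upstream file): the pad-then-crop
+# trick in `_adjust_block` above. Padding one pixel on the bottom/right and
+# cropping one off the top/left shifts the feature map by one position, so
+# the second stride-2 (1, 1) average pool subsamples the grid points the
+# first path skipped before the two halves are concatenated.
+import numpy as np
+fmap = np.arange(16).reshape(4, 4)
+shifted = np.pad(fmap, ((0, 1), (0, 1)))[1:, 1:]  # shift up-left by one
+p1 = fmap[::2, ::2]     # stride-2 subsample: rows/cols 0 and 2
+p2 = shifted[::2, ::2]  # effectively rows/cols 1 and 3 of the original
+assert p2[0, 0] == fmap[1, 1]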
= _separable_conv_block(p, filters, (5, 5), strides=(2, 2), block_id=f'reduction_right3_{block_id}') + x3 = layers.add([x3_1, x3_2], name=f'reduction_add3_{block_id}') + with backend.name_scope('block_4'): + x4 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same', name=f'reduction_left4_{block_id}')(x1) + x4 = layers.add([x2, x4]) + with backend.name_scope('block_5'): + x5_1 = _separable_conv_block(x1, filters, (3, 3), block_id=f'reduction_left4_{block_id}') + x5_2 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name=f'reduction_right5_{block_id}')(h3) + x5 = layers.add([x5_1, x5_2], name=f'reduction_add4_{block_id}') + x = layers.concatenate([x2, x3, x4, x5], axis=channel_dim, name=f'reduction_concat_{block_id}') + return (x, ip) + +@keras_export('keras.applications.nasnet.preprocess_input') +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf') + +@keras_export('keras.applications.nasnet.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ + +# File: keras-master/keras/src/applications/resnet.py +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +BASE_WEIGHTS_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/resnet/' +WEIGHTS_HASHES = {'resnet50': ('2cb95161c43110f7111970584f804107', '4d473c1dd8becc155b73f8504c6f6626'), 'resnet101': ('f1aeb4b969a6efcfb50fad2f0c20cfc5', '88cf7a10940856eca736dc7b7e228a21'), 'resnet152': ('100835be76be38e30d865e96f2aaae62', 'ee4c566cf9a93f14d82f913c2dc6dd0c'), 'resnet50v2': ('3ef43a0b657b3be2300d5770ece849e0', 'fac2f116257151a9d068a22e544a4917'), 'resnet101v2': ('6343647c601c52e1368623803854d971', 'c0ed64b8031c3730f411d2eb4eea35b5'), 'resnet152v2': ('a49b44d1979771252814e80f8ec446f9', 'ed17cf2e0169df9d443503ef94b23b33'), 'resnext50': ('67a5b30d522ed92f75a1f16eef299d1a', '62527c363bdd9ec598bed41947b379fc'), 'resnext101': ('34fb605428fcc7aa4d62f44404c11509', '0f678c91647380debd923963594981b3')} + +def ResNet(stack_fn, preact, use_bias, include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='resnet', weights_name=None): + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError(f"The `weights` argument should be either `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. Received: weights={weights}") + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError(f"If using `weights='imagenet'` with `include_top=True`, `classes` should be 1000. 
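+# Illustrative sketch (not part of the upstream file): unlike the normal
+# cell (a 6-way concat of [p, x1..x5]), the reduction cell above
+# concatenates 4 branches (x2..x5) and halves the spatial grid, so a
+# reduction cell at width f emits 4 * f channels at half resolution.
+def reduction_cell_output_shape(h, w, f):
+    return (h // 2, w // 2, 4 * f)  # 4-way concat of stride-2 branches
+assert reduction_cell_output_shape(42, 42, 168) == (21, 21, 672)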
Received classes={classes}") + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=224, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + if backend.image_data_format() == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(img_input) + x = layers.Conv2D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x) + if not preact: + x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name='conv1_bn')(x) + x = layers.Activation('relu', name='conv1_relu')(x) + x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x) + x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x) + x = stack_fn(x) + if preact: + x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name='post_bn')(x) + x = layers.Activation('relu', name='post_relu')(x) + if include_top: + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D(name='max_pool')(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + model = Functional(inputs, x, name=name) + if weights == 'imagenet' and weights_name in WEIGHTS_HASHES: + if include_top: + file_name = weights_name + '_weights_tf_dim_ordering_tf_kernels.h5' + file_hash = WEIGHTS_HASHES[weights_name][0] + else: + file_name = weights_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5' + file_hash = WEIGHTS_HASHES[weights_name][1] + weights_path = file_utils.get_file(file_name, BASE_WEIGHTS_PATH + file_name, cache_subdir='models', file_hash=file_hash) + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +def residual_block_v1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None): + if backend.image_data_format() == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + if conv_shortcut: + shortcut = layers.Conv2D(4 * filters, 1, strides=stride, name=name + '_0_conv')(x) + shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_0_bn')(shortcut) + else: + shortcut = x + x = layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_1_bn')(x) + x = layers.Activation('relu', name=name + '_1_relu')(x) + x = layers.Conv2D(filters, kernel_size, padding='SAME', name=name + '_2_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_2_bn')(x) + x = layers.Activation('relu', name=name + '_2_relu')(x) + x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_3_bn')(x) + x = layers.Add(name=name + '_add')([shortcut, x]) + x = layers.Activation('relu', name=name + '_out')(x) + return x + +def stack_residual_blocks_v1(x, filters, blocks, stride1=2, name=None): + x = residual_block_v1(x, filters, stride=stride1, name=name + '_block1') + for i in 
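+# Illustrative sketch (not part of the upstream file): the v1 bottleneck
+# above is 1x1 -> 3x3 -> 1x1 with a 4x expansion on the last conv, so a
+# block called with filters=64 emits 256 channels; `conv_shortcut=True`
+# projects the identity path with a 1x1 conv whenever the shape changes.
+# BatchNorm/ReLU are omitted here for brevity.
+import keras
+from keras import layers
+inp = keras.Input((56, 56, 64))
+h = layers.Conv2D(64, 1)(inp)                # reduce
+h = layers.Conv2D(64, 3, padding='same')(h)  # spatial conv
+h = layers.Conv2D(4 * 64, 1)(h)              # expand 4x
+shortcut = layers.Conv2D(4 * 64, 1)(inp)     # projection shortcut
+out = layers.Add()([shortcut, h])
+assert out.shape == (None, 56, 56, 256)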
range(2, blocks + 1): + x = residual_block_v1(x, filters, conv_shortcut=False, name=name + '_block' + str(i)) + return x + +def residual_block_v2(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None): + if backend.image_data_format() == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + preact = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_preact_bn')(x) + preact = layers.Activation('relu', name=name + '_preact_relu')(preact) + if conv_shortcut: + shortcut = layers.Conv2D(4 * filters, 1, strides=stride, name=name + '_0_conv')(preact) + else: + shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x + x = layers.Conv2D(filters, 1, strides=1, use_bias=False, name=name + '_1_conv')(preact) + x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_1_bn')(x) + x = layers.Activation('relu', name=name + '_1_relu')(x) + x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x) + x = layers.Conv2D(filters, kernel_size, strides=stride, use_bias=False, name=name + '_2_conv')(x) + x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_2_bn')(x) + x = layers.Activation('relu', name=name + '_2_relu')(x) + x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x) + x = layers.Add(name=name + '_out')([shortcut, x]) + return x + +def stack_residual_blocks_v2(x, filters, blocks, stride1=2, name=None): + x = residual_block_v2(x, filters, conv_shortcut=True, name=name + '_block1') + for i in range(2, blocks): + x = residual_block_v2(x, filters, name=name + '_block' + str(i)) + x = residual_block_v2(x, filters, stride=stride1, name=name + '_block' + str(blocks)) + return x + +@keras_export(['keras.applications.resnet50.ResNet50', 'keras.applications.resnet.ResNet50', 'keras.applications.ResNet50']) +def ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='resnet50'): + + def stack_fn(x): + x = stack_residual_blocks_v1(x, 64, 3, stride1=1, name='conv2') + x = stack_residual_blocks_v1(x, 128, 4, name='conv3') + x = stack_residual_blocks_v1(x, 256, 6, name='conv4') + return stack_residual_blocks_v1(x, 512, 3, name='conv5') + return ResNet(stack_fn, preact=False, use_bias=True, weights_name='resnet50', name=name, include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation) + +@keras_export(['keras.applications.resnet.ResNet101', 'keras.applications.ResNet101']) +def ResNet101(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='resnet101'): + + def stack_fn(x): + x = stack_residual_blocks_v1(x, 64, 3, stride1=1, name='conv2') + x = stack_residual_blocks_v1(x, 128, 4, name='conv3') + x = stack_residual_blocks_v1(x, 256, 23, name='conv4') + return stack_residual_blocks_v1(x, 512, 3, name='conv5') + return ResNet(stack_fn, preact=False, use_bias=True, name=name, weights_name='resnet101', include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation) + +@keras_export(['keras.applications.resnet.ResNet152', 'keras.applications.ResNet152']) +def ResNet152(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', 
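+# Illustrative sketch (not part of the upstream file): where the "50" in
+# ResNet50 comes from. Each v1 bottleneck holds 3 convolutions, the stacks
+# above use (3, 4, 6, 3) blocks (deeper variants widen conv3/conv4), and
+# the stem conv plus the final dense layer add two more weighted layers.
+blocks = {'resnet50': (3, 4, 6, 3), 'resnet101': (3, 4, 23, 3), 'resnet152': (3, 8, 36, 3)}
+for name, per_stage in blocks.items():
+    depth = 3 * sum(per_stage) + 2
+    assert depth == int(name.replace('resnet', ''))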
name='resnet152'): + + def stack_fn(x): + x = stack_residual_blocks_v1(x, 64, 3, stride1=1, name='conv2') + x = stack_residual_blocks_v1(x, 128, 8, name='conv3') + x = stack_residual_blocks_v1(x, 256, 36, name='conv4') + return stack_residual_blocks_v1(x, 512, 3, name='conv5') + return ResNet(stack_fn, preact=False, use_bias=True, name=name, weights_name='resnet152', include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation) + +@keras_export(['keras.applications.resnet50.preprocess_input', 'keras.applications.resnet.preprocess_input']) +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input(x, data_format=data_format, mode='caffe') + +@keras_export(['keras.applications.resnet50.decode_predictions', 'keras.applications.resnet.decode_predictions']) +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ +DOC = '\n\nReference:\n- [Deep Residual Learning for Image Recognition](\n https://arxiv.org/abs/1512.03385) (CVPR 2015)\n\nFor image classification use cases, see [this page for detailed examples](\n https://keras.io/api/applications/#usage-examples-for-image-classification-models).\n\nFor transfer learning use cases, make sure to read the\n[guide to transfer learning & fine-tuning](\n https://keras.io/guides/transfer_learning/).\n\nNote: each Keras Application expects a specific kind of input preprocessing.\nFor ResNet, call `keras.applications.resnet.preprocess_input` on your\ninputs before passing them to the model. `resnet.preprocess_input` will convert\nthe input images from RGB to BGR, then will zero-center each color channel with\nrespect to the ImageNet dataset, without scaling.\n\nArgs:\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n `"imagenet"` (pre-training on ImageNet), or the path to the weights\n file to be loaded.\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: optional shape tuple, only to be specified if `include_top`\n is `False` (otherwise the input shape has to be `(224, 224, 3)`\n (with `"channels_last"` data format) or `(3, 224, 224)`\n (with `"channels_first"` data format). It should have exactly 3\n inputs channels, and width and height should be no smaller than 32.\n E.g. `(200, 200, 3)` would be one valid value.\n pooling: Optional pooling mode for feature extraction when `include_top`\n is `False`.\n - `None` means that the output of the model will be the 4D tensor\n output of the last convolutional block.\n - `avg` means that global average pooling will be applied to the output\n of the last convolutional block, and thus the output of the\n model will be a 2D tensor.\n - `max` means that global max pooling will be applied.\n classes: optional number of classes to classify images into, only to be\n specified if `include_top` is `True`, and if no `weights` argument is\n specified. Defaults to `1000`.\n classifier_activation: A `str` or callable. The activation function to\n use on the "top" layer. Ignored unless `include_top=True`. 
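+# Illustrative sketch (not part of the upstream file): `mode='caffe'`
+# preprocessing as described in the DOC string above. Channels flip from
+# RGB to BGR and the ImageNet channel means (BGR order, the values used by
+# Keras' imagenet_utils) are subtracted; there is no scaling.
+import numpy as np
+rgb = np.array([[[120.0, 115.0, 100.0]]])            # a single RGB pixel
+bgr = rgb[..., ::-1]                                 # RGB -> BGR
+caffe = bgr - np.array([103.939, 116.779, 123.68])   # zero-center per channel
+assert np.allclose(caffe, [[[-3.939, -1.779, -3.68]]])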
Set\n `classifier_activation=None` to return the logits of the "top" layer.\n When loading pretrained weights, `classifier_activation` can only\n be `None` or `"softmax"`.\n name: The name of the model (string).\n\nReturns:\n A Model instance.\n' +setattr(ResNet50, '__doc__', ResNet50.__doc__ + DOC) +setattr(ResNet101, '__doc__', ResNet101.__doc__ + DOC) +setattr(ResNet152, '__doc__', ResNet152.__doc__ + DOC) + +# File: keras-master/keras/src/applications/resnet_v2.py +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.applications import resnet + +@keras_export(['keras.applications.ResNet50V2', 'keras.applications.resnet_v2.ResNet50V2']) +def ResNet50V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='resnet50v2'): + + def stack_fn(x): + x = resnet.stack_residual_blocks_v2(x, 64, 3, name='conv2') + x = resnet.stack_residual_blocks_v2(x, 128, 4, name='conv3') + x = resnet.stack_residual_blocks_v2(x, 256, 6, name='conv4') + return resnet.stack_residual_blocks_v2(x, 512, 3, stride1=1, name='conv5') + return resnet.ResNet(stack_fn, True, True, name=name, weights_name='resnet50v2', include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation) + +@keras_export(['keras.applications.ResNet101V2', 'keras.applications.resnet_v2.ResNet101V2']) +def ResNet101V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='resnet101v2'): + + def stack_fn(x): + x = resnet.stack_residual_blocks_v2(x, 64, 3, name='conv2') + x = resnet.stack_residual_blocks_v2(x, 128, 4, name='conv3') + x = resnet.stack_residual_blocks_v2(x, 256, 23, name='conv4') + return resnet.stack_residual_blocks_v2(x, 512, 3, stride1=1, name='conv5') + return resnet.ResNet(stack_fn, True, True, name=name, weights_name='resnet101v2', include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation) + +@keras_export(['keras.applications.ResNet152V2', 'keras.applications.resnet_v2.ResNet152V2']) +def ResNet152V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='resnet152v2'): + + def stack_fn(x): + x = resnet.stack_residual_blocks_v2(x, 64, 3, name='conv2') + x = resnet.stack_residual_blocks_v2(x, 128, 8, name='conv3') + x = resnet.stack_residual_blocks_v2(x, 256, 36, name='conv4') + return resnet.stack_residual_blocks_v2(x, 512, 3, stride1=1, name='conv5') + return resnet.ResNet(stack_fn, True, True, name=name, weights_name='resnet152v2', include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, classifier_activation=classifier_activation) + +@keras_export('keras.applications.resnet_v2.preprocess_input') +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf') + +@keras_export('keras.applications.resnet_v2.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF, 
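+# Illustrative sketch (not part of the upstream file): v1 vs. v2
+# downsampling order. `stack_residual_blocks_v1` strides in its *first*
+# block, while `stack_residual_blocks_v2` strides in its *last* block,
+# which is why the v2 constructors above pass `stride1=1` only for the
+# final conv5 stack.
+def v1_strides(blocks, stride1=2):
+    return [stride1] + [1] * (blocks - 1)
+def v2_strides(blocks, stride1=2):
+    return [1] * (blocks - 1) + [stride1]
+assert v1_strides(4) == [2, 1, 1, 1]
+assert v2_strides(4) == [1, 1, 1, 2]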
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ +DOC = '\n\nReference:\n- [Identity Mappings in Deep Residual Networks](\n https://arxiv.org/abs/1603.05027) (CVPR 2016)\n\nFor image classification use cases, see [this page for detailed examples](\n https://keras.io/api/applications/#usage-examples-for-image-classification-models).\n\nFor transfer learning use cases, make sure to read the\n[guide to transfer learning & fine-tuning](\n https://keras.io/guides/transfer_learning/).\n\nNote: each Keras Application expects a specific kind of input preprocessing.\nFor ResNet, call `keras.applications.resnet_v2.preprocess_input` on your\ninputs before passing them to the model. `resnet_v2.preprocess_input` will\nscale input pixels between -1 and 1.\n\nArgs:\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n `"imagenet"` (pre-training on ImageNet), or the path to the weights\n file to be loaded.\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: optional shape tuple, only to be specified if `include_top`\n is `False` (otherwise the input shape has to be `(224, 224, 3)`\n (with `"channels_last"` data format) or `(3, 224, 224)`\n (with `"channels_first"` data format). It should have exactly 3\n inputs channels, and width and height should be no smaller than 32.\n E.g. `(200, 200, 3)` would be one valid value.\n pooling: Optional pooling mode for feature extraction when `include_top`\n is `False`.\n - `None` means that the output of the model will be the 4D tensor\n output of the last convolutional block.\n - `avg` means that global average pooling will be applied to the output\n of the last convolutional block, and thus the output of the\n model will be a 2D tensor.\n - `max` means that global max pooling will be applied.\n classes: optional number of classes to classify images into, only to be\n specified if `include_top` is `True`, and if no `weights` argument is\n specified.\n classifier_activation: A `str` or callable. The activation function to\n use on the "top" layer. Ignored unless `include_top=True`. 
Set\n `classifier_activation=None` to return the logits of the "top" layer.\n When loading pretrained weights, `classifier_activation` can only\n be `None` or `"softmax"`.\n name: The name of the model (string).\n\nReturns:\n A Model instance.\n' +setattr(ResNet50V2, '__doc__', ResNet50V2.__doc__ + DOC) +setattr(ResNet101V2, '__doc__', ResNet101V2.__doc__ + DOC) +setattr(ResNet152V2, '__doc__', ResNet152V2.__doc__ + DOC) + +# File: keras-master/keras/src/applications/vgg16.py +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +WEIGHTS_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5' +WEIGHTS_PATH_NO_TOP = 'https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5' + +@keras_export(['keras.applications.vgg16.VGG16', 'keras.applications.VGG16']) +def VGG16(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='vgg16'): + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError(f"The `weights` argument should be either `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. Received: weights={weights}") + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError(f"If using `weights='imagenet'` with `include_top=True`, `classes` should be 1000. Received classes={classes}") + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=224, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + x = layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input) + x = layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x) + x = layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x) + x = layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x) + x = layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x) + x = layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x) + x = layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', 
name='block5_conv2')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x) + if include_top: + x = layers.Flatten(name='flatten')(x) + x = layers.Dense(4096, activation='relu', name='fc1')(x) + x = layers.Dense(4096, activation='relu', name='fc2')(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D()(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D()(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + if include_top: + weights_path = file_utils.get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH, cache_subdir='models', file_hash='64373286793e3c8b2b4e3219cbf3544b') + else: + weights_path = file_utils.get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models', file_hash='6d6bbae143d832006294945121d1f1fc') + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +@keras_export('keras.applications.vgg16.preprocess_input') +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input(x, data_format=data_format, mode='caffe') + +@keras_export('keras.applications.vgg16.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ + +# File: keras-master/keras/src/applications/vgg19.py +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +WEIGHTS_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels.h5' +WEIGHTS_PATH_NO_TOP = 'https://storage.googleapis.com/tensorflow/keras-applications/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5' + +@keras_export(['keras.applications.vgg19.VGG19', 'keras.applications.VGG19']) +def VGG19(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='vgg19'): + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError(f"The `weights` argument should be either `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. Received: weights={weights}") + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError(f"If using `weights='imagenet'` with `include_top=True`, `classes` should be 1000. 
Received classes={classes}") + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=224, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + x = layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input) + x = layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x) + x = layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x) + x = layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x) + x = layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x) + x = layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x) + x = layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x) + x = layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x) + x = layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x) + if include_top: + x = layers.Flatten(name='flatten')(x) + x = layers.Dense(4096, activation='relu', name='fc1')(x) + x = layers.Dense(4096, activation='relu', name='fc2')(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D()(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D()(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + if include_top: + weights_path = file_utils.get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH, cache_subdir='models', file_hash='cbe5617147190e668d6c5d5026f83318') + else: + weights_path = file_utils.get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models', file_hash='253f8cb515780f3b799900260a226db6') + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +@keras_export('keras.applications.vgg19.preprocess_input') +def preprocess_input(x, data_format=None): + return 
imagenet_utils.preprocess_input(x, data_format=data_format, mode='caffe') + +@keras_export('keras.applications.vgg19.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__ + +# File: keras-master/keras/src/applications/xception.py +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils +WEIGHTS_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels.h5' +WEIGHTS_PATH_NO_TOP = 'https://storage.googleapis.com/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels_notop.h5' + +@keras_export(['keras.applications.xception.Xception', 'keras.applications.Xception']) +def Xception(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', name='xception'): + if not (weights in {'imagenet', None} or file_utils.exists(weights)): + raise ValueError("The `weights` argument should be either `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded.") + if weights == 'imagenet' and include_top and (classes != 1000): + raise ValueError(f"If using `weights='imagenet'` with `include_top=True`, `classes` should be 1000. 
Received classes={classes}") + input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=299, min_size=71, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + elif not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1 + x = layers.Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input) + x = layers.BatchNormalization(axis=channel_axis, name='block1_conv1_bn')(x) + x = layers.Activation('relu', name='block1_conv1_act')(x) + x = layers.Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x) + x = layers.BatchNormalization(axis=channel_axis, name='block1_conv2_bn')(x) + x = layers.Activation('relu', name='block1_conv2_act')(x) + residual = layers.Conv2D(128, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x) + residual = layers.BatchNormalization(axis=channel_axis)(residual) + x = layers.SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x) + x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv1_bn')(x) + x = layers.Activation('relu', name='block2_sepconv2_act')(x) + x = layers.SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x) + x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv2_bn')(x) + x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x) + x = layers.add([x, residual]) + residual = layers.Conv2D(256, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x) + residual = layers.BatchNormalization(axis=channel_axis)(residual) + x = layers.Activation('relu', name='block3_sepconv1_act')(x) + x = layers.SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x) + x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv1_bn')(x) + x = layers.Activation('relu', name='block3_sepconv2_act')(x) + x = layers.SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x) + x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv2_bn')(x) + x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool')(x) + x = layers.add([x, residual]) + residual = layers.Conv2D(728, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x) + residual = layers.BatchNormalization(axis=channel_axis)(residual) + x = layers.Activation('relu', name='block4_sepconv1_act')(x) + x = layers.SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x) + x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv1_bn')(x) + x = layers.Activation('relu', name='block4_sepconv2_act')(x) + x = layers.SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x) + x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv2_bn')(x) + x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block4_pool')(x) + x = layers.add([x, residual]) + for i in range(8): + residual = x + prefix = 'block' + str(i + 5) + x = layers.Activation('relu', name=prefix + '_sepconv1_act')(x) + x = layers.SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv1')(x) + x = layers.BatchNormalization(axis=channel_axis, name=prefix + '_sepconv1_bn')(x) + x = 
layers.Activation('relu', name=prefix + '_sepconv2_act')(x) + x = layers.SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv2')(x) + x = layers.BatchNormalization(axis=channel_axis, name=prefix + '_sepconv2_bn')(x) + x = layers.Activation('relu', name=prefix + '_sepconv3_act')(x) + x = layers.SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv3')(x) + x = layers.BatchNormalization(axis=channel_axis, name=prefix + '_sepconv3_bn')(x) + x = layers.add([x, residual]) + residual = layers.Conv2D(1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x) + residual = layers.BatchNormalization(axis=channel_axis)(residual) + x = layers.Activation('relu', name='block13_sepconv1_act')(x) + x = layers.SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x) + x = layers.BatchNormalization(axis=channel_axis, name='block13_sepconv1_bn')(x) + x = layers.Activation('relu', name='block13_sepconv2_act')(x) + x = layers.SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x) + x = layers.BatchNormalization(axis=channel_axis, name='block13_sepconv2_bn')(x) + x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x) + x = layers.add([x, residual]) + x = layers.SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x) + x = layers.BatchNormalization(axis=channel_axis, name='block14_sepconv1_bn')(x) + x = layers.Activation('relu', name='block14_sepconv1_act')(x) + x = layers.SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x) + x = layers.BatchNormalization(axis=channel_axis, name='block14_sepconv2_bn')(x) + x = layers.Activation('relu', name='block14_sepconv2_act')(x) + if include_top: + x = layers.GlobalAveragePooling2D(name='avg_pool')(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x) + elif pooling == 'avg': + x = layers.GlobalAveragePooling2D()(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D()(x) + if input_tensor is not None: + inputs = operation_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + model = Functional(inputs, x, name=name) + if weights == 'imagenet': + if include_top: + weights_path = file_utils.get_file('xception_weights_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH, cache_subdir='models', file_hash='0a58e3b7378bc2990ea3b43d5981f1f6') + else: + weights_path = file_utils.get_file('xception_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models', file_hash='b0042744bf5b25fce3cb969f33bebb97') + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + return model + +@keras_export('keras.applications.xception.preprocess_input') +def preprocess_input(x, data_format=None): + return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf') + +@keras_export('keras.applications.xception.decode_predictions') +def decode_predictions(preds, top=5): + return imagenet_utils.decode_predictions(preds, top=top) +preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) +decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
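The application constructors above all follow the same calling convention, so a short usage sketch may help. This is a minimal, hedged example, not part of the library source: it assumes the `keras` package is installed, and `weights='imagenet'` would additionally require network access to download the pretrained weights.

import numpy as np
import keras

# Build Xception with randomly initialized weights; pass weights="imagenet"
# instead for the pretrained ImageNet classifier.
model = keras.applications.Xception(weights=None)
# Xception expects 299x299 RGB inputs; preprocess_input (mode="tf") rescales
# pixel values from [0, 255] to the [-1, 1] range the network was trained on.
images = np.random.uniform(0, 255, size=(1, 299, 299, 3)).astype("float32")
preds = model.predict(keras.applications.xception.preprocess_input(images))
print(preds.shape)  # (1, 1000)
# With pretrained weights, the raw scores can be decoded into
# (imagenet_id, label, score) tuples:
# keras.applications.xception.decode_predictions(preds, top=5)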
 + +# File: keras-master/keras/src/backend/__init__.py +from keras.src.backend.config import backend +if backend() == 'torch': + import torch +from keras.src.backend.common.dtypes import result_type +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.keras_tensor import any_symbolic_tensors +from keras.src.backend.common.keras_tensor import is_keras_tensor +from keras.src.backend.common.name_scope import name_scope +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.backend.common.stateless_scope import get_stateless_scope +from keras.src.backend.common.stateless_scope import in_stateless_scope +from keras.src.backend.common.symbolic_scope import SymbolicScope +from keras.src.backend.common.symbolic_scope import in_symbolic_scope +from keras.src.backend.common.variables import AutocastScope +from keras.src.backend.common.variables import get_autocast_scope +from keras.src.backend.common.variables import is_float_dtype +from keras.src.backend.common.variables import is_int_dtype +from keras.src.backend.common.variables import standardize_dtype +from keras.src.backend.common.variables import standardize_shape +from keras.src.backend.config import epsilon +from keras.src.backend.config import floatx +from keras.src.backend.config import image_data_format +from keras.src.backend.config import set_epsilon +from keras.src.backend.config import set_floatx +from keras.src.backend.config import set_image_data_format +from keras.src.backend.config import standardize_data_format +if backend() == 'tensorflow': + from keras.src.backend.tensorflow import * +elif backend() == 'jax': + from keras.src.backend.jax import * +elif backend() == 'torch': + from keras.src.backend.torch import * + distribution_lib = None +elif backend() == 'numpy': + from keras.src.backend.numpy import * + distribution_lib = None +else: + raise ValueError(f'Unable to import backend: {backend()}') + +# File: keras-master/keras/src/backend/common/__init__.py +from keras.src.backend.common import backend_utils +from keras.src.backend.common.dtypes import result_type +from keras.src.backend.common.variables import AutocastScope +from keras.src.backend.common.variables import KerasVariable +from keras.src.backend.common.variables import get_autocast_scope +from keras.src.backend.common.variables import is_float_dtype +from keras.src.backend.common.variables import is_int_dtype +from keras.src.backend.common.variables import standardize_dtype +from keras.src.backend.common.variables import standardize_shape +from keras.src.random import random + +# File: keras-master/keras/src/backend/common/backend_utils.py +import functools +import operator +import re +import warnings + +def _convert_conv_transpose_padding_args_from_keras_to_jax(kernel_size, stride, dilation_rate, padding, output_padding): + assert padding.lower() in {'valid', 'same'} + kernel_size = (kernel_size - 1) * dilation_rate + 1 + if padding.lower() == 'valid': + output_padding = max(kernel_size, stride) - kernel_size if output_padding is None else output_padding + left_pad = kernel_size - 1 + right_pad = kernel_size - 1 + output_padding + else: + if output_padding is None: + pad_len = stride + kernel_size - 2 + else: + pad_len = kernel_size + kernel_size % 2 - 2 + output_padding + left_pad = min(pad_len // 2 + pad_len % 2, kernel_size - 1) + right_pad = pad_len - left_pad + return (left_pad, right_pad) + +def _convert_conv_transpose_padding_args_from_keras_to_torch(kernel_size, stride, dilation_rate, padding, output_padding): + assert padding.lower() in {'valid', 'same'} + original_kernel_size =
kernel_size + kernel_size = (kernel_size - 1) * dilation_rate + 1 + if padding.lower() == 'valid': + output_padding = max(kernel_size, stride) - kernel_size if output_padding is None else output_padding + torch_padding = 0 + torch_output_padding = output_padding + else: + output_padding = stride - kernel_size % 2 if output_padding is None else output_padding + torch_padding = max(-((kernel_size % 2 - kernel_size + output_padding) // 2), 0) + torch_output_padding = 2 * torch_padding + kernel_size % 2 - kernel_size + output_padding + if torch_padding > 0 and torch_output_padding > 0: + warnings.warn(f'You might experience inconsistencies across backends when calling conv transpose with kernel_size={original_kernel_size}, stride={stride}, dilation_rate={dilation_rate}, padding={padding}, output_padding={output_padding}.') + if torch_output_padding >= stride: + raise ValueError(f'The padding arguments (padding={padding} and output_padding={output_padding}) lead to a Torch output_padding ({torch_output_padding}) that is greater than or equal to the stride ({stride}). This is not supported. You can change the padding arguments, kernel or stride, or run on another backend.') + return (torch_padding, torch_output_padding) + +def compute_conv_transpose_padding_args_for_jax(input_shape, kernel_shape, strides, padding, output_padding, dilation_rate): + num_spatial_dims = len(input_shape) - 2 + kernel_spatial_shape = kernel_shape[:-2] + jax_padding = [] + for i in range(num_spatial_dims): + output_padding_i = output_padding if output_padding is None or isinstance(output_padding, int) else output_padding[i] + strides_i = strides if isinstance(strides, int) else strides[i] + dilation_rate_i = dilation_rate if isinstance(dilation_rate, int) else dilation_rate[i] + (pad_left, pad_right) = _convert_conv_transpose_padding_args_from_keras_to_jax(kernel_size=kernel_spatial_shape[i], stride=strides_i, dilation_rate=dilation_rate_i, padding=padding, output_padding=output_padding_i) + jax_padding.append((pad_left, pad_right)) + return jax_padding + +def compute_conv_transpose_padding_args_for_torch(input_shape, kernel_shape, strides, padding, output_padding, dilation_rate): + num_spatial_dims = len(input_shape) - 2 + kernel_spatial_shape = kernel_shape[:-2] + torch_paddings = [] + torch_output_paddings = [] + for i in range(num_spatial_dims): + output_padding_i = output_padding if output_padding is None or isinstance(output_padding, int) else output_padding[i] + strides_i = strides if isinstance(strides, int) else strides[i] + dilation_rate_i = dilation_rate if isinstance(dilation_rate, int) else dilation_rate[i] + (torch_padding, torch_output_padding) = _convert_conv_transpose_padding_args_from_keras_to_torch(kernel_size=kernel_spatial_shape[i], stride=strides_i, dilation_rate=dilation_rate_i, padding=padding, output_padding=output_padding_i) + torch_paddings.append(torch_padding) + torch_output_paddings.append(torch_output_padding) + return (torch_paddings, torch_output_paddings) + +def _get_output_shape_given_tf_padding(input_size, kernel_size, strides, padding, output_padding, dilation_rate): + if input_size is None: + return None + assert padding.lower() in {'valid', 'same'} + kernel_size = (kernel_size - 1) * dilation_rate + 1 + if padding.lower() == 'valid': + output_padding = max(kernel_size, strides) - kernel_size if output_padding is None else output_padding + return (input_size - 1) * strides + kernel_size + output_padding + elif output_padding is None: + return input_size * strides + else: + return (input_size -
1) * strides + kernel_size % 2 + output_padding + +def compute_conv_transpose_output_shape(input_shape, kernel_size, filters, strides, padding, output_padding=None, data_format='channels_last', dilation_rate=1): + num_spatial_dims = len(input_shape) - 2 + kernel_spatial_shape = kernel_size + if isinstance(output_padding, int): + output_padding = (output_padding,) * len(kernel_spatial_shape) + if isinstance(strides, int): + strides = (strides,) * num_spatial_dims + if isinstance(dilation_rate, int): + dilation_rate = (dilation_rate,) * num_spatial_dims + if data_format == 'channels_last': + input_spatial_shape = input_shape[1:-1] + else: + input_spatial_shape = input_shape[2:] + output_shape = [] + for i in range(num_spatial_dims): + current_output_padding = None if output_padding is None else output_padding[i] + shape_i = _get_output_shape_given_tf_padding(input_size=input_spatial_shape[i], kernel_size=kernel_spatial_shape[i], strides=strides[i], padding=padding, output_padding=current_output_padding, dilation_rate=dilation_rate[i]) + output_shape.append(shape_i) + if data_format == 'channels_last': + output_shape = [input_shape[0]] + output_shape + [filters] + else: + output_shape = [input_shape[0], filters] + output_shape + return output_shape + +def canonicalize_axis(axis, num_dims): + axis = operator.index(axis) + if not -num_dims <= axis < num_dims: + raise ValueError(f'axis {axis} is out of bounds for an array with dimension {num_dims}.') + if axis < 0: + axis = axis + num_dims + return axis + +def standardize_axis_for_numpy(axis): + return tuple(axis) if isinstance(axis, list) else axis + +def to_tuple_or_list(value): + if value is None: + return value + if not isinstance(value, (int, tuple, list)): + raise ValueError(f'`value` must be an integer, tuple or list. 
Received: value={value}') + if isinstance(value, int): + return (value,) + return value +_DIMENSION_NAME = '\\w+' +_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) +_ARGUMENT = f'\\({_CORE_DIMENSION_LIST}\\)' +_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) +_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) + +def _vectorize_parse_gufunc_signature(signature): + if not re.match(_SIGNATURE, signature): + raise ValueError(f'not a valid gufunc signature: {signature}') + (args, retvals) = ([tuple(re.findall(_DIMENSION_NAME, arg)) for arg in re.findall(_ARGUMENT, arg_list)] for arg_list in signature.split('->')) + return (args, retvals) + +def _vectorize_update_dim_sizes(dim_sizes, shape, core_dims, is_input=True): + num_core_dims = len(core_dims) + if is_input: + if len(shape) < num_core_dims: + raise ValueError(f'input with shape {shape} does not have enough dimensions for all core dimensions {core_dims}') + elif len(shape) != num_core_dims: + raise ValueError(f'output shape {shape} does not match core dimensions {core_dims}') + core_shape = shape[-num_core_dims:] if core_dims else () + for (dim, size) in zip(core_dims, core_shape): + if dim not in dim_sizes: + dim_sizes[dim] = size + elif size != dim_sizes[dim]: + raise ValueError(f'inconsistent size for core dimension {dim}: {size} vs {dim_sizes[dim]}') + +def _vectorize_parse_input_dimensions(args, input_core_dims): + from keras.src import ops + if len(args) != len(input_core_dims): + raise TypeError(f'wrong number of positional arguments: expected {len(input_core_dims)}, got {len(args)}') + shapes = [] + dim_sizes: dict[str, int] = {} + for (arg, core_dims) in zip(args, input_core_dims): + _vectorize_update_dim_sizes(dim_sizes, arg.shape, core_dims, is_input=True) + ndim = arg.ndim - len(core_dims) + shapes.append(arg.shape[:ndim]) + broadcast_shape = shapes[0] + for s in shapes: + broadcast_shape = ops.broadcast_shapes(broadcast_shape, s) + return (broadcast_shape, dim_sizes) + +def _vectorize_check_output_dims(func, dim_sizes, expected_output_core_dims): + from keras.src import ops + + def wrapped(*args): + out = func(*args) + if isinstance(out, (list, tuple)): + out_shapes = [ops.shape(x) for x in out] + else: + out_shapes = [out.shape] + if expected_output_core_dims is None: + output_core_dims = [()] * len(out_shapes) + else: + output_core_dims = expected_output_core_dims + if len(output_core_dims) > 1 and (not isinstance(out, tuple)): + raise TypeError(f'output must be a tuple when multiple outputs are expected, got: {out}') + if len(out_shapes) != len(output_core_dims): + raise TypeError(f'wrong number of output arguments: expected {len(output_core_dims)}, got {len(out_shapes)}') + sizes = dict(dim_sizes) + for (shape, core_dims) in zip(out_shapes, output_core_dims): + _vectorize_update_dim_sizes(sizes, shape, core_dims, is_input=False) + return out + return wrapped + +def _vectorize_apply_excluded(func, excluded, args, kwargs): + if not excluded: + return (func, args, kwargs) + dynamic_args = [arg for (i, arg) in enumerate(args) if i not in excluded] + dynamic_kwargs = {key: val for (key, val) in kwargs.items() if key not in excluded} + static_args = [(i, args[i]) for i in sorted((e for e in excluded if isinstance(e, int))) if i < len(args)] + static_kwargs = {key: val for (key, val) in kwargs.items() if key in excluded} + + def new_func(*args, **kwargs): + args = list(args) + for (i, arg) in static_args: + args.insert(i, arg) + return func(*args, **kwargs, **static_kwargs) + return (new_func, dynamic_args, 
dynamic_kwargs) + +def vectorize_impl(pyfunc, vmap_fn, *, excluded=None, signature=None): + from keras.src import ops + excluded = excluded or set() + + @functools.wraps(pyfunc) + def wrapped(*args, **kwargs): + (excluded_func, args, kwargs) = _vectorize_apply_excluded(pyfunc, excluded, args, kwargs) + if signature is not None: + (input_core_dims, output_core_dims) = _vectorize_parse_gufunc_signature(signature) + else: + input_core_dims = [()] * len(args) + output_core_dims = None + none_args = {i for (i, arg) in enumerate(args) if arg is None} + if any(none_args): + if any((input_core_dims[i] != () for i in none_args)): + raise ValueError(f'Cannot pass None at locations {none_args} with signature={signature}') + (excluded_func, args, _) = _vectorize_apply_excluded(excluded_func, none_args, args, {}) + input_core_dims = [dim for (i, dim) in enumerate(input_core_dims) if i not in none_args] + args = tuple(map(ops.convert_to_tensor, args)) + (broadcast_shape, dim_sizes) = _vectorize_parse_input_dimensions(args, input_core_dims) + checked_func = _vectorize_check_output_dims(excluded_func, dim_sizes, output_core_dims) + squeezed_args = [] + rev_filled_shapes = [] + for (arg, core_dims) in zip(args, input_core_dims): + noncore_shape = arg.shape[:arg.ndim - len(core_dims)] + pad_ndim = len(broadcast_shape) - len(noncore_shape) + filled_shape = pad_ndim * (1,) + noncore_shape + rev_filled_shapes.append(filled_shape[::-1]) + squeeze_indices = tuple((i for (i, size) in enumerate(noncore_shape) if size == 1)) + squeezed_arg = ops.squeeze(arg, axis=squeeze_indices) + squeezed_args.append(squeezed_arg) + vectorized_func = checked_func + dims_to_expand = [] + for (negdim, axis_sizes) in enumerate(zip(*rev_filled_shapes)): + in_axes = tuple((None if size == 1 else 0 for size in axis_sizes)) + if all((axis is None for axis in in_axes)): + dims_to_expand.append(len(broadcast_shape) - 1 - negdim) + else: + vectorized_func = vmap_fn(vectorized_func, in_axes) + result = vectorized_func(*squeezed_args) + if not dims_to_expand: + return result + elif isinstance(result, tuple): + return tuple((ops.expand_dims(r, axis=dims_to_expand) for r in result)) + else: + return ops.expand_dims(result, axis=dims_to_expand) + return wrapped + +def slice_along_axis(x, start=0, stop=None, step=1, axis=0): + if axis >= 0: + slices = [slice(None)] * axis + [slice(start, stop, step)] + else: + slices = [Ellipsis, slice(start, stop, step)] + [slice(None)] * (-1 - axis) + return x[tuple(slices)]
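`slice_along_axis` builds a tuple of `slice` objects so that a basic slice can be applied along an arbitrary, possibly negative, axis. A minimal sketch of the behavior, using NumPy arrays (which support the same `__getitem__` protocol the helper relies on); note that the import path is the internal module shown above and may change between releases:

import numpy as np
from keras.src.backend.common.backend_utils import slice_along_axis

x = np.arange(24).reshape(2, 3, 4)
# Positive axis: full slices are prepended, so axis=1 is equivalent to x[:, 0:2].
print(slice_along_axis(x, start=0, stop=2, axis=1).shape)  # (2, 2, 4)
# Negative axis: Ellipsis plus trailing full slices, so axis=-1 is x[..., 1:].
print(slice_along_axis(x, start=1, axis=-1).shape)  # (2, 3, 3)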
 + +# File: keras-master/keras/src/backend/common/dtypes.py +import functools +from keras.src.api_export import keras_export +from keras.src.backend import config +from keras.src.backend.common.variables import standardize_dtype +BOOL_TYPES = ('bool',) +INT_TYPES = ('uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16', 'int32', 'int64') +FLOAT_TYPES = ('bfloat16', 'float16', 'float32', 'float64') +WEAK_TYPES = ('int', 'float') +FLOAT8_TYPES = ('float8_e4m3fn', 'float8_e5m2') +ALLOWED_DTYPES = ('float16', 'float32', 'float64', 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16', 'int32', 'int64', 'bfloat16', 'bool', 'string', 'float8_e4m3fn', 'float8_e5m2') +PYTHON_DTYPES_MAP = {bool: 'bool', int: 'int64' if config.backend() == 'tensorflow' else 'int32', float: 'float32', str: 'string', 'int': 'int64' if config.backend() == 'tensorflow' else 'int32'} + +def _type_promotion_lattice(): + (b1,) = BOOL_TYPES + (u1, u2, u4, u8, i1, i2, i4, i8) = INT_TYPES + (bf, f2, f4, f8) = FLOAT_TYPES + (i_, f_) = WEAK_TYPES + out = {b1: [i_], u1: [i2, u2], u2: [i4, u4], u4: [i8, u8], u8: [f_], i_: [u1, i1], i1: [i2], i2: [i4], i4: [i8], i8: [f_], f_: [bf, f2], bf: [f4], f2: [f4], f4: [f8], f8: []} + return out + +def _make_lattice_upper_bounds(): + lattice = _type_promotion_lattice() + upper_bounds = {node: {node} for node in lattice} + for n in lattice: + while True: + new_upper_bounds = set().union(*(lattice[b] for b in upper_bounds[n])) + if n in new_upper_bounds: + raise ValueError(f'cycle detected in type promotion lattice for node {n}') + if new_upper_bounds.issubset(upper_bounds[n]): + break + upper_bounds[n] |= new_upper_bounds + return upper_bounds +LATTICE_UPPER_BOUNDS = _make_lattice_upper_bounds() + +@functools.lru_cache(512) +def _least_upper_bound(*nodes): + N = set(nodes) + UB = LATTICE_UPPER_BOUNDS + try: + bounds = [UB[n] for n in N] + except KeyError: + dtype = next((n for n in N if n not in UB)) + raise ValueError(f'dtype={dtype!r} is not a valid dtype for Keras type promotion.') + CUB = set.intersection(*bounds) + LUB = CUB & N or {c for c in CUB if CUB.issubset(UB[c])} + if len(LUB) == 1: + return LUB.pop() + elif len(LUB) == 0: + msg = f'Input dtypes {tuple((str(n) for n in nodes))} have no available implicit dtype promotion path. Try explicitly casting inputs to the desired output type.' + raise ValueError(msg) + else: + raise ValueError(f"Internal Type Promotion error: {nodes} do not have a unique least upper bound on the specified lattice; options are {LUB}. This is an unexpected error in Keras's internal logic; please report it to the maintainers.") + +def _dtype_and_weaktype(value): + is_weak_type = False + if value is int or value is float: + is_weak_type = True + return (standardize_dtype(value), is_weak_type) + +@functools.lru_cache(maxsize=None) +def _respect_weak_type(dtype, weak_type): + if weak_type: + if dtype == 'bool': + return dtype + elif 'float' in dtype: + return 'float' + elif 'int' in dtype: + return 'int' + else: + raise ValueError(f'Invalid value for argument `dtype`. Expected one of {ALLOWED_DTYPES}. Received: dtype={dtype}') + return dtype + +@functools.lru_cache(maxsize=None) +def _resolve_weak_type(dtype, precision='32'): + extended_allowed_dtypes = set(ALLOWED_DTYPES).union(WEAK_TYPES) + if dtype not in extended_allowed_dtypes: + raise ValueError(f'Invalid value for argument `dtype`. Expected one of {extended_allowed_dtypes}. Received: dtype={dtype}') + if precision not in ['16', '32', '64']: + raise ValueError(f"Invalid value for argument `precision`. Expected one of ('16', '32', '64'). 
Received: precision={precision}") + if dtype == 'bfloat16': + dtype_indicator = 'f' + else: + dtype_indicator = dtype[:1] + if dtype_indicator == 'b': + return 'bool' + elif dtype_indicator == 'i': + return 'int' + precision + elif dtype_indicator == 'u': + return 'uint' + precision + else: + return 'float' + precision +BIT64_TO_BIT16_DTYPE = {'int32': 'int16', 'int64': 'int16', 'uint32': 'uint16', 'uint64': 'uint16', 'float32': 'float16', 'float64': 'float16'} +BIT64_TO_BIT32_DTYPE = {'int64': 'int32', 'uint64': 'uint32', 'float64': 'float32'} + +def _lattice_result_type(*args): + (dtypes, weak_types) = zip(*(_dtype_and_weaktype(arg) for arg in args)) + if len(dtypes) == 1: + out_dtype = dtypes[0] + out_weak_type = weak_types[0] + elif len(set(dtypes)) == 1 and (not all(weak_types)): + out_dtype = dtypes[0] + out_weak_type = False + elif all(weak_types): + out_dtype = _least_upper_bound(*{_respect_weak_type(d, False) for d in dtypes}) + out_weak_type = True + else: + out_dtype = _least_upper_bound(*{_respect_weak_type(d, w) for (d, w) in zip(dtypes, weak_types)}) + out_weak_type = any((out_dtype is t for t in WEAK_TYPES)) + out_weak_type = out_dtype != 'bool' and out_weak_type + precision = config.floatx()[-2:] + if out_weak_type: + out_dtype = _resolve_weak_type(out_dtype, precision=precision) + return out_dtype + +@keras_export('keras.backend.result_type') +def result_type(*dtypes): + if len(dtypes) == 0: + return config.floatx() + for dtype in dtypes: + if dtype in FLOAT8_TYPES: + raise ValueError(f'There is no implicit conversion from float8 dtypes to others. You must cast it explicitly. Received: {dtypes}') + return _lattice_result_type(*(config.floatx() if arg is None else arg for arg in dtypes))
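The lattice above implements JAX-style type promotion: Python `int` and `float` are "weak" types that defer to the other operand's width, while concrete dtypes meet at their least upper bound. A minimal sketch of the exported entry point; the printed results were read off the lattice above and assume the default `floatx()` of 'float32':

import keras

print(keras.backend.result_type('int8', 'int32'))     # 'int32'
print(keras.backend.result_type('uint8', 'int8'))     # 'int16', the smallest signed type holding both
print(keras.backend.result_type('int32', 'float16'))  # 'float16': floats win at their own width
print(keras.backend.result_type(int, 'float16'))      # 'float16': Python int is weakly typed
print(keras.backend.result_type())                    # 'float32', i.e. floatx()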
 + +# File: keras-master/keras/src/backend/common/global_state.py +import gc +import threading +from keras.src import backend +from keras.src.api_export import keras_export +GLOBAL_STATE_TRACKER = threading.local() +GLOBAL_SETTINGS_TRACKER = threading.local() + +def set_global_attribute(name, value): + setattr(GLOBAL_STATE_TRACKER, name, value) + +def get_global_attribute(name, default=None, set_to_default=False): + attr = getattr(GLOBAL_STATE_TRACKER, name, None) + if attr is None and default is not None: + attr = default + if set_to_default: + set_global_attribute(name, attr) + return attr + +@keras_export(['keras.utils.clear_session', 'keras.backend.clear_session']) +def clear_session(free_memory=True): + global GLOBAL_STATE_TRACKER + global GLOBAL_SETTINGS_TRACKER + GLOBAL_STATE_TRACKER = threading.local() + GLOBAL_SETTINGS_TRACKER = threading.local() + if backend.backend() == 'tensorflow': + from keras.src.utils.module_utils import tensorflow as tf + tf.compat.v1.reset_default_graph() + if tf.executing_eagerly(): + from tensorflow.python.eager import context + context.context().clear_kernel_cache() + elif backend.backend() == 'torch': + import torch._dynamo as dynamo + dynamo.reset() + if free_memory: + gc.collect() + +# File: keras-master/keras/src/backend/common/keras_tensor.py +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.utils.naming import auto_name + +@keras_export('keras.KerasTensor') +class KerasTensor: + + def __init__(self, shape, dtype='float32', sparse=False, record_history=True, name=None): + from keras.src import backend + self._shape = backend.standardize_shape(shape) + self._dtype = backend.standardize_dtype(dtype) + self._sparse = bool(sparse) + self.name = name or auto_name(self.__class__.__name__) + self.record_history = record_history + + @property + def shape(self): + return self._shape + + @shape.setter + def shape(self, value): + raise AttributeError(f'The shape of {self.__class__.__name__} is immutable. One should create a new instance of KerasTensor for this.') + + @property + def dtype(self): + return self._dtype + + @dtype.setter + def dtype(self, value): + raise AttributeError(f'The dtype of {self.__class__.__name__} is immutable. One should create a new instance of KerasTensor for this.') + + @property + def sparse(self): + return self._sparse + + @sparse.setter + def sparse(self, value): + raise AttributeError(f'The `sparse` attribute of {self.__class__.__name__} is immutable. One should create a new instance of KerasTensor for this.') + + @property + def ndim(self): + return len(self.shape) + + def reshape(self, newshape): + from keras.src import ops + return ops.Reshape(newshape)(self) + + def squeeze(self, axis=None): + from keras.src import ops + return ops.Squeeze(axis)(self) + + def __int__(self): + raise ValueError("A KerasTensor is symbolic: it's a placeholder for a shape and a dtype. It doesn't have any actual numerical value. You cannot convert it to an int.") + + def __float__(self): + raise ValueError("A KerasTensor is symbolic: it's a placeholder for a shape and a dtype. It doesn't have any actual numerical value. You cannot convert it to a float.") + + def __array__(self): + raise ValueError("A KerasTensor is symbolic: it's a placeholder for a shape and a dtype. It doesn't have any actual numerical value. You cannot convert it to a NumPy array.") + + def __jax_array__(self): + raise ValueError('A KerasTensor cannot be used as input to a JAX function. A KerasTensor is a symbolic placeholder for a shape and dtype, used when constructing Keras Functional models or Keras Functions. You can only use it as input to a Keras layer or a Keras operation (from the namespaces `keras.layers` and `keras.operations`). You are likely doing something like:\n\n```\nx = Input(...)\n...\njax_fn(x) # Invalid.\n```\n\nWhat you should do instead is wrap `jax_fn` in a layer:\n\n```\nclass MyLayer(Layer):\n def call(self, x):\n return jax_fn(x)\n\nx = MyLayer()(x)\n```\n') + + def __tf_tensor__(self, dtype=None, name=None): + raise ValueError('A KerasTensor cannot be used as input to a TensorFlow function. A KerasTensor is a symbolic placeholder for a shape and dtype, used when constructing Keras Functional models or Keras Functions. You can only use it as input to a Keras layer or a Keras operation (from the namespaces `keras.layers` and `keras.operations`).
You are likely doing something like:\n\n```\nx = Input(...)\n...\ntf_fn(x) # Invalid.\n```\n\nWhat you should do instead is wrap `tf_fn` in a layer:\n\n```\nclass MyLayer(Layer):\n def call(self, x):\n return tf_fn(x)\n\nx = MyLayer()(x)\n```\n') + + def __repr__(self): + return f'<KerasTensor shape={self.shape}, dtype={self.dtype}, sparse={self.sparse}, name={self.name}>' + + def __iter__(self): + raise NotImplementedError('Iterating over a symbolic KerasTensor is not supported.') + + def __bool__(self): + raise TypeError('A symbolic KerasTensor cannot be used as a boolean.') + + def __add__(self, other): + from keras.src import ops + return ops.Add().symbolic_call(self, other) + + def __radd__(self, other): + from keras.src import ops + return ops.Add().symbolic_call(other, self) + + def __sub__(self, other): + from keras.src import ops + return ops.Subtract().symbolic_call(self, other) + + def __rsub__(self, other): + from keras.src import ops + return ops.Subtract().symbolic_call(other, self) + + def __mul__(self, other): + from keras.src import ops + return ops.Multiply().symbolic_call(self, other) + + def __rmul__(self, other): + from keras.src import ops + return ops.Multiply().symbolic_call(other, self) + + def __matmul__(self, other): + from keras.src import ops + return ops.Matmul().symbolic_call(self, other) + + def __rmatmul__(self, other): + from keras.src import ops + return ops.Matmul().symbolic_call(other, self) + + def __div__(self, other): + from keras.src import ops + return ops.Divide().symbolic_call(self, other) + + def __rdiv__(self, other): + from keras.src import ops + return ops.Divide().symbolic_call(other, self) + + def __truediv__(self, other): + from keras.src import ops + return ops.TrueDivide().symbolic_call(self, other) + + def __rtruediv__(self, other): + from keras.src import ops + return ops.TrueDivide().symbolic_call(other, self) + + def __neg__(self): + from keras.src import ops + return ops.Negative().symbolic_call(self) + + def __abs__(self): + from keras.src import ops + return ops.Absolute().symbolic_call(self) + + def __pow__(self, other): + from keras.src import ops + return ops.Power().symbolic_call(self, other) + + def __rpow__(self, other): + from keras.src import ops + return ops.Power().symbolic_call(other, self) + + def __floordiv__(self, other): + from keras.src import ops + return ops.FloorDivide().symbolic_call(self, other) + + def __rfloordiv__(self, other): + from keras.src import ops + return ops.FloorDivide().symbolic_call(other, self) + + def __mod__(self, other): + from keras.src import ops + return ops.Mod().symbolic_call(self, other) + + def __rmod__(self, other): + from keras.src import ops + return ops.Mod().symbolic_call(other, self) + + def __lt__(self, other): + from keras.src import ops + return ops.Less().symbolic_call(self, other) + + def __le__(self, other): + from keras.src import ops + return ops.LessEqual().symbolic_call(self, other) + + def __gt__(self, other): + from keras.src import ops + return ops.Greater().symbolic_call(self, other) + + def __ge__(self, other): + from keras.src import ops + return ops.GreaterEqual().symbolic_call(self, other) + + def __ne__(self, other): + from keras.src import ops + return ops.NotEqual().symbolic_call(self, other) + + def __and__(self, other): + from keras.src import ops + return ops.LogicalAnd().symbolic_call(self, other) + + def __rand__(self, other): + from keras.src import ops + return ops.LogicalAnd().symbolic_call(other, self) + + def __or__(self, other): + from keras.src import ops + return ops.LogicalOr().symbolic_call(self, other) + + def __ror__(self, other):
+ from keras.src import ops + return ops.LogicalOr().symbolic_call(other, self) + + def __invert__(self): + from keras.src import ops + return ops.LogicalNot().symbolic_call(self) + + def __xor__(self, other): + from keras.src import ops + return ops.LogicalXor().symbolic_call(self, other) + + def __rxor__(self, other): + from keras.src import ops + return ops.LogicalXor().symbolic_call(other, self) + + def __getitem__(self, key): + from keras.src import ops + return ops.GetItem().symbolic_call(self, key) + + def __round__(self, ndigits=None): + from keras.src import ops + decimals = ndigits or 0 + return ops.Round(decimals=decimals).symbolic_call(self) + +def any_symbolic_tensors(args=None, kwargs=None): + args = args or () + kwargs = kwargs or {} + for x in tree.flatten((args, kwargs)): + if isinstance(x, KerasTensor): + return True + return False + +@keras_export(['keras.utils.is_keras_tensor', 'keras.backend.is_keras_tensor']) +def is_keras_tensor(x): + return isinstance(x, KerasTensor) + +# File: keras-master/keras/src/backend/common/name_scope.py +from keras.src.backend.common import global_state + +class name_scope: + + def __init__(self, name, caller=None, deduplicate=True, override_parent=None): + if not isinstance(name, str) or '/' in name: + raise ValueError(f'Argument `name` must be a string and cannot contain character `/`. Received: name={name}') + self.name = name + self.caller = caller + self.deduplicate = deduplicate + self.override_parent = override_parent + if override_parent is None and deduplicate and (getattr(caller, '_parent_path', None) is not None): + self.override_parent = caller._parent_path + self._pop_on_exit = False + + def __enter__(self): + name_scope_stack = global_state.get_global_attribute('name_scope_stack', default=[], set_to_default=True) + if self.deduplicate and name_scope_stack: + parent_caller = name_scope_stack[-1].caller + parent_name = name_scope_stack[-1].name + if self.caller is not None and self.caller is parent_caller and (self.name == parent_name): + return self + name_scope_stack.append(self) + self._pop_on_exit = True + return self + + def __exit__(self, *args, **kwargs): + if self._pop_on_exit: + name_scope_stack = global_state.get_global_attribute('name_scope_stack') + name_scope_stack.pop() + +def current_path(): + name_scope_stack = global_state.get_global_attribute('name_scope_stack') + if name_scope_stack is None: + return '' + parts = [] + for entry in name_scope_stack: + if entry.override_parent is not None: + parts = [p for p in entry.override_parent.split('/') if p] + parts.append(entry.name) + return '/'.join(parts) + +# File: keras-master/keras/src/backend/common/stateless_scope.py +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state + +@keras_export('keras.StatelessScope') +class StatelessScope: + + def __init__(self, state_mapping=None, collect_losses=False, initialize_variables=True): + from keras.src import backend + from keras.src.backend.common.variables import KerasVariable + self.collect_losses = collect_losses + self.initialize_variables = initialize_variables + self.losses = [] + self.state_mapping = {} + state_mapping = state_mapping or {} + for (k, v) in state_mapping: + if not isinstance(k, KerasVariable): + raise ValueError(f'Invalid reference variable in StatelessScope: all keys in argument `mapping` must be KerasVariable instances. 
Received instead: {k}') + if isinstance(v, KerasVariable): + v = backend.cast(v.value, dtype=k.dtype) + else: + v = backend.convert_to_tensor(v, dtype=k.dtype) + if k.shape != v.shape: + raise ValueError(f'Invalid variable value in StatelessScope: all values in argument `mapping` must be tensors with a shape that matches the corresponding variable shape. For variable {k}, received invalid value {v} with shape {v.shape}.') + self.state_mapping[id(k)] = v + + def __enter__(self): + self.original_scope = get_stateless_scope() + global_state.set_global_attribute('stateless_scope', self) + return self + + def add_loss(self, loss): + self.losses.append(loss) + + def add_update(self, update): + (variable, value) = update + self.state_mapping[id(variable)] = value + + def get_current_value(self, variable): + return self.state_mapping.get(id(variable), None) + + def __exit__(self, *args, **kwargs): + global_state.set_global_attribute('stateless_scope', self.original_scope) + if self.original_scope is None and self.initialize_variables: + from keras.src.backend.common.variables import initialize_all_variables + initialize_all_variables() + +def in_stateless_scope(): + return global_state.get_global_attribute('stateless_scope') is not None + +def get_stateless_scope(): + return global_state.get_global_attribute('stateless_scope') + +# File: keras-master/keras/src/backend/common/symbolic_scope.py +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state + +@keras_export('keras.SymbolicScope') +class SymbolicScope: + + def __enter__(self): + self.original_scope = get_symbolic_scope() + global_state.set_global_attribute('symbolic_scope', self) + return self + + def __exit__(self, *args, **kwargs): + global_state.set_global_attribute('symbolic_scope', self.original_scope) + +def in_symbolic_scope(): + return global_state.get_global_attribute('symbolic_scope') is not None + +def get_symbolic_scope(): + return global_state.get_global_attribute('symbolic_scope')
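`StatelessScope` is what lets functional-style code, such as a JAX-backend stateless train step, run Keras computations against a temporary variable-to-tensor mapping without mutating the variables themselves. A minimal sketch, assuming the public `keras.StatelessScope` and `keras.Variable` exports shown in this dump:

import numpy as np
import keras

v = keras.Variable(initializer='zeros', shape=(2,), name='v')
with keras.StatelessScope(state_mapping=[(v, np.ones((2,), 'float32'))]) as scope:
    # Inside the scope, reads of v.value are redirected to the mapped tensor...
    assert float(v.value[0]) == 1.0
    # ...and assignments are recorded on the scope, not written to the variable.
    v.assign(v + 1.0)
    new_value = scope.get_current_value(v)  # a tensor of 2.0s
# Outside the scope, the variable itself is unchanged.
assert float(v.value[0]) == 0.0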
 + +# File: keras-master/keras/src/backend/common/variables.py +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend import config +from keras.src.backend.common import dtypes +from keras.src.backend.common import global_state +from keras.src.backend.common.name_scope import current_path +from keras.src.backend.common.stateless_scope import get_stateless_scope +from keras.src.backend.common.stateless_scope import in_stateless_scope +from keras.src.utils.module_utils import tensorflow as tf +from keras.src.utils.naming import auto_name + +class KerasVariable: + + def __init__(self, initializer, shape=None, dtype=None, trainable=True, autocast=True, aggregation='mean', name=None): + name = name or auto_name(self.__class__.__name__) + if not isinstance(name, str) or '/' in name: + raise ValueError(f'Argument `name` must be a string and cannot contain character `/`. Received: name={name}') + if aggregation not in ('mean', 'sum', 'only_first_replica'): + raise ValueError(f"Invalid value for argument `aggregation`. Expected one of {{'mean', 'sum', 'only_first_replica'}}. Received: aggregation={aggregation}") + self.name = name + parent_path = current_path() + if parent_path: + self.path = current_path() + '/' + self.name + else: + self.path = self.name + dtype = standardize_dtype(dtype) + self._dtype = dtype + self._shape = None + self._initializer = None + self._regularizer = None + self._constraint = None + self._trainable = trainable + self._autocast = autocast + self._aggregation = aggregation + self._overwrite_with_gradient = False + if isinstance(initializer, str): + from keras.src import initializers + initializer = initializers.get(initializer) + if callable(initializer): + if shape is None: + raise ValueError(f'When creating a Variable from an initializer, the `shape` argument should be specified. Received: initializer={initializer} and shape={shape}') + if in_stateless_scope(): + if callable(initializer): + self._value = None + self._initializer = initializer + self._shape = self._validate_shape(shape) + register_uninitialized_variable(self) + else: + raise ValueError('You are attempting to create a variable while in a stateless scope. This is disallowed. Make sure that all variables are created before you start using your layer/model objects.\n\nIn some cases, you might be seeing this error because you need to implement a `def build(self, input_shape)` method on your layer/model, which will create its variables.\n\nIn some other cases, you might be seeing this error because you are instantiating a `Variable` and assigning it to a layer without going through self.add_variable()/self.add_weight(). Always prefer using these methods (with a `shape` and `initializer` argument).') + elif callable(initializer): + self._shape = self._validate_shape(shape) + self._initialize_with_initializer(initializer) + else: + self._initialize(initializer) + self._shape = tuple(self._value.shape) + self._ndim = len(self._shape) + + def _deferred_initialize(self): + if self._value is not None: + raise ValueError(f'Variable {self.path} is already initialized.') + if in_stateless_scope(): + raise ValueError('You are attempting to initialize a variable while in a stateless scope. This is disallowed. Make sure that all variables are initialized before you start using your layer/model objects.') + self._initialize_with_initializer(self._initializer) + self._initializer = None + + def _validate_shape(self, shape): + shape = standardize_shape(shape) + if None in shape: + raise ValueError(f"Shapes used to initialize variables must be fully-defined (no `None` dimensions). Received: shape={shape} for variable path='{self.path}'") + return shape + + def _maybe_autocast(self, value): + autocast_scope = get_autocast_scope() + if self._autocast and autocast_scope is not None: + return autocast_scope.maybe_cast(value) + return value + + def numpy(self): + return np.array(self) + + @property + def aggregation(self): + return self._aggregation + + @property + def value(self): + if in_stateless_scope(): + scope = get_stateless_scope() + value = scope.get_current_value(self) + if value is not None: + return self._maybe_autocast(value) + if self._value is None: + return self._maybe_autocast(self._initializer(self._shape, dtype=self._dtype)) + return self._maybe_autocast(self._value) + + def assign(self, value): + value = self._convert_to_tensor(value, dtype=self.dtype) + if not shape_equal(value.shape, self.shape): + raise ValueError(f'The shape of the target variable and the shape of the target value in `variable.assign(value)` must match. 
variable.shape={self.value.shape}, Received: value.shape={value.shape}. Target variable: {self}') + if in_stateless_scope(): + scope = get_stateless_scope() + scope.add_update((self, value)) + else: + self._direct_assign(value) + return value + + def assign_add(self, value): + return self.assign(self + value) + + def assign_sub(self, value): + return self.assign(self - value) + + @property + def dtype(self): + autocast_scope = get_autocast_scope() + if self._autocast and autocast_scope is not None and is_float_dtype(self._dtype): + return autocast_scope.dtype + return self._dtype + + @property + def shape(self): + return self._shape + + @property + def ndim(self): + return self._ndim + + @property + def trainable(self): + return self._trainable + + @trainable.setter + def trainable(self, value): + self._trainable = value + + @property + def overwrite_with_gradient(self): + return self._overwrite_with_gradient + + @overwrite_with_gradient.setter + def overwrite_with_gradient(self, value): + if not isinstance(value, bool): + raise TypeError(f'`overwrite_with_gradient` must be a boolean. Received: {value}') + self._overwrite_with_gradient = value + + @property + def regularizer(self): + return self._regularizer + + @regularizer.setter + def regularizer(self, value): + from keras.src.regularizers import Regularizer + if value is not None and (not isinstance(value, Regularizer)): + raise ValueError(f'Invalid value for attribute `regularizer`. Expected an instance of `keras.regularizers.Regularizer`, or `None`. Received: regularizer={value}') + self._regularizer = value + + @property + def constraint(self): + return self._constraint + + @constraint.setter + def constraint(self, value): + from keras.src.constraints import Constraint + if value is not None and (not isinstance(value, Constraint)): + raise ValueError(f'Invalid value for attribute `constraint`. Expected an instance of `keras.constraints.Constraint`, or `None`. Received: constraint={value}') + self._constraint = value + + def __repr__(self): + return f'<KerasVariable shape={self.shape}, dtype={self.dtype}, path={self.path}>' + + def _initialize(self, value): + raise NotImplementedError + + def _initialize_with_initializer(self, initializer): + value = initializer(self._shape, dtype=self._dtype) + self._initialize(value) + + def _convert_to_tensor(self, value, dtype=None): + raise NotImplementedError + + def __getitem__(self, idx): + return self.value.__getitem__(idx) + + def __int__(self): + if self.ndim > 0: + raise TypeError(f'Only scalar arrays can be converted to Python scalars. Got: shape={self.shape}') + return int(self.value) + + def __float__(self): + if self.ndim > 0: + raise TypeError(f'Only scalar arrays can be converted to Python scalars. 
Got: shape={self.shape}') + return float(self.value) + + def __array__(self, dtype=None): + return np.asarray(self.value.__array__(dtype)) + + def __bool__(self): + raise TypeError('A Keras Variable cannot be used as a boolean.') + + def __neg__(self): + return self.value.__neg__() + + def __pos__(self): + return self.value + + def __abs__(self): + return self.value.__abs__() + + def __invert__(self): + return self.value.__invert__() + + def __eq__(self, other): + return backend.numpy.equal(self.value, other) + + def __ne__(self, other): + return backend.numpy.not_equal(self.value, other) + + def __lt__(self, other): + return backend.numpy.less(self.value, other) + + def __le__(self, other): + return backend.numpy.less_equal(self.value, other) + + def __gt__(self, other): + return backend.numpy.greater(self.value, other) + + def __ge__(self, other): + return backend.numpy.greater_equal(self.value, other) + + def __add__(self, other): + return backend.numpy.add(self.value, other) + + def __radd__(self, other): + return backend.numpy.add(other, self.value) + + def __sub__(self, other): + return backend.numpy.subtract(self.value, other) + + def __rsub__(self, other): + return backend.numpy.subtract(other, self.value) + + def __mul__(self, other): + return backend.numpy.multiply(self.value, other) + + def __rmul__(self, other): + return backend.numpy.multiply(other, self.value) + + def __truediv__(self, other): + return backend.numpy.true_divide(self.value, other) + + def __rtruediv__(self, other): + return backend.numpy.true_divide(other, self.value) + + def __floordiv__(self, other): + return backend.numpy.floor_divide(self.value, other) + + def __rfloordiv__(self, other): + return backend.numpy.floor_divide(other, self.value) + + def __mod__(self, other): + return backend.numpy.mod(self.value, other) + + def __rmod__(self, other): + return backend.numpy.mod(other, self.value) + + def __pow__(self, other): + return backend.numpy.power(self.value, other) + + def __rpow__(self, other): + return backend.numpy.power(other, self.value) + + def __matmul__(self, other): + return backend.numpy.matmul(self.value, other) + + def __rmatmul__(self, other): + return backend.numpy.matmul(other, self.value) + + def __and__(self, other): + return backend.numpy.logical_and(self.value, other) + + def __rand__(self, other): + return backend.numpy.logical_and(other, self.value) + + def __or__(self, other): + return backend.numpy.logical_or(self.value, other) + + def __ror__(self, other): + return backend.numpy.logical_or(other, self.value) + + def __xor__(self, other): + return backend.numpy.logical_xor(self.value, other) + + def __rxor__(self, other): + return backend.numpy.logical_xor(other, self.value) + + def __round__(self, ndigits=None): + decimals = ndigits or 0 + return backend.numpy.round(self.value, decimals=decimals) + +def register_uninitialized_variable(variable): + uninitialized_variables = global_state.get_global_attribute('uninitialized_variables', [], set_to_default=True) + uninitialized_variables.append(variable) + +def initialize_all_variables(): + collection = global_state.get_global_attribute('uninitialized_variables') + if collection: + for v in collection: + v._deferred_initialize() + global_state.set_global_attribute('uninitialized_variables', []) + +@keras_export(['keras.utils.standardize_dtype', 'keras.backend.standardize_dtype']) +def standardize_dtype(dtype): + if dtype is None: + return config.floatx() + dtype = dtypes.PYTHON_DTYPES_MAP.get(dtype, dtype) + if hasattr(dtype, 'name'): 
+ dtype = dtype.name + elif hasattr(dtype, '__str__') and ('torch' in str(dtype) or 'jax.numpy' in str(dtype)): + dtype = str(dtype).split('.')[-1] + elif hasattr(dtype, '__name__'): + dtype = dtype.__name__ + if dtype not in dtypes.ALLOWED_DTYPES: + raise ValueError(f'Invalid dtype: {dtype}') + return dtype + +def standardize_shape(shape): + if not isinstance(shape, tuple): + if shape is None: + raise ValueError('Undefined shapes are not supported.') + if not hasattr(shape, '__iter__'): + raise ValueError(f"Cannot convert '{shape}' to a shape.") + if config.backend() == 'tensorflow': + if isinstance(shape, tf.TensorShape): + shape = shape.as_list() + shape = tuple(shape) + if config.backend() == 'torch': + shape = tuple(map(lambda x: int(x) if x is not None else None, shape)) + for e in shape: + if e is None: + continue + if config.backend() == 'jax' and '_DimExpr' in str(type(e)): + continue + if not is_int_dtype(type(e)): + raise ValueError(f"Cannot convert '{shape}' to a shape. Found invalid entry '{e}' of type '{type(e)}'. ") + if e < 0: + raise ValueError(f"Cannot convert '{shape}' to a shape. Negative dimensions are not allowed.") + return shape + +def shape_equal(a_shape, b_shape): + if len(a_shape) != len(b_shape): + return False + for (e1, e2) in zip(a_shape, b_shape): + if e1 is not None and e2 is not None and (e1 != e2): + return False + return True + +@keras_export('keras.backend.is_float_dtype') +def is_float_dtype(dtype): + dtype = standardize_dtype(dtype) + return dtype.startswith('float') or dtype.startswith('bfloat') + +@keras_export('keras.backend.is_int_dtype') +def is_int_dtype(dtype): + dtype = standardize_dtype(dtype) + return dtype.startswith('int') or dtype.startswith('uint') + +def get_autocast_scope(): + return global_state.get_global_attribute('autocast_scope') + +class AutocastScope: + + def __init__(self, dtype): + if dtype is not None: + dtype = standardize_dtype(dtype) + if not is_float_dtype(dtype): + raise ValueError(f"`AutocastScope` can only be used with a floating-point target dtype, such as 'float16'. Received: dtype={dtype}") + self.dtype = dtype + self.original_scope = None + + def maybe_cast(self, value): + from keras.src import backend + if self.dtype is not None and is_float_dtype(value.dtype): + return backend.cast(value, dtype=self.dtype) + return value + + def __enter__(self): + self.original_scope = get_autocast_scope() + global_state.set_global_attribute('autocast_scope', self) + + def __exit__(self, *args, **kwargs): + global_state.set_global_attribute('autocast_scope', self.original_scope) + +# File: keras-master/keras/src/backend/config.py +import json +import os +from keras.src.api_export import keras_export +_FLOATX = 'float32' +_EPSILON = 1e-07 +_IMAGE_DATA_FORMAT = 'channels_last' +_BACKEND = 'tensorflow' + +@keras_export(['keras.config.floatx', 'keras.backend.floatx']) +def floatx(): + return _FLOATX + +@keras_export(['keras.config.set_floatx', 'keras.backend.set_floatx']) +def set_floatx(value): + global _FLOATX + accepted_dtypes = {'bfloat16', 'float16', 'float32', 'float64'} + if value not in accepted_dtypes: + raise ValueError(f'Unknown `floatx` value: {value}. 
Expected one of {accepted_dtypes}') + _FLOATX = str(value) + +@keras_export(['keras.config.epsilon', 'keras.backend.epsilon']) +def epsilon(): + return _EPSILON + +@keras_export(['keras.config.set_epsilon', 'keras.backend.set_epsilon']) +def set_epsilon(value): + global _EPSILON + _EPSILON = value + +@keras_export(['keras.config.image_data_format', 'keras.backend.image_data_format']) +def image_data_format(): + return _IMAGE_DATA_FORMAT + +@keras_export(['keras.config.set_image_data_format', 'keras.backend.set_image_data_format']) +def set_image_data_format(data_format): + global _IMAGE_DATA_FORMAT + data_format = str(data_format).lower() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f"The `data_format` argument must be one of {{'channels_first', 'channels_last'}}. Received: data_format={data_format}") + _IMAGE_DATA_FORMAT = data_format + +def standardize_data_format(data_format): + if data_format is None: + return image_data_format() + data_format = str(data_format).lower() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f"The `data_format` argument must be one of {{'channels_first', 'channels_last'}}. Received: data_format={data_format}") + return data_format +if 'KERAS_HOME' in os.environ: + _KERAS_DIR = os.environ.get('KERAS_HOME') +else: + _keras_base_dir = os.path.expanduser('~') + if not os.access(_keras_base_dir, os.W_OK): + _keras_base_dir = '/tmp' + _KERAS_DIR = os.path.join(_keras_base_dir, '.keras') + +def keras_home(): + return _KERAS_DIR +_config_path = os.path.expanduser(os.path.join(_KERAS_DIR, 'keras.json')) +if os.path.exists(_config_path): + try: + with open(_config_path) as f: + _config = json.load(f) + except ValueError: + _config = {} + _floatx = _config.get('floatx', floatx()) + assert _floatx in {'float16', 'float32', 'float64'} + _epsilon = _config.get('epsilon', epsilon()) + assert isinstance(_epsilon, float) + _backend = _config.get('backend', _BACKEND) + _image_data_format = _config.get('image_data_format', image_data_format()) + assert _image_data_format in {'channels_last', 'channels_first'} + set_floatx(_floatx) + set_epsilon(_epsilon) + set_image_data_format(_image_data_format) + _BACKEND = _backend +if not os.path.exists(_KERAS_DIR): + try: + os.makedirs(_KERAS_DIR) + except OSError: + pass +if not os.path.exists(_config_path): + _config = {'floatx': floatx(), 'epsilon': epsilon(), 'backend': _BACKEND, 'image_data_format': image_data_format()} + try: + with open(_config_path, 'w') as f: + f.write(json.dumps(_config, indent=4)) + except IOError: + pass +if 'KERAS_BACKEND' in os.environ: + _backend = os.environ['KERAS_BACKEND'] + if _backend: + _BACKEND = _backend +if _BACKEND != 'tensorflow': + os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true' + +@keras_export(['keras.config.backend', 'keras.backend.backend']) +def backend(): + return _BACKEND + +# File: keras-master/keras/src/backend/exports.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend.common import KerasVariable +if backend.backend() == 'tensorflow': + BackendVariable = backend.tensorflow.core.Variable + backend_name_scope = backend.tensorflow.core.name_scope +elif backend.backend() == 'jax': + BackendVariable = backend.jax.core.Variable + backend_name_scope = backend.common.name_scope.name_scope +elif backend.backend() == 'torch': + BackendVariable = backend.torch.core.Variable + backend_name_scope = backend.common.name_scope.name_scope +elif backend.backend() == 'numpy': + from 
keras.src.backend.numpy.core import Variable as NumpyVariable + BackendVariable = NumpyVariable + backend_name_scope = backend.common.name_scope.name_scope +else: + raise RuntimeError(f'Invalid backend: {backend.backend()}') + +@keras_export('keras.Variable') +class Variable(BackendVariable, KerasVariable): + pass + +@keras_export('keras.name_scope') +class name_scope(backend_name_scope): + pass + +@keras_export('keras.device') +def device(device_name): + return backend.device_scope(device_name) + +# File: keras-master/keras/src/backend/jax/__init__.py +from keras.src.backend.jax import core +from keras.src.backend.jax import distribution_lib +from keras.src.backend.jax import image +from keras.src.backend.jax import linalg +from keras.src.backend.jax import math +from keras.src.backend.jax import nn +from keras.src.backend.jax import numpy +from keras.src.backend.jax import random +from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS +from keras.src.backend.jax.core import Variable +from keras.src.backend.jax.core import cast +from keras.src.backend.jax.core import compute_output_spec +from keras.src.backend.jax.core import cond +from keras.src.backend.jax.core import convert_to_numpy +from keras.src.backend.jax.core import convert_to_tensor +from keras.src.backend.jax.core import device_scope +from keras.src.backend.jax.core import is_tensor +from keras.src.backend.jax.core import random_seed_dtype +from keras.src.backend.jax.core import scatter +from keras.src.backend.jax.core import shape +from keras.src.backend.jax.core import stop_gradient +from keras.src.backend.jax.core import vectorized_map +from keras.src.backend.jax.rnn import cudnn_ok +from keras.src.backend.jax.rnn import gru +from keras.src.backend.jax.rnn import lstm +from keras.src.backend.jax.rnn import rnn + +# File: keras-master/keras/src/backend/jax/core.py +import jax +import jax.experimental.sparse as jax_sparse +import jax.numpy as jnp +import ml_dtypes +import numpy as np +from keras.src import tree +from keras.src.backend.common import KerasVariable +from keras.src.backend.common import global_state +from keras.src.backend.common import standardize_dtype +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.backend.common.symbolic_scope import SymbolicScope +from keras.src.backend.jax import distribution_lib +SUPPORTS_SPARSE_TENSORS = True + +class Variable(KerasVariable): + + def _initialize(self, value): + value = jnp.array(value, dtype=self._dtype) + self._shape = tuple(value.shape) + distribution = global_state.get_global_attribute('distribution') + if distribution is not None: + self._layout = distribution_lib._to_jax_layout(distribution.get_variable_layout(self)) + else: + self._layout = None + self._direct_assign(value) + + def _direct_assign(self, value): + if getattr(self, '_layout', None) is not None: + value = distribution_lib.distribute_variable(value, self._layout) + self._value = value + + def _convert_to_tensor(self, value, dtype=None): + return convert_to_tensor(value, dtype=dtype, sparse=False) + + def __jax_array__(self): + return self.value + +def convert_to_tensor(x, dtype=None, sparse=True): + if dtype is not None: + dtype = standardize_dtype(dtype) + if isinstance(x, (jnp.ndarray, jax.Array)) and (dtype is None or x.dtype == dtype): + return x + if isinstance(x, Variable): + if dtype is not None and x.dtype != dtype: + return x.value.astype(dtype) + return x.value + if isinstance(x, 
jax_sparse.JAXSparse): + if sparse is not None and (not sparse): + x = x.todense() + elif dtype is not None and x.dtype != dtype: + return x.astype(dtype) + else: + return x + if not is_tensor(x) and standardize_dtype(dtype) == 'bfloat16': + return jnp.asarray(x).astype(dtype) + return jnp.asarray(x, dtype=dtype) + +def convert_to_numpy(x): + if isinstance(x, jax_sparse.JAXSparse): + x = x.todense() + if is_tensor(x) and x.dtype == 'bfloat16': + return np.array(x, dtype=ml_dtypes.bfloat16) + return np.array(x) + +def is_tensor(x): + if isinstance(x, (jnp.ndarray, jax_sparse.JAXSparse)): + return True + return False + +def shape(x): + return x.shape + +def cast(x, dtype): + return convert_to_tensor(x, dtype=dtype) + +def compute_output_spec(fn, *args, **kwargs): + with StatelessScope(), SymbolicScope(): + built_in_types = (type(None), int, float, str, bool, complex, bytes) + static_args_idx = [] + static_args = [] + maybe_symbolic_args = [] + static_kwargs = {} + maybe_symbolic_kwargs = {} + for (idx, arg) in enumerate(args): + if isinstance(arg, built_in_types): + static_args_idx.append(idx) + static_args.append(arg) + else: + maybe_symbolic_args.append(arg) + maybe_symbolic_args = tuple(maybe_symbolic_args) + for (k, v) in kwargs.items(): + if isinstance(v, built_in_types): + static_kwargs[k] = v + else: + maybe_symbolic_kwargs[k] = v + has_none = False + for x in tree.flatten((maybe_symbolic_args, maybe_symbolic_kwargs)): + if isinstance(x, KerasTensor) and any((d is None for d in x.shape)): + has_none = True + + def convert_keras_tensor_to_jax(x, fill_value=None): + if isinstance(x, KerasTensor): + shape = list(x.shape) + if fill_value: + for (i, e) in enumerate(shape): + if e is None: + shape[i] = fill_value + jax_tensor = jax.ShapeDtypeStruct(shape, dtype=x.dtype) + return jax_tensor + if isinstance(x, dict): + return {k: convert_keras_tensor_to_jax(v, fill_value=fill_value) for (k, v) in x.items()} + if isinstance(x, list): + return [convert_keras_tensor_to_jax(xi, fill_value=fill_value) for xi in x] + return x + + def wrapped_fn(*args, **kwargs): + + def to_bcoo_if_sparse(x, maybe_symbolic_x): + if isinstance(maybe_symbolic_x, KerasTensor) and maybe_symbolic_x.sparse: + return jax_sparse.BCOO.fromdense(x, nse=1) + return x + (args, kwargs) = tree.map_structure(to_bcoo_if_sparse, (args, kwargs), (maybe_symbolic_args, maybe_symbolic_kwargs)) + rec_args = [] + idx_static = 0 + idx_sym = 0 + i = 0 + while idx_static < len(static_args) or idx_sym < len(args): + if i in static_args_idx: + rec_args.append(static_args[idx_static]) + idx_static += 1 + else: + rec_args.append(args[idx_sym]) + idx_sym += 1 + i += 1 + with StatelessScope(): + return fn(*rec_args, **kwargs, **static_kwargs) + if has_none: + (ms_args_1, ms_kwargs_1) = tree.map_structure(lambda x: convert_keras_tensor_to_jax(x, fill_value=83), (maybe_symbolic_args, maybe_symbolic_kwargs)) + (_, jax_out_1) = jax.make_jaxpr(wrapped_fn, return_shape=True)(*ms_args_1, **ms_kwargs_1) + (ms_args_2, ms_kwargs_2) = tree.map_structure(lambda x: convert_keras_tensor_to_jax(x, fill_value=89), (maybe_symbolic_args, maybe_symbolic_kwargs)) + (_, jax_out_2) = jax.make_jaxpr(wrapped_fn, return_shape=True)(*ms_args_2, **ms_kwargs_2) + + def merge_shapes(shape1, shape2): + return tuple([d1 if d1 == d2 else None for (d1, d2) in zip(shape1, shape2)]) + + def convert_jax_specs_to_keras_tensor(x1, x2): + if isinstance(x1, jax.ShapeDtypeStruct): + if not isinstance(x2, jax.ShapeDtypeStruct): + raise ValueError('Indeterministic output ordering.') + 
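+ # compute_output_spec() traces `fn` twice with two distinct fill values (83
+ # and 89) standing in for the unknown dimensions; merge_shapes() keeps a dim
+ # only where both traces agree, so any dim that tracked the fill value is
+ # mapped back to None. A minimal sketch of that merge (assumed inputs):
+ # >>> merge_shapes((83, 4), (89, 4))
+ # (None, 4)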
return KerasTensor(merge_shapes(x1.shape, x2.shape), dtype=x1.dtype) + elif isinstance(x1, jax_sparse.BCOO): + if not isinstance(x2, jax_sparse.BCOO): + raise ValueError('Indeterministic output ordering.') + return KerasTensor(merge_shapes(x1.shape, x2.shape), dtype=x1.dtype, sparse=True) + else: + return x1 + return tree.map_structure(convert_jax_specs_to_keras_tensor, jax_out_1, jax_out_2) + (maybe_symbolic_args, maybe_symbolic_kwargs) = tree.map_structure(convert_keras_tensor_to_jax, (maybe_symbolic_args, maybe_symbolic_kwargs)) + (_, jax_out) = jax.make_jaxpr(wrapped_fn, return_shape=True)(*maybe_symbolic_args, **maybe_symbolic_kwargs) + + def convert_jax_spec_to_keras_tensor(x): + if isinstance(x, jax.ShapeDtypeStruct): + return KerasTensor(x.shape, x.dtype) + elif isinstance(x, jax_sparse.BCOO): + return KerasTensor(x.shape, x.dtype, sparse=True) + return x + return tree.map_structure(convert_jax_spec_to_keras_tensor, jax_out) + +def cond(pred, true_fn, false_fn): + return jax.lax.cond(pred, true_fun=true_fn, false_fun=false_fn) + +def vectorized_map(function, elements): + return jax.vmap(function)(elements) + +def map(f, xs): + return jax.lax.map(f, xs) + +def scan(f, init, xs=None, length=None, reverse=False, unroll=1): + if not isinstance(unroll, bool): + if not isinstance(unroll, int) or unroll < 1: + raise ValueError(f'`unroll` must be a positive integer or boolean. Received: unroll={unroll}') + return jax.lax.scan(f, init=init, xs=xs, length=length, reverse=reverse, unroll=unroll) + +def associative_scan(f, elems, reverse=False, axis=0): + return jax.lax.associative_scan(f, elems, reverse, axis) + +def scatter(indices, values, shape): + zeros = jnp.zeros(shape, values.dtype) + key = tuple(jnp.moveaxis(indices, -1, 0)) + return zeros.at[key].add(values) + +def scatter_update(inputs, indices, updates): + inputs = convert_to_tensor(inputs) + indices = jnp.array(indices) + indices = jnp.transpose(indices) + inputs = inputs.at[tuple(indices)].set(updates) + return inputs + +def slice(inputs, start_indices, shape): + return jax.lax.dynamic_slice(inputs, start_indices, shape) + +def slice_update(inputs, start_indices, updates): + return jax.lax.dynamic_update_slice(inputs, updates, start_indices) + +def switch(index, branches, *operands): + return jax.lax.switch(index, branches, *operands) + +def while_loop(cond, body, loop_vars, maximum_iterations=None): + is_tuple = isinstance(loop_vars, (tuple, list)) + loop_vars = tuple(loop_vars) if is_tuple else (loop_vars,) + if maximum_iterations is not None: + current_iter = 0 + loop_vars = loop_vars + (current_iter,) + + def _cond(args): + return cond(*args[:-1]) & (args[-1] < maximum_iterations) + + def _body(args): + outputs = body(*args[:-1]) + outputs = tuple(outputs) if is_tuple else (outputs,) + return outputs + (args[-1] + 1,) + else: + + def _cond(args): + return cond(*args) + + def _body(args): + outputs = body(*args) + return tuple(outputs) if is_tuple else (outputs,) + outputs = jax.lax.while_loop(_cond, _body, loop_vars) + if maximum_iterations is not None: + outputs = outputs[:-1] + return outputs if is_tuple else outputs[0] + +def fori_loop(lower, upper, body_fun, init_val): + return jax.lax.fori_loop(lower, upper, body_fun, init_val) + +def stop_gradient(variable): + return jax.lax.stop_gradient(variable) + +def unstack(x, num=None, axis=0): + return [jax.lax.index_in_dim(x, i, axis, keepdims=False) for i in range(x.shape[axis])] + +def random_seed_dtype(): + return 'uint32' + +def custom_gradient(fun): + return
jax.custom_gradient(fun=fun) + +def device_scope(device_name): + if isinstance(device_name, str): + device_name = device_name.lower() + jax_device = distribution_lib._to_jax_device(device_name) + elif not isinstance(device_name, jax.Device): + raise ValueError(f"Invalid value for argument `device_name`. Expected a string like 'gpu:0' or a `jax.Device` instance. Received: device_name='{device_name}'") + else: + jax_device = device_name + return jax.default_device(jax_device) + +# File: keras-master/keras/src/backend/jax/distribution_lib.py +"""""" +import jax +import numpy as np +from keras.src.utils import jax_utils + +def list_devices(device_type=None): + device_type = device_type.lower() if device_type else None + jax_devices = jax.devices(backend=device_type) + return [f'{device.platform}:{device.id}' for device in jax_devices] + +def distribute_variable(value, layout): + if not isinstance(layout, jax.sharding.Sharding): + layout = _to_jax_layout(layout) + if isinstance(value, (jax.Array, jax.numpy.ndarray)) and value.sharding.is_equivalent_to(layout, ndim=len(value.shape)): + return value + if layout.is_fully_addressable: + return jax.device_put(value, layout) + else: + mapping = layout.addressable_devices_indices_map(value.shape) + local_values = jax.device_put([value[i] for i in mapping.values()], list(mapping.keys())) + global_value = jax.make_array_from_single_device_arrays(value.shape, layout, local_values) + return global_value + +def distribute_tensor(tensor, layout): + if not isinstance(layout, jax.sharding.Sharding): + layout = _to_jax_layout(layout) + if jax_utils.is_in_jax_tracing_scope(): + return jax.lax.with_sharding_constraint(tensor, layout) + if layout.is_fully_addressable: + return jax.device_put(tensor, layout) + else: + mapping = layout.addressable_devices_indices_map(tensor.shape) + local_values = jax.device_put([tensor[i] for i in mapping.values()], list(mapping.keys())) + global_value = jax.make_array_from_single_device_arrays(tensor.shape, layout, local_values) + return global_value + +def distribute_data_input(per_process_batch, layout): + if not isinstance(layout, jax.sharding.Sharding): + layout = _to_jax_layout(layout) + mesh_shape = list(layout.mesh.shape.values()) + num_model_replicas_total = mesh_shape[0] + mesh_model_dim_size = mesh_shape[1] if len(mesh_shape) > 1 else 1 + num_model_replicas_per_process = num_model_replicas_total / num_processes() + per_process_batch_size = per_process_batch.shape[0] + if num_model_replicas_per_process >= 1: + if num_model_replicas_total % num_processes() != 0: + raise ValueError(f'If there is more than one replica per process, the batch dimension of the mesh should be divisible by the number of processes. Here, batch dimension = {num_model_replicas_total}, while number of processes = {num_processes()}') + per_replica_batch_size = int(per_process_batch_size // num_model_replicas_per_process) + if per_process_batch_size % per_replica_batch_size != 0: + raise ValueError(f'`per_process_batch_size` should be divisible by `per_replica_batch_size`. 
per_process_batch_size={per_process_batch_size} and per_replica_batch_size = {per_replica_batch_size}') + per_replica_batches = np.split(per_process_batch, num_model_replicas_per_process) + per_device_batches = [per_replica_batch for per_replica_batch in per_replica_batches for _ in range(mesh_model_dim_size)] + batches_on_devices = [jax.device_put(batch, device) for (batch, device) in zip(per_device_batches, layout.addressable_devices)] + else: + per_replica_batch_size = per_process_batch_size + batches_on_devices = [jax.device_put(per_process_batch, device) for device in layout.addressable_devices] + global_batch_size = per_replica_batch_size * num_model_replicas_total + global_batch_shape = (global_batch_size,) + per_process_batch.shape[1:] + global_batch_array = jax.make_array_from_single_device_arrays(shape=global_batch_shape, sharding=layout, arrays=batches_on_devices) + return global_batch_array + +def initialize(job_addresses, num_processes, process_id): + if job_addresses and ',' in job_addresses: + job_addresses = job_addresses.split(',') + if num_processes is not None and num_processes != len(job_addresses): + raise ValueError(f'The provided job_addresses {job_addresses} has {len(job_addresses)} jobs, but num_processes is {num_processes}') + coordinator_address = job_addresses[0] + else: + coordinator_address = job_addresses + jax.distributed.initialize(coordinator_address=coordinator_address, num_processes=num_processes, process_id=process_id) + +def num_processes(): + return jax.process_count() + +def process_id(): + return jax.process_index() + +def _to_jax_device(device_name): + if isinstance(device_name, jax.Device): + return device_name + (device_type, device_id) = device_name.split(':') + devices = jax.devices(backend=device_type) + for device in devices: + if device.platform == device_type and device.id == int(device_id): + return device + raise ValueError(f'Device not found: {device_name}') + +def _to_jax_mesh(device_mesh): + shape = device_mesh.devices.shape + devices = [_to_jax_device(d) for d in device_mesh.devices.flatten()] + devices = np.array(devices).reshape(shape) + return jax.sharding.Mesh(devices, device_mesh.axis_names) + +def _to_jax_layout(tensor_layout): + if tensor_layout.device_mesh is None: + raise ValueError('Cannot create sharding when device mesh is not set for TensorLayout.') + partition_spec = jax.sharding.PartitionSpec(*tensor_layout.axes) + jax_mesh = _to_jax_mesh(tensor_layout.device_mesh) + return jax.sharding.NamedSharding(jax_mesh, partition_spec) + +# File: keras-master/keras/src/backend/jax/image.py +import functools +import jax +import jax.numpy as jnp +from keras.src import backend +from keras.src.backend.jax.core import convert_to_tensor +RESIZE_INTERPOLATIONS = ('bilinear', 'nearest', 'lanczos3', 'lanczos5', 'bicubic') + +def rgb_to_grayscale(images, data_format=None): + images = convert_to_tensor(images) + data_format = backend.standardize_data_format(data_format) + channels_axis = -1 if data_format == 'channels_last' else -3 + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). 
Received input with shape: images.shape={images.shape}') + original_dtype = images.dtype + compute_dtype = backend.result_type(images.dtype, float) + images = images.astype(compute_dtype) + rgb_weights = convert_to_tensor([0.2989, 0.587, 0.114], dtype=images.dtype) + images = jnp.tensordot(images, rgb_weights, axes=(channels_axis, -1)) + images = jnp.expand_dims(images, axis=channels_axis) + return images.astype(original_dtype) + +def rgb_to_hsv(images, data_format=None): + images = convert_to_tensor(images) + dtype = images.dtype + data_format = backend.standardize_data_format(data_format) + channels_axis = -1 if data_format == 'channels_last' else -3 + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if not backend.is_float_dtype(dtype): + raise ValueError(f'Invalid images dtype: expected float dtype. Received: images.dtype={backend.standardize_dtype(dtype)}') + eps = jnp.finfo(dtype).eps + images = jnp.where(jnp.abs(images) < eps, 0.0, images) + (red, green, blue) = jnp.split(images, 3, channels_axis) + red = jnp.squeeze(red, channels_axis) + green = jnp.squeeze(green, channels_axis) + blue = jnp.squeeze(blue, channels_axis) + + def rgb_planes_to_hsv_planes(r, g, b): + value = jnp.maximum(jnp.maximum(r, g), b) + minimum = jnp.minimum(jnp.minimum(r, g), b) + range_ = value - minimum + safe_value = jnp.where(value > 0, value, 1.0) + safe_range = jnp.where(range_ > 0, range_, 1.0) + saturation = jnp.where(value > 0, range_ / safe_value, 0.0) + norm = 1.0 / (6.0 * safe_range) + hue = jnp.where(value == g, norm * (b - r) + 2.0 / 6.0, norm * (r - g) + 4.0 / 6.0) + hue = jnp.where(value == r, norm * (g - b), hue) + hue = jnp.where(range_ > 0, hue, 0.0) + (hue < 0.0).astype(hue.dtype) + return (hue, saturation, value) + images = jnp.stack(rgb_planes_to_hsv_planes(red, green, blue), axis=channels_axis) + return images + +def hsv_to_rgb(images, data_format=None): + images = convert_to_tensor(images) + dtype = images.dtype + data_format = backend.standardize_data_format(data_format) + channels_axis = -1 if data_format == 'channels_last' else -3 + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if not backend.is_float_dtype(dtype): + raise ValueError(f'Invalid images dtype: expected float dtype. 
Received: images.dtype={backend.standardize_dtype(dtype)}') + (hue, saturation, value) = jnp.split(images, 3, channels_axis) + hue = jnp.squeeze(hue, channels_axis) + saturation = jnp.squeeze(saturation, channels_axis) + value = jnp.squeeze(value, channels_axis) + + def hsv_planes_to_rgb_planes(hue, saturation, value): + dh = jnp.mod(hue, 1.0) * 6.0 + dr = jnp.clip(jnp.abs(dh - 3.0) - 1.0, 0.0, 1.0) + dg = jnp.clip(2.0 - jnp.abs(dh - 2.0), 0.0, 1.0) + db = jnp.clip(2.0 - jnp.abs(dh - 4.0), 0.0, 1.0) + one_minus_s = 1.0 - saturation + red = value * (one_minus_s + saturation * dr) + green = value * (one_minus_s + saturation * dg) + blue = value * (one_minus_s + saturation * db) + return (red, green, blue) + images = jnp.stack(hsv_planes_to_rgb_planes(hue, saturation, value), axis=channels_axis) + return images + +def resize(images, size, interpolation='bilinear', antialias=False, crop_to_aspect_ratio=False, pad_to_aspect_ratio=False, fill_mode='constant', fill_value=0.0, data_format=None): + data_format = backend.standardize_data_format(data_format) + if interpolation not in RESIZE_INTERPOLATIONS: + raise ValueError(f'Invalid value for argument `interpolation`. Expected one of {RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}') + if fill_mode != 'constant': + raise ValueError(f"Invalid value for argument `fill_mode`. Only `'constant'` is supported. Received: fill_mode={fill_mode}") + if pad_to_aspect_ratio and crop_to_aspect_ratio: + raise ValueError('Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` can be `True`.') + if not len(size) == 2: + raise ValueError(f'Argument `size` must be a tuple of two elements (height, width). Received: size={size}') + size = tuple(size) + (target_height, target_width) = size + if len(images.shape) == 4: + if data_format == 'channels_last': + size = (images.shape[0],) + size + (images.shape[-1],) + else: + size = (images.shape[0], images.shape[1]) + size + batch_size = images.shape[0] + elif len(images.shape) == 3: + if data_format == 'channels_last': + size = size + (images.shape[-1],) + else: + size = (images.shape[0],) + size + else: + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). 
Received input with shape: images.shape={images.shape}') + if crop_to_aspect_ratio: + shape = images.shape + if data_format == 'channels_last': + (height, width) = (shape[-3], shape[-2]) + else: + (height, width) = (shape[-2], shape[-1]) + crop_height = int(float(width * target_height) / target_width) + crop_height = max(min(height, crop_height), 1) + crop_width = int(float(height * target_width) / target_height) + crop_width = max(min(width, crop_width), 1) + crop_box_hstart = int(float(height - crop_height) / 2) + crop_box_wstart = int(float(width - crop_width) / 2) + if data_format == 'channels_last': + if len(images.shape) == 4: + images = images[:, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width, :] + else: + images = images[crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width, :] + elif len(images.shape) == 4: + images = images[:, :, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width] + else: + images = images[:, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width] + elif pad_to_aspect_ratio: + shape = images.shape + if data_format == 'channels_last': + (height, width, channels) = (shape[-3], shape[-2], shape[-1]) + else: + (height, width, channels) = (shape[-2], shape[-1], shape[-3]) + pad_height = int(float(width * target_height) / target_width) + pad_height = max(height, pad_height) + pad_width = int(float(height * target_width) / target_height) + pad_width = max(width, pad_width) + img_box_hstart = int(float(pad_height - height) / 2) + img_box_wstart = int(float(pad_width - width) / 2) + if data_format == 'channels_last': + if img_box_hstart > 0: + if len(images.shape) == 4: + padded_img = jnp.concatenate([jnp.ones((batch_size, img_box_hstart, width, channels), dtype=images.dtype) * fill_value, images, jnp.ones((batch_size, img_box_hstart, width, channels), dtype=images.dtype) * fill_value], axis=1) + else: + padded_img = jnp.concatenate([jnp.ones((img_box_hstart, width, channels), dtype=images.dtype) * fill_value, images, jnp.ones((img_box_hstart, width, channels), dtype=images.dtype) * fill_value], axis=0) + elif img_box_wstart > 0: + if len(images.shape) == 4: + padded_img = jnp.concatenate([jnp.ones((batch_size, height, img_box_wstart, channels), dtype=images.dtype) * fill_value, images, jnp.ones((batch_size, height, img_box_wstart, channels), dtype=images.dtype) * fill_value], axis=2) + else: + padded_img = jnp.concatenate([jnp.ones((height, img_box_wstart, channels), dtype=images.dtype) * fill_value, images, jnp.ones((height, img_box_wstart, channels), dtype=images.dtype) * fill_value], axis=1) + else: + padded_img = images + elif img_box_hstart > 0: + if len(images.shape) == 4: + padded_img = jnp.concatenate([jnp.ones((batch_size, channels, img_box_hstart, width)) * fill_value, images, jnp.ones((batch_size, channels, img_box_hstart, width)) * fill_value], axis=2) + else: + padded_img = jnp.concatenate([jnp.ones((channels, img_box_hstart, width)) * fill_value, images, jnp.ones((channels, img_box_hstart, width)) * fill_value], axis=1) + elif img_box_wstart > 0: + if len(images.shape) == 4: + padded_img = jnp.concatenate([jnp.ones((batch_size, channels, height, img_box_wstart)) * fill_value, images, jnp.ones((batch_size, channels, height, img_box_wstart)) * fill_value], axis=3) + else: + padded_img = jnp.concatenate([jnp.ones((channels, height, img_box_wstart)) * fill_value, images, jnp.ones((channels, height, 
img_box_wstart)) * fill_value], axis=2) + else: + padded_img = images + images = padded_img + return jax.image.resize(images, size, method=interpolation, antialias=antialias) +AFFINE_TRANSFORM_INTERPOLATIONS = {'nearest': 0, 'bilinear': 1} +AFFINE_TRANSFORM_FILL_MODES = {'constant', 'nearest', 'wrap', 'mirror', 'reflect'} + +def affine_transform(images, transform, interpolation='bilinear', fill_mode='constant', fill_value=0, data_format=None): + data_format = backend.standardize_data_format(data_format) + if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys(): + raise ValueError(f'Invalid value for argument `interpolation`. Expected one of {set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: interpolation={interpolation}') + if fill_mode not in AFFINE_TRANSFORM_FILL_MODES: + raise ValueError(f'Invalid value for argument `fill_mode`. Expected one of {AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}') + transform = convert_to_tensor(transform) + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if len(transform.shape) not in (1, 2): + raise ValueError(f'Invalid transform rank: expected rank 1 (single transform) or rank 2 (batch of transforms). Received input with shape: transform.shape={transform.shape}') + need_squeeze = False + if len(images.shape) == 3: + images = jnp.expand_dims(images, axis=0) + need_squeeze = True + if len(transform.shape) == 1: + transform = jnp.expand_dims(transform, axis=0) + if data_format == 'channels_first': + images = jnp.transpose(images, (0, 2, 3, 1)) + batch_size = images.shape[0] + meshgrid = jnp.meshgrid(*[jnp.arange(size) for size in images.shape[1:]], indexing='ij') + indices = jnp.concatenate([jnp.expand_dims(x, axis=-1) for x in meshgrid], axis=-1) + indices = jnp.tile(indices, (batch_size, 1, 1, 1, 1)) + a0 = transform[:, 0] + a2 = transform[:, 2] + b1 = transform[:, 4] + b2 = transform[:, 5] + transform = transform.at[:, 0].set(b1) + transform = transform.at[:, 2].set(b2) + transform = transform.at[:, 4].set(a0) + transform = transform.at[:, 5].set(a2) + transform = jnp.pad(transform, pad_width=[[0, 0], [0, 1]], constant_values=1) + transform = jnp.reshape(transform, (batch_size, 3, 3)) + offset = transform[:, 0:2, 2] + offset = jnp.pad(offset, pad_width=[[0, 0], [0, 1]]) + transform = transform.at[:, 0:2, 2].set(0) + coordinates = jnp.einsum('Bhwij, Bjk -> Bhwik', indices, transform) + coordinates = jnp.moveaxis(coordinates, source=-1, destination=1) + coordinates += jnp.reshape(a=offset, newshape=(*offset.shape, 1, 1, 1)) + _map_coordinates = functools.partial(jax.scipy.ndimage.map_coordinates, order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation], mode=fill_mode, cval=fill_value) + affined = jax.vmap(_map_coordinates)(images, coordinates) + if data_format == 'channels_first': + affined = jnp.transpose(affined, (0, 3, 1, 2)) + if need_squeeze: + affined = jnp.squeeze(affined, axis=0) + return affined +MAP_COORDINATES_FILL_MODES = {'constant', 'nearest', 'wrap', 'mirror', 'reflect'} + +def map_coordinates(inputs, coordinates, order, fill_mode='constant', fill_value=0.0): + inputs = convert_to_tensor(inputs) + coordinates = convert_to_tensor(coordinates) + if coordinates.shape[0] != len(inputs.shape): + raise ValueError(f'First dim of `coordinates` must be the same as the rank of `inputs`. 
Received inputs with shape: {inputs.shape} and coordinate leading dim of {coordinates.shape[0]}') + if len(coordinates.shape) < 2: + raise ValueError(f'Invalid coordinates rank: expected at least rank 2. Received input with shape: {coordinates.shape}') + if fill_mode not in MAP_COORDINATES_FILL_MODES: + raise ValueError(f'Invalid value for argument `fill_mode`. Expected one of {set(MAP_COORDINATES_FILL_MODES)}. Received: fill_mode={fill_mode}') + if order not in range(2): + raise ValueError(f'Invalid value for argument `order`. Expected one of {[0, 1]}. Received: order={order}') + return jax.scipy.ndimage.map_coordinates(inputs, coordinates, order, fill_mode, fill_value) + +# File: keras-master/keras/src/backend/jax/linalg.py +import jax +import jax.numpy as jnp +import jax.scipy as jsp +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.jax.core import cast +from keras.src.backend.jax.core import convert_to_tensor + +def cholesky(a): + out = jnp.linalg.cholesky(a) + try: + if jnp.any(jnp.isnan(out)): + raise ValueError('Cholesky decomposition failed. The input might not be a valid positive definite matrix.') + except jax.errors.TracerBoolConversionError: + pass + return out + +def det(a): + return jnp.linalg.det(a) + +def eig(x): + return jnp.linalg.eig(x) + +def eigh(x): + return jnp.linalg.eigh(x) + +def inv(a): + return jnp.linalg.inv(a) + +def lu_factor(x): + lu_factor_fn = jsp.linalg.lu_factor + if x.ndim > 2: + for i in range(x.ndim - 2): + lu_factor_fn = jax.vmap(lu_factor_fn) + return lu_factor_fn(x) + +def norm(x, ord=None, axis=None, keepdims=False): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims) + +def qr(x, mode='reduced'): + if mode not in {'reduced', 'complete'}: + raise ValueError(f"`mode` argument value not supported. Expected one of {{'reduced', 'complete'}}. Received: mode={mode}") + return jnp.linalg.qr(x, mode=mode) + +def solve(a, b): + return jnp.linalg.solve(a, b) + +def solve_triangular(a, b, lower=False): + return jsp.linalg.solve_triangular(a, b, lower=lower) + +def svd(x, full_matrices=True, compute_uv=True): + return jnp.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv) + +def lstsq(a, b, rcond=None): + a = convert_to_tensor(a) + b = convert_to_tensor(b) + return jnp.linalg.lstsq(a, b, rcond=rcond)[0] + +# File: keras-master/keras/src/backend/jax/math.py +import math +import jax +import jax.numpy as jnp +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.jax.core import cast +from keras.src.backend.jax.core import convert_to_tensor +from keras.src.utils.module_utils import scipy + +def segment_sum(data, segment_ids, num_segments=None, sorted=False): + if num_segments is None: + raise ValueError('Argument `num_segments` must be set when using the JAX backend. Received: num_segments=None') + return jax.ops.segment_sum(data, segment_ids, num_segments, indices_are_sorted=sorted) + +def segment_max(data, segment_ids, num_segments=None, sorted=False): + if num_segments is None: + raise ValueError('Argument `num_segments` must be set when using the JAX backend. 
Received: num_segments=None') + return jax.ops.segment_max(data, segment_ids, num_segments, indices_are_sorted=sorted) + +def top_k(x, k, sorted=True): + return jax.lax.top_k(x, k) + +def in_top_k(targets, predictions, k): + preds_at_label = jnp.take_along_axis(predictions, jnp.expand_dims(targets, axis=-1), axis=-1) + preds_at_label = jnp.where(jnp.isnan(preds_at_label), -jnp.inf, preds_at_label) + rank = 1 + jnp.sum(jnp.greater(predictions, preds_at_label), axis=-1) + return jnp.less_equal(rank, k) + +def logsumexp(x, axis=None, keepdims=False): + max_x = jnp.max(x, axis=axis, keepdims=True) + result = jnp.log(jnp.sum(jnp.exp(x - max_x), axis=axis, keepdims=True)) + max_x + return jnp.squeeze(result) if not keepdims else result + +def qr(x, mode='reduced'): + if mode not in {'reduced', 'complete'}: + raise ValueError(f"`mode` argument value not supported. Expected one of {{'reduced', 'complete'}}. Received: mode={mode}") + return jnp.linalg.qr(x, mode=mode) + +def extract_sequences(x, sequence_length, sequence_stride): + (*batch_shape, signal_length) = x.shape + batch_shape = list(batch_shape) + x = jnp.reshape(x, (math.prod(batch_shape), signal_length, 1)) + x = jax.lax.conv_general_dilated_patches(x, (sequence_length,), (sequence_stride,), 'VALID', dimension_numbers=('NTC', 'OIT', 'NTC')) + return jnp.reshape(x, (*batch_shape, *x.shape[-2:])) + +def _get_complex_tensor_from_tuple(x): + if not isinstance(x, (tuple, list)) or len(x) != 2: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Received: x={x}') + (real, imag) = x + if real.shape != imag.shape: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Both the real and imaginary parts should have the same shape. Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}') + if not jnp.issubdtype(real.dtype, jnp.floating) or not jnp.issubdtype(imag.dtype, jnp.floating): + raise ValueError(f'At least one tensor in input `x` is not of type float. Received: x={x}.') + complex_input = jax.lax.complex(real, imag) + return complex_input + +def fft(x): + complex_input = _get_complex_tensor_from_tuple(x) + complex_output = jnp.fft.fft(complex_input) + return (jnp.real(complex_output), jnp.imag(complex_output)) + +def fft2(x): + complex_input = _get_complex_tensor_from_tuple(x) + complex_output = jnp.fft.fft2(complex_input) + return (jnp.real(complex_output), jnp.imag(complex_output)) + +def rfft(x, fft_length=None): + complex_output = jnp.fft.rfft(x, n=fft_length, axis=-1, norm='backward') + return (jnp.real(complex_output), jnp.imag(complex_output)) + +def irfft(x, fft_length=None): + complex_input = _get_complex_tensor_from_tuple(x) + return jnp.fft.irfft(complex_input, n=fft_length, axis=-1, norm='backward') + +def stft(x, sequence_length, sequence_stride, fft_length, window='hann', center=True): + if standardize_dtype(x.dtype) not in {'float32', 'float64'}: + raise TypeError(f'Invalid input type. Expected `float32` or `float64`. Received: input type={x.dtype}') + if fft_length < sequence_length: + raise ValueError(f'`fft_length` must be equal to or larger than `sequence_length`. Received: sequence_length={sequence_length}, fft_length={fft_length}') + if isinstance(window, str): + if window not in {'hann', 'hamming'}: + raise ValueError(f'If a string is passed to `window`, it must be one of `"hann"`, `"hamming"`. 
Received: window={window}') + x = convert_to_tensor(x) + if center: + pad_width = [(0, 0) for _ in range(len(x.shape))] + pad_width[-1] = (fft_length // 2, fft_length // 2) + x = jnp.pad(x, pad_width, mode='reflect') + l_pad = (fft_length - sequence_length) // 2 + r_pad = fft_length - sequence_length - l_pad + if window is not None: + if isinstance(window, str): + win = convert_to_tensor(scipy.signal.get_window(window, sequence_length), dtype=x.dtype) + else: + win = convert_to_tensor(window, dtype=x.dtype) + if len(win.shape) != 1 or win.shape[-1] != sequence_length: + raise ValueError(f'The shape of `window` must be equal to [sequence_length]. Received: window shape={win.shape}') + win = jnp.pad(win, [[l_pad, r_pad]]) + else: + win = jnp.ones(sequence_length + l_pad + r_pad, dtype=x.dtype) + result = jax.scipy.signal.stft(x, fs=1.0, window=win, nperseg=sequence_length + l_pad + r_pad, noverlap=sequence_length + l_pad + r_pad - sequence_stride, nfft=fft_length, boundary=None, padded=False)[-1] + scale = jnp.sqrt(1.0 / win.sum() ** 2) + result = result / scale + result = jnp.swapaxes(result, -2, -1) + return (jnp.real(result), jnp.imag(result)) + +def istft(x, sequence_length, sequence_stride, fft_length, length=None, window='hann', center=True): + x = _get_complex_tensor_from_tuple(x) + dtype = jnp.real(x).dtype + if len(x.shape) < 2: + raise ValueError(f'Input `x` must have at least 2 dimensions. Received shape: {x.shape}') + expected_output_len = fft_length + sequence_stride * (x.shape[-2] - 1) + l_pad = (fft_length - sequence_length) // 2 + r_pad = fft_length - sequence_length - l_pad + if window is not None: + if isinstance(window, str): + win = convert_to_tensor(scipy.signal.get_window(window, sequence_length), dtype=dtype) + else: + win = convert_to_tensor(window, dtype=dtype) + if len(win.shape) != 1 or win.shape[-1] != sequence_length: + raise ValueError(f'The shape of `window` must be equal to [sequence_length]. Received: window shape={win.shape}') + win = jnp.pad(win, [[l_pad, r_pad]]) + else: + win = jnp.ones(sequence_length + l_pad + r_pad, dtype=dtype) + x = jax.scipy.signal.istft(x, fs=1.0, window=win, nperseg=sequence_length + l_pad + r_pad, noverlap=sequence_length + l_pad + r_pad - sequence_stride, nfft=fft_length, boundary=False, time_axis=-2, freq_axis=-1)[-1] + x = x / win.sum() if window is not None else x / sequence_stride + start = 0 if center is False else fft_length // 2 + if length is not None: + end = start + length + elif center is True: + end = -(fft_length // 2) + else: + end = expected_output_len + return x[..., start:end] + +def rsqrt(x): + return jax.lax.rsqrt(x) + +def erf(x): + return jax.lax.erf(x) + +def erfinv(x): + return jax.lax.erf_inv(x) + +def solve(a, b): + a = convert_to_tensor(a) + b = convert_to_tensor(b) + return jnp.linalg.solve(a, b) + +def norm(x, ord=None, axis=None, keepdims=False): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims) + +def logdet(x): + from keras.src.backend.jax.numpy import slogdet + return slogdet(x)[1] + +# File: keras-master/keras/src/backend/jax/nn.py +import builtins +import math +import jax +import jax.experimental.sparse as jax_sparse +import jax.numpy as jnp +from jax import lax +from jax import nn as jnn +from keras.src import backend +from keras.src.backend.common.backend_utils import compute_conv_transpose_padding_args_for_jax
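+# The activation wrappers below delegate essentially one-to-one to `jax.nn`
+# after coercing inputs with convert_to_tensor(). A hedged usage sketch
+# (assumed output repr, recent JAX versions):
+# >>> import jax.numpy as jnp
+# >>> relu(jnp.array([-1.0, 2.0]))
+# Array([0., 2.], dtype=float32)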
+from keras.src.backend.jax.core import cast +from keras.src.backend.jax.core import convert_to_tensor + +def relu(x): + x = convert_to_tensor(x) + return jnn.relu(x) + +def relu6(x): + x = convert_to_tensor(x) + return jnn.relu6(x) + +def sigmoid(x): + x = convert_to_tensor(x) + return jnn.sigmoid(x) + +def tanh(x): + x = convert_to_tensor(x) + return jnn.tanh(x) + +def softplus(x): + x = convert_to_tensor(x) + return jnn.softplus(x) + +def softsign(x): + x = convert_to_tensor(x) + return jnn.soft_sign(x) + +def silu(x): + x = convert_to_tensor(x) + return jnn.silu(x) + +def log_sigmoid(x): + x = convert_to_tensor(x) + return jnn.log_sigmoid(x) + +def leaky_relu(x, negative_slope=0.2): + x = convert_to_tensor(x) + return jnn.leaky_relu(x, negative_slope=negative_slope) + +def hard_sigmoid(x): + x = convert_to_tensor(x) + return jnn.hard_sigmoid(x) + +def hard_silu(x): + x = convert_to_tensor(x) + return jnn.hard_silu(x) + +def elu(x, alpha=1.0): + x = convert_to_tensor(x) + return jnn.elu(x, alpha=alpha) + +def selu(x): + x = convert_to_tensor(x) + return jnn.selu(x) + +def gelu(x, approximate=True): + x = convert_to_tensor(x) + return jnn.gelu(x, approximate) + +def softmax(x, axis=-1): + x = convert_to_tensor(x) + return jnn.softmax(x, axis=axis) + +def log_softmax(x, axis=-1): + x = convert_to_tensor(x) + return jnn.log_softmax(x, axis=axis) + +def _convert_to_spatial_operand(x, num_spatial_dims, data_format='channels_last', include_batch_and_channels=True): + x = (x,) * num_spatial_dims if isinstance(x, int) else x + if not include_batch_and_channels: + return x + if data_format == 'channels_last': + x = (1,) + x + (1,) + else: + x = (1,) + (1,) + x + return x + +def _pool(inputs, initial_value, reduce_fn, pool_size, strides=None, padding='valid'): + if padding not in ('same', 'valid'): + raise ValueError(f"Invalid padding '{padding}', must be 'same' or 'valid'.") + padding = padding.upper() + return lax.reduce_window(inputs, initial_value, reduce_fn, pool_size, strides, padding) + +def max_pool(inputs, pool_size, strides=None, padding='valid', data_format=None): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = inputs.ndim - 2 + pool_size = _convert_to_spatial_operand(pool_size, num_spatial_dims, data_format) + strides = pool_size if strides is None else strides + strides = _convert_to_spatial_operand(strides, num_spatial_dims, data_format) + return _pool(inputs, -jnp.inf, lax.max, pool_size, strides, padding) + +def average_pool(inputs, pool_size, strides, padding, data_format=None): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = inputs.ndim - 2 + pool_size = _convert_to_spatial_operand(pool_size, num_spatial_dims, data_format) + strides = pool_size if strides is None else strides + strides = _convert_to_spatial_operand(strides, num_spatial_dims, data_format) + pooled = _pool(inputs, 0.0, lax.add, pool_size, strides, padding) + if padding == 'valid': + return pooled / math.prod(pool_size) + else: + shape = [a if b != 1 else 1 for (a, b) in zip(inputs.shape, pool_size)] + window_counts = _pool(jnp.ones(shape, inputs.dtype), 0.0, lax.add, pool_size, strides, padding) + return pooled / window_counts + +def _convert_to_lax_conv_dimension_numbers(num_spatial_dims, data_format='channels_last', transpose=False): + num_dims = num_spatial_dims + 2 + if data_format == 'channels_last': + spatial_dims = tuple(range(1, num_dims - 1)) + inputs_dn = (0, num_dims - 1) + spatial_dims + else: + spatial_dims = tuple(range(2, num_dims)) 
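+ # The dimension numbers are pure index permutations over the operand axes:
+ # for num_spatial_dims=2, channels_last yields lhs_spec (0, 3, 1, 2) (NHWC)
+ # and channels_first yields (0, 1, 2, 3) (NCHW), while rhs_spec lists the
+ # (out, in, *spatial) kernel positions. Worked sketch (assumed):
+ # >>> _convert_to_lax_conv_dimension_numbers(2, 'channels_last').lhs_spec
+ # (0, 3, 1, 2)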
+ inputs_dn = (0, 1) + spatial_dims + if transpose: + kernel_dn = (num_dims - 2, num_dims - 1) + tuple(range(num_dims - 2)) + else: + kernel_dn = (num_dims - 1, num_dims - 2) + tuple(range(num_dims - 2)) + return lax.ConvDimensionNumbers(lhs_spec=inputs_dn, rhs_spec=kernel_dn, out_spec=inputs_dn) + +def conv(inputs, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = inputs.ndim - 2 + dimension_numbers = _convert_to_lax_conv_dimension_numbers(num_spatial_dims, data_format, transpose=False) + strides = _convert_to_spatial_operand(strides, num_spatial_dims, data_format, include_batch_and_channels=False) + dilation_rate = _convert_to_spatial_operand(dilation_rate, num_spatial_dims, data_format, include_batch_and_channels=False) + if data_format == 'channels_last': + channels = inputs.shape[-1] + else: + channels = inputs.shape[1] + kernel_in_channels = kernel.shape[-2] + if channels % kernel_in_channels > 0: + raise ValueError(f"The number of input channels must be evenly divisible by kernel's in_channels. Received input channels {channels} and kernel in_channels {kernel_in_channels}. ") + feature_group_count = channels // kernel_in_channels + kernel = convert_to_tensor(kernel) + inputs = convert_to_tensor(inputs, dtype=kernel.dtype) + return jax.lax.conv_general_dilated(inputs, kernel, strides, padding, rhs_dilation=dilation_rate, dimension_numbers=dimension_numbers, feature_group_count=feature_group_count) + +def depthwise_conv(inputs, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = inputs.ndim - 2 + dimension_numbers = _convert_to_lax_conv_dimension_numbers(num_spatial_dims, data_format, transpose=False) + strides = _convert_to_spatial_operand(strides, num_spatial_dims, data_format, include_batch_and_channels=False) + dilation_rate = _convert_to_spatial_operand(dilation_rate, num_spatial_dims, data_format, include_batch_and_channels=False) + feature_group_count = inputs.shape[-1] if data_format == 'channels_last' else inputs.shape[1] + kernel = jnp.reshape(kernel, kernel.shape[:-2] + (1, feature_group_count * kernel.shape[-1])) + return jax.lax.conv_general_dilated(inputs, kernel, strides, padding, rhs_dilation=dilation_rate, dimension_numbers=dimension_numbers, feature_group_count=feature_group_count) + +def separable_conv(inputs, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + data_format = backend.standardize_data_format(data_format) + depthwise_conv_output = depthwise_conv(inputs, depthwise_kernel, strides, padding, data_format, dilation_rate) + return conv(depthwise_conv_output, pointwise_kernel, strides=1, padding='valid', data_format=data_format, dilation_rate=dilation_rate) + +def conv_transpose(inputs, kernel, strides=1, padding='valid', output_padding=None, data_format=None, dilation_rate=1): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = inputs.ndim - 2 + padding_values = compute_conv_transpose_padding_args_for_jax(input_shape=inputs.shape, kernel_shape=kernel.shape, strides=strides, padding=padding, output_padding=output_padding, dilation_rate=dilation_rate) + dimension_numbers = _convert_to_lax_conv_dimension_numbers(num_spatial_dims, data_format, transpose=False) + strides = _convert_to_spatial_operand(strides, num_spatial_dims, data_format, include_batch_and_channels=False) + 
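+ # _convert_to_spatial_operand() broadcasts a scalar stride/dilation over the
+ # spatial axes; with include_batch_and_channels=False it returns the bare
+ # per-axis tuple consumed by lax. Assumed example:
+ # >>> _convert_to_spatial_operand(2, 2, 'channels_last', include_batch_and_channels=False)
+ # (2, 2)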
dilation_rate = _convert_to_spatial_operand(dilation_rate, num_spatial_dims, data_format, include_batch_and_channels=False) + return jax.lax.conv_transpose(inputs, kernel, strides, padding=padding_values, rhs_dilation=dilation_rate, dimension_numbers=dimension_numbers, transpose_kernel=True) + +def one_hot(x, num_classes, axis=-1, dtype='float32', sparse=False): + x = convert_to_tensor(x) + if sparse: + if axis < 0: + axis = axis + len(x.shape) + 1 + if dtype is None: + dtype = 'float32' + values = jnp.greater_equal(jnp.ravel(x), 0).astype(dtype) + values_count = values.shape[0] + indices = [jnp.arange(dim) for dim in x.shape] + indices = jnp.meshgrid(*indices, indexing='ij') + indices.insert(axis, jnp.maximum(x, 0)) + indices = [a.reshape(values_count, 1).astype('int32') for a in indices] + indices = jnp.concatenate(indices, axis=1) + shape = list(x.shape) + shape.insert(axis, num_classes) + shape = tuple(shape) + return jax_sparse.BCOO((values, indices), shape=shape, indices_sorted=True, unique_indices=True) + return jnn.one_hot(x, num_classes, axis=axis, dtype=dtype) + +def multi_hot(x, num_classes, axis=-1, dtype='float32', sparse=False): + x = convert_to_tensor(x) + reduction_axis = 1 if len(x.shape) > 1 else 0 + if sparse: + result = one_hot(x, num_classes, axis=axis, dtype='int32', sparse=sparse) + result = jax_sparse.bcoo_reduce_sum(result, axes=(reduction_axis,)) + result = jax_sparse.bcoo_sum_duplicates(result) + values = jnp.greater_equal(result.data, 0).astype(dtype) + return jax_sparse.BCOO((values, result.indices), shape=result.shape, indices_sorted=True, unique_indices=True) + return jnp.max(one_hot(cast(x, 'int32'), num_classes, axis=axis, dtype=dtype), axis=reduction_axis) + +def categorical_crossentropy(target, output, from_logits=False, axis=-1): + target = jnp.array(target) + output = jnp.array(output) + if target.shape != output.shape: + raise ValueError(f'Arguments `target` and `output` must have the same shape. Received: target.shape={target.shape}, output.shape={output.shape}') + if len(target.shape) < 1: + raise ValueError(f'Arguments `target` and `output` must be at least rank 1. Received: target.shape={target.shape}, output.shape={output.shape}') + if from_logits: + log_prob = jax.nn.log_softmax(output, axis=axis) + else: + output = output / jnp.sum(output, axis, keepdims=True) + output = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) + log_prob = jnp.log(output) + return -jnp.sum(target * log_prob, axis=axis) + +def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): + target = jnp.array(target, dtype='int32') + output = jnp.array(output) + if len(target.shape) == len(output.shape) and target.shape[-1] == 1: + target = jnp.squeeze(target, axis=-1) + if len(output.shape) < 1: + raise ValueError(f'Argument `output` must be at least rank 1. 
Received: output.shape={output.shape}') + if target.shape != output.shape[:-1]: + raise ValueError(f'Arguments `target` and `output` must have the same shape up until the last dimension: target.shape={target.shape}, output.shape={output.shape}') + if from_logits: + log_prob = jax.nn.log_softmax(output, axis=axis) + else: + output = output / jnp.sum(output, axis, keepdims=True) + output = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) + log_prob = jnp.log(output) + target = jnn.one_hot(target, output.shape[axis], axis=axis) + return -jnp.sum(target * log_prob, axis=axis) + +def binary_crossentropy(target, output, from_logits=False): + target = jnp.array(target) + output = jnp.array(output) + if target.shape != output.shape: + raise ValueError(f'Arguments `target` and `output` must have the same shape. Received: target.shape={target.shape}, output.shape={output.shape}') + if from_logits: + log_logits = jax.nn.log_sigmoid(output) + log_neg_logits = jax.nn.log_sigmoid(-output) + return -1.0 * target * log_logits - (1.0 - target) * log_neg_logits + output = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) + bce = target * jnp.log(output) + bce += (1.0 - target) * jnp.log(1.0 - output) + return -bce + +def moments(x, axes, keepdims=False, synchronized=False): + if synchronized: + raise NotImplementedError('Argument synchronized=True is not supported with JAX.') + need_cast = False + ori_dtype = backend.standardize_dtype(x.dtype) + if ori_dtype in ('float16', 'bfloat16'): + need_cast = True + x = cast(x, 'float32') + mean = jnp.mean(x, axes, keepdims=True) + variance = jnp.var(x, axis=axes, keepdims=True) + if not keepdims: + mean = jnp.squeeze(mean, axes) + variance = jnp.squeeze(variance, axes) + if need_cast: + mean = jnp.clip(mean, jnp.finfo(jnp.float16).min, jnp.finfo(jnp.float16).max) + variance = jnp.clip(variance, jnp.finfo(jnp.float16).min, jnp.finfo(jnp.float16).max) + mean = cast(mean, ori_dtype) + variance = cast(variance, ori_dtype) + return (mean, variance) + +def batch_normalization(x, mean, variance, axis, offset=None, scale=None, epsilon=0.001): + shape = [1] * len(x.shape) + shape[axis] = mean.shape[0] + mean = jnp.reshape(mean, shape) + variance = jnp.reshape(variance, shape) + inv = jax.lax.rsqrt(variance + epsilon) + if scale is not None: + scale = jnp.reshape(scale, shape) + inv = inv * scale + res = -mean * inv + if offset is not None: + offset = jnp.reshape(offset, shape) + res = res + offset + return jnp.add(x * inv, res) + +def ctc_loss(target, output, target_length, output_length, mask_index=0): + target = convert_to_tensor(target, dtype='int32') + output = convert_to_tensor(output) + target_length = convert_to_tensor(target_length, 'int32') + output_length = convert_to_tensor(output_length, 'int32') + (batch_size, max_input_length, num_classes) = output.shape + (batch_size, max_label_length) = target.shape + log_epsilon = -100000.0 + dtype = backend.result_type(output.dtype, 'float32') + output = cast(output, dtype) + + def _lengths_to_paddings(lengths, max_length): + indices = jnp.arange(max_length).reshape((1,) * lengths.ndim + (max_length,)) + lengths = jnp.expand_dims(lengths, axis=-1) + elem_valid = indices < lengths + return jnp.logical_not(elem_valid) + target_paddings = _lengths_to_paddings(target_length, max_label_length) + output_paddings = _lengths_to_paddings(output_length, max_input_length) + target_paddings = target_paddings.astype(output.dtype) + output_paddings = output_paddings.astype(output.dtype) + logprobs = 
jnn.log_softmax(output) + label_lengths = max_label_length - jnp.sum(target_paddings, axis=1).astype(jnp.int32) + repeat = (target[:, :-1] == target[:, 1:]).astype(jnp.float32) + repeat = jnp.pad(repeat, ((0, 0), (0, 1))) + logprobs_phi = logprobs[:, :, mask_index:mask_index + 1] + logprobs_phi = jnp.transpose(logprobs_phi, (1, 0, 2)) + _one_hot = jax.nn.one_hot(target, num_classes=num_classes) + logprobs_emit = jnp.einsum('btk,bnk->btn', logprobs, _one_hot) + logprobs_emit = jnp.transpose(logprobs_emit, (1, 0, 2)) + logalpha_phi_init = jnp.ones((batch_size, max_label_length + 1), dtype=output.dtype) * log_epsilon + logalpha_phi_init = logalpha_phi_init.at[:, 0].set(0.0) + logalpha_emit_init = jnp.ones((batch_size, max_label_length), dtype=output.dtype) * log_epsilon + + def update_phi_score(phi, added_score): + return jnp.concatenate([phi[:, :1], jnp.logaddexp(phi[:, 1:], added_score)], axis=-1) + + def loop_body(prev, x): + (prev_phi, prev_emit) = prev + prev_phi_orig = prev_phi + prev_phi = update_phi_score(prev_phi, prev_emit + log_epsilon * repeat) + (logprob_emit, logprob_phi, pad) = x + next_emit = jnp.logaddexp(prev_phi[:, :-1] + logprob_emit, prev_emit + logprob_emit) + next_phi = prev_phi + logprob_phi + next_phi = update_phi_score(next_phi, prev_emit + logprob_phi + log_epsilon * (1.0 - repeat)) + pad = pad.reshape((batch_size, 1)) + next_emit = pad * prev_emit + (1.0 - pad) * next_emit + next_phi = pad * prev_phi_orig + (1.0 - pad) * next_phi + return ((next_phi, next_emit), (next_phi, next_emit)) + xs = (logprobs_emit, logprobs_phi, output_paddings.transpose((1, 0))) + (_, (logalpha_phi, logalpha_emit)) = jax.lax.scan(loop_body, (logalpha_phi_init, logalpha_emit_init), xs) + logalpha_phi_last = update_phi_score(logalpha_phi[-1], logalpha_emit[-1]) + logalpha_phi = logalpha_phi.at[-1].set(logalpha_phi_last) + _one_hot = jax.nn.one_hot(label_lengths, num_classes=max_label_length + 1) + per_seq_loss = -jnp.einsum('bn,bn->b', logalpha_phi_last, _one_hot) + return per_seq_loss + +def _ctc_greedy_decode(inputs, sequence_lengths, merge_repeated=True, mask_index=None): + inputs = convert_to_tensor(inputs) + sequence_lengths = convert_to_tensor(sequence_lengths, dtype='int32') + (batch_size, max_length, num_classes) = inputs.shape + if mask_index is None: + mask_index = num_classes - 1 + indices = jnp.argmax(inputs, axis=-1) + scores = jnp.max(inputs, axis=-1) + seqlen_mask = jnp.arange(max_length)[None, :] + seqlen_mask = seqlen_mask >= sequence_lengths[:, None] + indices = jnp.where(seqlen_mask, mask_index, indices) + scores = jnp.where(seqlen_mask, 0.0, scores) + if merge_repeated: + repeat_mask = indices[:, 1:] == indices[:, :-1] + repeat_mask = jnp.pad(repeat_mask, ((0, 0), (1, 0))) + indices = jnp.where(repeat_mask, mask_index, indices) + invalid_mask = indices == mask_index + indices = jnp.where(invalid_mask, -1, indices) + order = jnp.expand_dims(jnp.arange(max_length), axis=0) + order = jnp.tile(order, (batch_size, 1)) + order = jnp.where(invalid_mask, max_length, order) + order = jnp.argsort(order, axis=-1) + indices = jnp.take_along_axis(indices, order, axis=-1) + scores = -jnp.sum(scores, axis=1)[:, None] + indices = jnp.expand_dims(indices, axis=0) + return (indices, scores) + +def _ctc_beam_search_decode(inputs, sequence_lengths, beam_width=100, top_paths=1, mask_index=None): + inputs = convert_to_tensor(inputs) + sequence_lengths = convert_to_tensor(sequence_lengths) + (batch_size, max_seq_len, num_classes) = inputs.shape + inputs = jnn.log_softmax(inputs) + seqlen_mask 
= jnp.arange(max_seq_len)[None, :] >= sequence_lengths[:, None] + if mask_index is None: + mask_index = num_classes - 1 + inputs = jnp.flip(inputs, axis=2) + mask_index = num_classes - mask_index - 1 + _pad = -1 + init_paths = jnp.full((batch_size, 2 * beam_width, max_seq_len), _pad, dtype=jnp.int32) + num_init_paths = builtins.min(num_classes, beam_width) + max_classes = jnp.argsort(inputs[:, 0], axis=1)[:, -num_init_paths:] + init_classes = jnp.where(max_classes == mask_index, _pad, max_classes) + init_paths = init_paths.at[:, :num_init_paths, 0].set(init_classes) + init_scores = jnp.full((batch_size, 2 * beam_width), -jnp.inf, dtype=inputs.dtype).at[:, :num_init_paths].set(jnp.take_along_axis(inputs[:, 0], max_classes, axis=1)) + init_masked = init_paths[:, :, 0] == _pad + + def _extend_paths(paths, scores, masked, x): + paths = jnp.repeat(paths, num_classes, axis=0) + scores = jnp.repeat(scores, num_classes) + masked = jnp.repeat(masked, num_classes) + path_tail_index = jnp.argmax(paths == _pad, axis=1) + paths_arange = jnp.arange(2 * beam_width * num_classes) + path_tails = paths[paths_arange, path_tail_index - 1] + path_tails = jnp.where(path_tail_index == 0, _pad, path_tails) + classes = jnp.arange(num_classes).at[mask_index].set(_pad) + classes = jnp.tile(classes, 2 * beam_width) + prev_masked = masked + masked = classes == _pad + masked_repeat = ~prev_masked & (path_tails == classes) + classes = jnp.where(masked_repeat, _pad, classes) + paths = paths.at[paths_arange, path_tail_index].set(classes) + x = jnp.tile(x, 2 * beam_width) + scores = scores + x + return (paths, scores, masked) + + def _merge_scores(unique_inverse, scores): + scores_max = jnp.max(scores) + scores_exp = jnp.exp(scores - scores_max) + scores = jnp.zeros_like(scores).at[unique_inverse].add(scores_exp) + scores = jnp.log(scores) + scores_max + return scores + + def _prune_paths(paths, scores, masked): + (paths, unique_inverse) = jnp.unique(paths, return_inverse=True, size=2 * num_classes * beam_width, axis=0, fill_value=_pad) + if len(unique_inverse.shape) >= 2: + unique_inverse = jnp.squeeze(unique_inverse, axis=1) + emit_scores = jnp.where(masked, -jnp.inf, scores) + mask_scores = jnp.where(masked, scores, -jnp.inf) + emit_scores = _merge_scores(unique_inverse, emit_scores) + mask_scores = _merge_scores(unique_inverse, mask_scores) + total_scores = jnp.logaddexp(emit_scores, mask_scores) + top_indices = jnp.argsort(total_scores)[-beam_width:] + paths = paths[top_indices] + emit_scores = emit_scores[top_indices] + mask_scores = mask_scores[top_indices] + paths = jnp.tile(paths, (2, 1)) + scores = jnp.concatenate([emit_scores, mask_scores]) + masked = jnp.concatenate([jnp.zeros(beam_width, bool), jnp.ones(beam_width, bool)]) + return (paths, scores, masked) + + def _decode_step(paths, scores, masked, x): + (paths, scores, masked) = _extend_paths(paths, scores, masked, x) + (paths, scores, masked) = _prune_paths(paths, scores, masked) + return (paths, scores, masked) + + def _step(prev, x): + (paths, scores, masked) = prev + (x, seqlen_mask) = x + (paths, scores, masked) = lax.cond(seqlen_mask, lambda paths, scores, masked, x: (paths, scores, masked), _decode_step, paths, scores, masked, x) + return ((paths, scores, masked), None) + + def _decode_batch(init_paths, init_scores, init_masked, inputs, seqlen_mask): + ((paths, scores, masked), _) = lax.scan(_step, (init_paths, init_scores, init_masked), (inputs[1:], seqlen_mask[1:])) + (paths, unique_inverse) = jnp.unique(paths, return_inverse=True, size=2 * 
num_classes * beam_width, axis=0, fill_value=_pad) + if len(unique_inverse.shape) >= 2: + unique_inverse = jnp.squeeze(unique_inverse, axis=1) + scores = _merge_scores(unique_inverse, scores) + top_indices = jnp.argsort(scores)[-top_paths:][::-1] + paths = paths[top_indices] + scores = scores[top_indices] + return (paths, scores) + (paths, scores) = jax.vmap(_decode_batch)(init_paths, init_scores, init_masked, inputs, seqlen_mask) + paths = jnp.where(paths == _pad, _pad, num_classes - paths - 1) + paths = jnp.transpose(paths, [1, 0, 2]) + return (paths, scores) + +def ctc_decode(inputs, sequence_lengths, strategy='greedy', beam_width=100, top_paths=1, merge_repeated=True, mask_index=0): + inputs = convert_to_tensor(inputs) + dtype = backend.result_type(inputs.dtype, 'float32') + inputs = cast(inputs, dtype) + if strategy == 'greedy': + return _ctc_greedy_decode(inputs, sequence_lengths, merge_repeated=merge_repeated, mask_index=mask_index) + elif strategy == 'beam_search': + return _ctc_beam_search_decode(inputs, sequence_lengths, beam_width=beam_width, top_paths=top_paths, mask_index=mask_index) + else: + raise ValueError(f"Invalid strategy {strategy}. Supported values are 'greedy' and 'beam_search'.") + +def psnr(x1, x2, max_val): + if x1.shape != x2.shape: + raise ValueError(f'Input shapes {x1.shape} and {x2.shape} must match for PSNR calculation. ') + max_val = convert_to_tensor(max_val, dtype=x2.dtype) + mse = jnp.mean(jnp.square(x1 - x2)) + psnr = 20 * jnp.log10(max_val) - 10 * jnp.log10(mse) + return psnr + +# File: keras-master/keras/src/backend/jax/numpy.py +import builtins +import math +import jax.experimental.sparse as jax_sparse +import jax.numpy as jnp +from keras.src.backend import config +from keras.src.backend.common import dtypes +from keras.src.backend.common.backend_utils import canonicalize_axis +from keras.src.backend.common.backend_utils import to_tuple_or_list +from keras.src.backend.common.variables import standardize_dtype +from keras.src.backend.jax import nn +from keras.src.backend.jax import sparse +from keras.src.backend.jax.core import cast +from keras.src.backend.jax.core import convert_to_tensor + +@sparse.elementwise_binary_union(linear=True, use_sparsify=True) +def add(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.add(x1, x2) + +def bincount(x, weights=None, minlength=0, sparse=False): + if sparse or isinstance(x, jax_sparse.BCOO): + if isinstance(x, jax_sparse.BCOO): + if weights is not None: + if not isinstance(weights, jax_sparse.BCOO): + raise ValueError('`x` and `weights` must both be BCOOs') + if x.indices is not weights.indices: + if not jnp.all(jnp.equal(x.indices, weights.indices)): + raise ValueError('`x` and `weights` BCOOs must have the same indices') + weights = weights.data + x = x.data + reduction_axis = 1 if len(x.shape) > 1 else 0 + maxlength = jnp.maximum(jnp.max(x) + 1, minlength) + one_hot_encoding = nn.one_hot(x, maxlength, sparse=True) + if weights is not None: + expanded_weights = jnp.expand_dims(weights, reduction_axis + 1) + one_hot_encoding = one_hot_encoding * expanded_weights + outputs = jax_sparse.bcoo_reduce_sum(one_hot_encoding, axes=(reduction_axis,)) + return outputs + if len(x.shape) == 2: + if weights is None: + + def bincount_fn(arr): + return jnp.bincount(arr, minlength=minlength) + bincounts = list(map(bincount_fn, x)) + else: + + def bincount_fn(arr_w): + return jnp.bincount(arr_w[0], weights=arr_w[1], minlength=minlength) + bincounts = list(map(bincount_fn, zip(x, weights))) + return 
jnp.stack(bincounts) + return jnp.bincount(x, weights=weights, minlength=minlength) + +def einsum(subscripts, *operands, **kwargs): + operands = [convert_to_tensor(x) for x in operands] + dtypes = list(set((standardize_dtype(x.dtype) for x in operands))) + if len(dtypes) == 1 and dtypes[0] == 'int8': + preferred_element_type = 'int32' + else: + preferred_element_type = None + kwargs['preferred_element_type'] = preferred_element_type + return jnp.einsum(subscripts, *operands, **kwargs) + +@sparse.elementwise_binary_union(linear=True, use_sparsify=True) +def subtract(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.subtract(x1, x2) + +def matmul(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + x1_dtype = standardize_dtype(x1.dtype) + x2_dtype = standardize_dtype(x2.dtype) + if x1_dtype == 'int8' and x2_dtype == 'int8': + preferred_element_type = 'int32' + else: + preferred_element_type = None + if isinstance(x1, jax_sparse.JAXSparse) or isinstance(x2, jax_sparse.JAXSparse): + if not hasattr(matmul, 'sparse_matmul'): + matmul.sparse_matmul = jax_sparse.sparsify(jnp.matmul) + if isinstance(x1, jax_sparse.BCOO): + x1 = jax_sparse.bcoo_update_layout(x1, n_batch=len(x1.shape) - 2, on_inefficient='warn') + if isinstance(x2, jax_sparse.BCOO): + x2 = jax_sparse.bcoo_update_layout(x2, n_batch=len(x2.shape) - 2, on_inefficient='warn') + return matmul.sparse_matmul(x1, x2, preferred_element_type=preferred_element_type) + return jnp.matmul(x1, x2, preferred_element_type=preferred_element_type) + +def multiply(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + if isinstance(x1, jax_sparse.BCOO): + if isinstance(x2, jax_sparse.BCOO): + if x1.indices is x2.indices: + if not x1.unique_indices: + x1 = jax_sparse.bcoo_sum_duplicates(x1) + x2 = jax_sparse.bcoo_sum_duplicates(x2) + return jax_sparse.BCOO((jnp.multiply(x1.data, x2.data), x1.indices), shape=x1.shape, indices_sorted=True, unique_indices=True) + else: + return jax_sparse.bcoo_multiply_sparse(x1, x2) + else: + out_data = jax_sparse.bcoo_multiply_dense(x1, x2) + return jax_sparse.BCOO((out_data, x1.indices), shape=x1.shape, indices_sorted=x1.indices_sorted, unique_indices=x1.unique_indices) + elif isinstance(x2, jax_sparse.BCOO): + out_data = jax_sparse.bcoo_multiply_dense(x2, x1) + return jax_sparse.BCOO((out_data, x2.indices), shape=x2.shape, indices_sorted=x2.indices_sorted, unique_indices=x2.unique_indices) + return jnp.multiply(x1, x2) + +def mean(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + compute_dtype = dtypes.result_type(x.dtype, 'float32') + if 'int' in ori_dtype or ori_dtype == 'bool': + result_dtype = compute_dtype + else: + result_dtype = ori_dtype + if isinstance(x, jax_sparse.BCOO): + if axis is None: + axis = tuple(range(len(x.shape))) + (canonical_axis, keep_dims_shape, broadcast_dimensions) = sparse.axis_shape_dims_for_broadcast_in_dim(axis, x.shape, insert_dims=False) + divisor = math.prod((x.shape[i] for i in canonical_axis)) + output = jax_sparse.bcoo_reduce_sum(x, axes=canonical_axis) + output = jax_sparse.BCOO((output.data.astype(result_dtype) / divisor, output.indices), shape=output.shape) + if keepdims: + output = jax_sparse.bcoo_broadcast_in_dim(output, shape=keep_dims_shape, broadcast_dimensions=broadcast_dimensions) + return output + else: + output = jnp.mean(x, axis=axis, keepdims=keepdims, dtype=compute_dtype) + return cast(output, result_dtype) + +def max(x, axis=None, keepdims=False, 
initial=None): + x = convert_to_tensor(x) + return jnp.max(x, axis=axis, keepdims=keepdims, initial=initial) + +def ones(shape, dtype=None): + dtype = dtype or config.floatx() + return jnp.ones(shape, dtype=dtype) + +def zeros(shape, dtype=None): + dtype = dtype or config.floatx() + return jnp.zeros(shape, dtype=dtype) + +@sparse.elementwise_unary(linear=False) +def absolute(x): + x = convert_to_tensor(x) + return jnp.absolute(x) + +def abs(x): + return absolute(x) + +def all(x, axis=None, keepdims=False): + return jnp.all(x, axis=axis, keepdims=keepdims) + +def any(x, axis=None, keepdims=False): + return jnp.any(x, axis=axis, keepdims=keepdims) + +def amax(x, axis=None, keepdims=False): + return jnp.amax(x, axis=axis, keepdims=keepdims) + +def amin(x, axis=None, keepdims=False): + return jnp.amin(x, axis=axis, keepdims=keepdims) + +def append(x1, x2, axis=None): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.append(x1, x2, axis=axis) + +def arange(start, stop=None, step=1, dtype=None): + if dtype is None: + dtypes_to_resolve = [getattr(start, 'dtype', type(start)), getattr(step, 'dtype', type(step))] + if stop is not None: + dtypes_to_resolve.append(getattr(stop, 'dtype', type(stop))) + dtype = dtypes.result_type(*dtypes_to_resolve) + dtype = standardize_dtype(dtype) + return jnp.arange(start, stop, step=step, dtype=dtype) + +@sparse.densifying_unary +def arccos(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.arccos(x) + +@sparse.densifying_unary +def arccosh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.arccosh(x) + +@sparse.elementwise_unary(linear=False) +def arcsin(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.arcsin(x) + +@sparse.elementwise_unary(linear=False) +def arcsinh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.arcsinh(x) + +@sparse.elementwise_unary(linear=False) +def arctan(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.arctan(x) + +def arctan2(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype, float) + x1 = cast(x1, dtype) + x2 = cast(x2, dtype) + return jnp.arctan2(x1, x2) + +@sparse.elementwise_unary(linear=False) +def arctanh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.arctanh(x) + +def argmax(x, axis=None, keepdims=False): + return jnp.argmax(x, axis=axis, keepdims=keepdims) + +def argmin(x, axis=None, keepdims=False): + return jnp.argmin(x, axis=axis, keepdims=keepdims) + +def argsort(x, axis=-1): + x = convert_to_tensor(x) + if x.ndim == 0: + return jnp.argsort(x, axis=None) + return jnp.argsort(x, axis=axis) + +def array(x, dtype=None): + return jnp.array(x, dtype=dtype) + +def average(x, axis=None, weights=None): + x = convert_to_tensor(x) + 
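# Resolve a common float-promoting dtype across x and the optional weights, so integer inputs are averaged without truncation. +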
dtypes_to_resolve = [x.dtype, float] + if weights is not None: + weights = convert_to_tensor(weights) + dtypes_to_resolve.append(weights.dtype) + dtype = dtypes.result_type(*dtypes_to_resolve) + x = cast(x, dtype) + if weights is not None: + weights = cast(weights, dtype) + return jnp.average(x, weights=weights, axis=axis) + +def bitwise_and(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return jnp.bitwise_and(x, y) + +def bitwise_invert(x): + x = convert_to_tensor(x) + return jnp.invert(x) + +def bitwise_not(x): + return bitwise_invert(x) + +def bitwise_or(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return jnp.bitwise_or(x, y) + +def bitwise_xor(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return jnp.bitwise_xor(x, y) + +def bitwise_left_shift(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return jnp.left_shift(x, y) + +def left_shift(x, y): + return bitwise_left_shift(x, y) + +def bitwise_right_shift(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return jnp.right_shift(x, y) + +def right_shift(x, y): + return bitwise_right_shift(x, y) + +def broadcast_to(x, shape): + x = convert_to_tensor(x) + return jnp.broadcast_to(x, shape) + +@sparse.elementwise_unary(linear=False) +def ceil(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.ceil(x) + +def clip(x, x_min, x_max): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'bool': + x = cast(x, 'int32') + return jnp.clip(x, x_min, x_max) + +def concatenate(xs, axis=0): + bcoo_count = builtins.sum((isinstance(x, jax_sparse.BCOO) for x in xs)) + if bcoo_count: + if bcoo_count == len(xs): + axis = canonicalize_axis(axis, len(xs[0].shape)) + return jax_sparse.bcoo_concatenate(xs, dimension=axis) + else: + xs = [x.todense() if isinstance(x, jax_sparse.JAXSparse) else x for x in xs] + return jnp.concatenate(xs, axis=axis) + +@sparse.elementwise_unary(linear=True) +def conjugate(x): + x = convert_to_tensor(x) + return jnp.conjugate(x) + +@sparse.elementwise_unary(linear=True) +def conj(x): + x = convert_to_tensor(x) + return jnp.conjugate(x) + +@sparse.elementwise_unary(linear=True) +def copy(x): + x = convert_to_tensor(x) + return jnp.copy(x) + +@sparse.densifying_unary +def cos(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.cos(x) + +@sparse.densifying_unary +def cosh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.cosh(x) + +def count_nonzero(x, axis=None): + return cast(jnp.count_nonzero(x, axis=axis), 'int32') + +def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.cross(x1, x2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis) + +def cumprod(x, axis=None, dtype=None): + x = convert_to_tensor(x) + return jnp.cumprod(x, axis=axis, dtype=dtype) + +def cumsum(x, axis=None, dtype=None): + x = convert_to_tensor(x) + return jnp.cumsum(x, axis=axis, dtype=dtype) + +def diag(x, k=0): + x = convert_to_tensor(x) + return jnp.diag(x, k=k) + +def diagonal(x, offset=0, axis1=0, axis2=1): + x = convert_to_tensor(x) + return jnp.diagonal(x, offset=offset, axis1=axis1, axis2=axis2) 
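+ # Example (illustrative sketch, not from the upstream file): these thin wrappers keep
+ # NumPy semantics on the JAX backend, e.g.
+ #   m = diag(arange(0, 3))  # 3x3 matrix with [0, 1, 2] on the diagonal
+ #   diagonal(m)             # -> Array([0, 1, 2], dtype=int32)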
+ +def diff(a, n=1, axis=-1): + a = convert_to_tensor(a) + return jnp.diff(a, n=n, axis=axis) + +@sparse.elementwise_unary(linear=False) +def digitize(x, bins): + x = convert_to_tensor(x) + bins = convert_to_tensor(bins) + return jnp.digitize(x, bins) + +def dot(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return jnp.dot(x, y) + +def empty(shape, dtype=None): + dtype = dtype or config.floatx() + return jnp.empty(shape, dtype=dtype) + +def equal(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.equal(x1, x2) + +@sparse.densifying_unary +def exp(x): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + x = cast(x, config.floatx()) + return jnp.exp(x) + +def expand_dims(x, axis): + x = convert_to_tensor(x) + if isinstance(x, jax_sparse.BCOO): + (_, result_shape, broadcast_dimensions) = sparse.axis_shape_dims_for_broadcast_in_dim(axis, x.shape, insert_dims=True) + return jax_sparse.bcoo_broadcast_in_dim(x, shape=result_shape, broadcast_dimensions=broadcast_dimensions) + return jnp.expand_dims(x, axis) + +@sparse.elementwise_unary(linear=False) +def expm1(x): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + x = cast(x, config.floatx()) + return jnp.expm1(x) + +def flip(x, axis=None): + return jnp.flip(x, axis=axis) + +@sparse.elementwise_unary(linear=False) +def floor(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.floor(x) + +def full(shape, fill_value, dtype=None): + dtype = dtype or config.floatx() + return jnp.full(shape, fill_value, dtype=dtype) + +def full_like(x, fill_value, dtype=None): + return jnp.full_like(x, fill_value, dtype=dtype) + +def greater(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.greater(x1, x2) + +def greater_equal(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.greater_equal(x1, x2) + +def hstack(xs): + return jnp.hstack(xs) + +def identity(n, dtype=None): + dtype = dtype or config.floatx() + return jnp.identity(n, dtype=dtype) + +@sparse.elementwise_unary(linear=True) +def imag(x): + x = convert_to_tensor(x) + return jnp.imag(x) + +def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.isclose(x1, x2, rtol, atol, equal_nan) + +@sparse.densifying_unary +def isfinite(x): + x = convert_to_tensor(x) + return jnp.isfinite(x) + +@sparse.elementwise_unary(linear=False) +def isinf(x): + x = convert_to_tensor(x) + return jnp.isinf(x) + +@sparse.elementwise_unary(linear=False) +def isnan(x): + x = convert_to_tensor(x) + return jnp.isnan(x) + +def less(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.less(x1, x2) + +def less_equal(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.less_equal(x1, x2) + +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): + return jnp.linspace(start, stop, num=num, endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis) + +@sparse.densifying_unary +def log(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + x = cast(x, config.floatx()) + return jnp.log(x) + +@sparse.densifying_unary +def log10(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + x 
= cast(x, config.floatx()) + return jnp.log10(x) + +@sparse.elementwise_unary(linear=False) +def log1p(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + x = cast(x, config.floatx()) + return jnp.log1p(x) + +@sparse.densifying_unary +def log2(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + x = cast(x, config.floatx()) + return jnp.log2(x) + +def logaddexp(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype, float) + x1 = cast(x1, dtype) + x2 = cast(x2, dtype) + return jnp.logaddexp(x1, x2) + +def logical_and(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.logical_and(x1, x2) + +def logical_not(x): + x = convert_to_tensor(x) + return jnp.logical_not(x) + +def logical_or(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.logical_or(x1, x2) + +def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0): + return jnp.logspace(start, stop, num=num, endpoint=endpoint, base=base, dtype=dtype, axis=axis) + +@sparse.elementwise_binary_union(linear=False, use_sparsify=False) +def maximum(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.maximum(x1, x2) + +def median(x, axis=None, keepdims=False): + if isinstance(axis, list): + axis = tuple(axis) + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + x = cast(x, config.floatx()) + result = jnp.median(x, axis=axis, keepdims=keepdims) + if keepdims is True and axis is None: + while result.ndim < x.ndim: + result = jnp.expand_dims(result, axis=-1) + return result + +def meshgrid(*x, indexing='xy'): + return jnp.meshgrid(*x, indexing=indexing) + +def min(x, axis=None, keepdims=False, initial=None): + x = convert_to_tensor(x) + return jnp.min(x, axis=axis, keepdims=keepdims, initial=initial) + +@sparse.elementwise_binary_union(linear=False, use_sparsify=False) +def minimum(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.minimum(x1, x2) + +def mod(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.mod(x1, x2) + +def moveaxis(x, source, destination): + return jnp.moveaxis(x, source=source, destination=destination) + +def nan_to_num(x, nan=0.0, posinf=None, neginf=None): + x = convert_to_tensor(x) + return jnp.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) + +def ndim(x): + return jnp.ndim(x) + +def nonzero(x): + return jnp.nonzero(x) + +def not_equal(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.not_equal(x1, x2) + +def ones_like(x, dtype=None): + return jnp.ones_like(x, dtype=dtype) + +def zeros_like(x, dtype=None): + return jnp.zeros_like(x, dtype=dtype) + +def outer(x1, x2): + return jnp.outer(x1, x2) + +def pad(x, pad_width, mode='constant', constant_values=None): + x = convert_to_tensor(x) + kwargs = {} + if constant_values is not None: + if mode != 'constant': + raise ValueError(f"Argument `constant_values` can only be provided when `mode == 'constant'`. 
Received: mode={mode}") + kwargs['constant_values'] = constant_values + return jnp.pad(x, pad_width, mode=mode, **kwargs) + +def prod(x, axis=None, keepdims=False, dtype=None): + x = convert_to_tensor(x) + return jnp.prod(x, axis=axis, keepdims=keepdims, dtype=dtype) + +def quantile(x, q, axis=None, method='linear', keepdims=False): + x = convert_to_tensor(x) + q = convert_to_tensor(q) + if standardize_dtype(x.dtype) == 'int64': + x = cast(x, config.floatx()) + result = jnp.quantile(x, q, axis=axis, method=method, keepdims=keepdims) + if keepdims is True and axis is None: + result_ndim = x.ndim + (1 if len(q.shape) > 0 else 0) + while result.ndim < result_ndim: + result = jnp.expand_dims(result, axis=-1) + return result + +def ravel(x): + x = convert_to_tensor(x) + return jnp.ravel(x) + +@sparse.elementwise_unary(linear=True) +def real(x): + x = convert_to_tensor(x) + return jnp.real(x) + +@sparse.densifying_unary +def reciprocal(x): + x = convert_to_tensor(x) + return jnp.reciprocal(x) + +def repeat(x, repeats, axis=None): + x = convert_to_tensor(x) + return jnp.repeat(x, repeats, axis=axis) + +def reshape(x, newshape): + if isinstance(x, jax_sparse.BCOO): + from keras.src.ops import operation_utils + output_shape = operation_utils.compute_reshape_output_shape(x.shape, newshape, 'new_shape') + if None not in output_shape: + newshape = output_shape + return jax_sparse.bcoo_reshape(x, new_sizes=newshape) + return jnp.reshape(x, newshape) + +def roll(x, shift, axis=None): + return jnp.roll(x, shift, axis=axis) + +def searchsorted(sorted_sequence, values, side='left'): + if ndim(sorted_sequence) != 1: + raise ValueError(f'`searchsorted` only supports 1-D sorted sequences. You can use `keras.ops.vectorized_map` to extend it to N-D sequences. Received: sorted_sequence.shape={sorted_sequence.shape}') + return jnp.searchsorted(sorted_sequence, values, side=side) + +@sparse.elementwise_unary(linear=False) +def sign(x): + x = convert_to_tensor(x) + return jnp.sign(x) + +@sparse.elementwise_unary(linear=False) +def sin(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.sin(x) + +@sparse.elementwise_unary(linear=False) +def sinh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.sinh(x) + +def size(x): + return jnp.size(x) + +def sort(x, axis=-1): + x = convert_to_tensor(x) + return jnp.sort(x, axis=axis) + +def split(x, indices_or_sections, axis=0): + return jnp.split(x, indices_or_sections, axis=axis) + +def stack(x, axis=0): + return jnp.stack(x, axis=axis) + +def std(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + x = cast(x, config.floatx()) + return jnp.std(x, axis=axis, keepdims=keepdims) + +def swapaxes(x, axis1, axis2): + x = convert_to_tensor(x) + return jnp.swapaxes(x, axis1=axis1, axis2=axis2) + +def take(x, indices, axis=None): + x = convert_to_tensor(x) + indices = convert_to_tensor(indices, sparse=False) + return jnp.take(x, indices, axis=axis) + +def take_along_axis(x, indices, axis=None): + return jnp.take_along_axis(x, indices, axis=axis) + +@sparse.elementwise_unary(linear=False) +def tan(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, 
dtype) + return jnp.tan(x) + +@sparse.elementwise_unary(linear=False) +def tanh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return jnp.tanh(x) + +def tensordot(x1, x2, axes=2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.tensordot(x1, x2, axes=axes) + +@sparse.elementwise_unary(linear=False) +def round(x, decimals=0): + x = convert_to_tensor(x) + x_dtype = standardize_dtype(x.dtype) + if 'int' in x_dtype and decimals < 0: + factor = cast(math.pow(10, decimals), config.floatx()) + x = cast(x, config.floatx()) + x = jnp.multiply(x, factor) + x = jnp.round(x) + x = jnp.divide(x, factor) + return cast(x, x_dtype) + else: + return jnp.round(x, decimals=decimals) + +def tile(x, repeats): + return jnp.tile(x, repeats) + +def trace(x, offset=0, axis1=0, axis2=1): + x = convert_to_tensor(x) + dtype = None + if standardize_dtype(x.dtype) in ('bool', 'uint8', 'uint16'): + dtype = 'int32' + return jnp.trace(x, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype) + +def tri(N, M=None, k=0, dtype=None): + dtype = dtype or config.floatx() + return jnp.tri(N, M=M, k=k, dtype=dtype) + +def tril(x, k=0): + x = convert_to_tensor(x) + return jnp.tril(x, k=k) + +def triu(x, k=0): + x = convert_to_tensor(x) + return jnp.triu(x, k=k) + +def trunc(x): + x = convert_to_tensor(x) + dtype = standardize_dtype(x.dtype) + if 'int' in dtype or 'bool' == dtype: + return x + return jnp.trunc(x) + +def vdot(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.vdot(x1, x2) + +def vstack(xs): + return jnp.vstack(xs) + +def vectorize(pyfunc, *, excluded=None, signature=None): + if excluded is None: + excluded = set() + return jnp.vectorize(pyfunc, excluded=excluded, signature=signature) + +def where(condition, x1, x2): + return jnp.where(condition, x1, x2) + +@sparse.elementwise_division +def divide(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.divide(x1, x2) + +def divide_no_nan(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + safe_x2 = jnp.where(x2 == 0, 1, x2) + return jnp.where(x2 == 0, 0, jnp.divide(x1, safe_x2)) + +def true_divide(x1, x2): + return divide(x1, x2) + +def power(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.power(x1, x2) + +@sparse.elementwise_unary(linear=True) +def negative(x): + x = convert_to_tensor(x) + return jnp.negative(x) + +@sparse.elementwise_unary(linear=False) +def square(x): + x = convert_to_tensor(x) + return jnp.square(x) + +@sparse.elementwise_unary(linear=False) +def sqrt(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + x = cast(x, config.floatx()) + return jnp.sqrt(x) + +def squeeze(x, axis=None): + if isinstance(x, jax_sparse.BCOO): + if axis is None: + axis = tuple((i for (i, d) in enumerate(x.shape) if d == 1)) + axis = to_tuple_or_list(axis) + return jax_sparse.bcoo_squeeze(x, dimensions=axis) + return jnp.squeeze(x, axis=axis) + +def transpose(x, axes=None): + x = convert_to_tensor(x) + if isinstance(x, jax_sparse.BCOO): + num_dims = len(x.shape) + if axes is None: + permutation = tuple(range(num_dims)[::-1]) + else: + permutation = [] + for a in axes: + a = canonicalize_axis(a, num_dims) + permutation.append(a) + return jax_sparse.bcoo_transpose(x, permutation=permutation) + return jnp.transpose(x, axes=axes) + +def var(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + 
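# Compute the variance in at least float32 for numerical stability, then cast the result to the float-promoted output dtype. +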
compute_dtype = dtypes.result_type(x.dtype, 'float32') + result_dtype = dtypes.result_type(x.dtype, float) + return cast(jnp.var(x, axis=axis, keepdims=keepdims, dtype=compute_dtype), result_dtype) + +def sum(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + if isinstance(x, jax_sparse.BCOO): + if axis is None: + axis = tuple(range(len(x.shape))) + (canonical_axis, keep_dims_shape, broadcast_dimensions) = sparse.axis_shape_dims_for_broadcast_in_dim(axis, x.shape, insert_dims=False) + output = jax_sparse.bcoo_reduce_sum(x, axes=canonical_axis) + if keepdims: + output = jax_sparse.bcoo_broadcast_in_dim(output, shape=keep_dims_shape, broadcast_dimensions=broadcast_dimensions) + return output + return jnp.sum(x, axis=axis, keepdims=keepdims) + +def eye(N, M=None, k=0, dtype=None): + dtype = dtype or config.floatx() + return jnp.eye(N, M=M, k=k, dtype=dtype) + +def floor_divide(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.floor_divide(x1, x2) + +def logical_xor(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.logical_xor(x1, x2) + +def correlate(x1, x2, mode='valid'): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return jnp.correlate(x1, x2, mode) + +def select(condlist, choicelist, default=0): + return jnp.select(condlist, choicelist, default=default) + +def slogdet(x): + x = convert_to_tensor(x) + return tuple(jnp.linalg.slogdet(x)) + +def argpartition(x, kth, axis=-1): + return jnp.argpartition(x, kth, axis) + +# File: keras-master/keras/src/backend/jax/optimizer.py +import jax +from jax import numpy as jnp +from keras.src.optimizers import base_optimizer + +class JaxOptimizer(base_optimizer.BaseOptimizer): + + def _backend_apply_gradients(self, grads, trainable_variables): + if self.gradient_accumulation_steps: + is_update_step = (self._iterations + 1) % self.gradient_accumulation_steps == 0 + steps = self.gradient_accumulation_steps + current_trainable_vars_value = [v.value for v in trainable_variables] + current_optimizer_vars_value = [v.value for v in self.variables] + acc_grads = [self._accumulated_gradients[self._get_variable_index(v)] for v in trainable_variables] + new_g_accs = jax.lax.cond(is_update_step, lambda : [jnp.zeros(g.shape, dtype=g.dtype) for g in acc_grads], lambda : [g + acc_g for (g, acc_g) in zip(grads, acc_grads)]) + grads = jax.lax.cond(is_update_step, lambda : [(g + acc_g) / steps for (g, acc_g) in zip(grads, acc_grads)], lambda : list(grads)) + grads = self._clip_gradients(grads) + self._apply_weight_decay(trainable_variables) + self._backend_update_step(grads, trainable_variables, self.learning_rate) + new_trainable_vars = jax.lax.cond(is_update_step, lambda : [v.value for v in trainable_variables], lambda : current_trainable_vars_value) + new_opt_vars = jax.lax.cond(is_update_step, lambda : [v.value for v in self.variables], lambda : current_optimizer_vars_value) + for (value, v) in zip(new_trainable_vars, trainable_variables): + v.assign(value) + for (value, v) in zip(new_opt_vars, self.variables): + v.assign(value) + for (n_g_acc, g_acc) in zip(new_g_accs, acc_grads): + g_acc.assign(n_g_acc) + else: + grads = self._clip_gradients(grads) + self._apply_weight_decay(trainable_variables) + self._backend_update_step(grads, trainable_variables, self.learning_rate) + if self.use_ema: + self._update_model_variables_moving_average(self._trainable_variables) + if self.ema_overwrite_frequency is not None: + should_overwrite_model_vars = (self.iterations + 1) % 
self.ema_overwrite_frequency == 0 + should_overwrite_model_vars_int = should_overwrite_model_vars.astype('int32') + should_not_overwrite_model_vars_int = jnp.logical_not(should_overwrite_model_vars).astype('int32') + current_trainable_vars_value = [v.value for v in self._trainable_variables] + for (var, average_var) in zip(self._trainable_variables, self._model_variables_moving_average): + var.assign(average_var * should_overwrite_model_vars_int + var.value * should_not_overwrite_model_vars_int) + self._iterations.assign_add(1) + +# File: keras-master/keras/src/backend/jax/random.py +import jax +from keras.src.backend.config import floatx +from keras.src.random.seed_generator import SeedGenerator +from keras.src.random.seed_generator import draw_seed +from keras.src.random.seed_generator import make_default_seed + +def jax_draw_seed(seed): + if isinstance(seed, jax.Array): + return seed + else: + return draw_seed(seed) + +def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + dtype = dtype or floatx() + seed = jax_draw_seed(seed) + sample = jax.random.normal(seed, shape=shape, dtype=dtype) + return sample * stddev + mean + +def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): + dtype = dtype or floatx() + seed = jax_draw_seed(seed) + return jax.random.uniform(seed, shape=shape, dtype=dtype, minval=minval, maxval=maxval) + +def categorical(logits, num_samples, dtype='int32', seed=None): + seed = jax_draw_seed(seed) + output_shape = list(logits.shape) + output_shape[1] = num_samples + output_shape = tuple(output_shape) + output = jax.random.categorical(seed, logits[..., None], shape=output_shape, axis=1) + return output.astype(dtype) + +def randint(shape, minval, maxval, dtype='int32', seed=None): + seed = jax_draw_seed(seed) + return jax.random.randint(seed, shape=shape, dtype=dtype, minval=minval, maxval=maxval) + +def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + dtype = dtype or floatx() + seed = jax_draw_seed(seed) + sample = jax.random.truncated_normal(seed, shape=shape, lower=-2.0, upper=2.0, dtype=dtype) + return sample * stddev + mean + +def _get_concrete_noise_shape(inputs, noise_shape): + if noise_shape is None: + return inputs.shape + concrete_inputs_shape = inputs.shape + concrete_noise_shape = [] + for (i, value) in enumerate(noise_shape): + concrete_noise_shape.append(concrete_inputs_shape[i] if value is None else value) + return concrete_noise_shape + +def dropout(inputs, rate, noise_shape=None, seed=None): + seed = jax_draw_seed(seed) + keep_prob = 1.0 - rate + noise_shape = _get_concrete_noise_shape(inputs, noise_shape) + mask = jax.random.bernoulli(seed, p=keep_prob, shape=noise_shape) + mask = jax.numpy.broadcast_to(mask, inputs.shape) + return jax.lax.select(mask, inputs / keep_prob, jax.numpy.zeros_like(inputs)) + +def shuffle(x, axis=0, seed=None): + seed = jax_draw_seed(seed) + return jax.random.permutation(seed, x, axis, independent=True) + +def gamma(shape, alpha, dtype=None, seed=None): + seed = jax_draw_seed(seed) + dtype = dtype or floatx() + return jax.random.gamma(seed, alpha, shape=shape, dtype=dtype) + +def binomial(shape, counts, probabilities, dtype=None, seed=None): + dtype = dtype or floatx() + seed = jax_draw_seed(seed) + counts = jax.numpy.array(counts) + probabilities = jax.numpy.array(probabilities) + sample = jax.random.binomial(key=seed, n=counts, p=probabilities, shape=shape, dtype=dtype) + return sample + +def beta(shape, alpha, beta, dtype=None, seed=None): + dtype = dtype or floatx() + seed = 
jax_draw_seed(seed) + alpha = jax.numpy.array(alpha) + beta = jax.numpy.array(beta) + sample = jax.random.beta(key=seed, a=alpha, b=beta, shape=shape, dtype=dtype) + return sample + +# File: keras-master/keras/src/backend/jax/rnn.py +import contextlib +from jax import lax +from jax import numpy as jnp +from keras.src import tree +from keras.src.backend.common import stateless_scope + +def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None, time_major=False, zero_output_for_mask=False, return_all_outputs=True): + + def swap_batch_timestep(input_t): + axes = list(range(len(input_t.shape))) + (axes[0], axes[1]) = (1, 0) + return jnp.transpose(input_t, axes) + if not time_major: + inputs = tree.map_structure(swap_batch_timestep, inputs) + flattened_inputs = tree.flatten(inputs) + time_steps = flattened_inputs[0].shape[0] + if mask is not None: + if mask.dtype != 'bool': + mask = mask.astype('bool') + if len(mask.shape) == 2: + mask = jnp.expand_dims(mask, axis=-1) + if not time_major: + mask = swap_batch_timestep(mask) + if constants is None: + constants = [] + + def _expand_mask(mask_t, input_t, fixed_dim=1): + if tree.is_nested(mask_t): + raise ValueError(f'mask_t is expected to be tensor, but got {mask_t}') + if tree.is_nested(input_t): + raise ValueError(f'input_t is expected to be tensor, but got {input_t}') + rank_diff = len(input_t.shape) - len(mask_t.shape) + for _ in range(rank_diff): + mask_t = jnp.expand_dims(mask_t, -1) + multiples = [1] * fixed_dim + list(input_t.shape[fixed_dim:]) + return jnp.tile(mask_t, multiples) + if unroll: + if not time_steps: + raise ValueError('Unrolling requires a fixed number of timesteps.') + states = tuple(initial_states) + successive_states = [] + successive_outputs = [] + + def _process_single_input_t(input_t): + input_t = unstack(input_t) + if go_backwards: + input_t.reverse() + return input_t + if tree.is_nested(inputs): + processed_input = tree.map_structure(_process_single_input_t, inputs) + else: + processed_input = (_process_single_input_t(inputs),) + + def _get_input_tensor(time): + inp = [t_[time] for t_ in processed_input] + return tree.pack_sequence_as(inputs, inp) + if mask is not None: + mask_list = unstack(mask) + if go_backwards: + mask_list.reverse() + for i in range(time_steps): + inp = _get_input_tensor(i) + mask_t = mask_list[i] + (output, new_states) = step_function(inp, tuple(states) + tuple(constants)) + tiled_mask_t = _expand_mask(mask_t, output) + if not successive_outputs: + prev_output = jnp.zeros_like(output) + else: + prev_output = successive_outputs[-1] + output = jnp.where(tiled_mask_t, output, prev_output) + flat_states = tree.flatten(states) + flat_new_states = tree.flatten(new_states) + tiled_mask_t = tuple((_expand_mask(mask_t, s) for s in flat_states)) + flat_final_states = tuple((jnp.where(m, s, ps) for (m, s, ps) in zip(tiled_mask_t, flat_new_states, flat_states))) + states = tree.pack_sequence_as(states, flat_final_states) + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: + successive_outputs = [output] + successive_states = [states] + last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = jnp.stack(successive_outputs) + else: + for i in range(time_steps): + inp = _get_input_tensor(i) + (output, states) = step_function(inp, tuple(states) + tuple(constants)) + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: 
+ successive_outputs = [output] + successive_states = [states] + last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = jnp.stack(successive_outputs) + else: + if mask is not None: + + def _step(states, current_input): + (current_input, current_mask) = current_input + is_masked = jnp.all(jnp.logical_not(current_mask), axis=-1, keepdims=True) + (output_t, new_states) = step_function(current_input, states) + if zero_output_for_mask: + masked_outs = jnp.where(is_masked, jnp.zeros_like(output_t), output_t) + else: + output_tm1 = states[0] + masked_outs = jnp.where(is_masked, output_tm1, output_t) + new_states = [jnp.where(is_masked, s, ns) for (s, ns) in zip(states, new_states)] + return (new_states, masked_outs) + scan_xs = (inputs, mask) + else: + + def _step(states, current_input): + (output_t, new_states) = step_function(current_input, states) + return (new_states, output_t) + scan_xs = inputs + if stateless_scope.in_stateless_scope(): + scope = contextlib.nullcontext() + else: + scope = stateless_scope.StatelessScope() + with scope: + (new_states, outputs) = lax.scan(f=_step, init=initial_states, xs=scan_xs, reverse=go_backwards) + if go_backwards: + outputs = jnp.flip(outputs, axis=0) + last_output = outputs[-1] + if not time_major: + outputs = tree.map_structure(swap_batch_timestep, outputs) + return (last_output, outputs, new_states) + +def cudnn_ok(*args, **kwargs): + return False + +def lstm(*args, **kwargs): + raise NotImplementedError + +def gru(*args, **kwargs): + raise NotImplementedError + +def unstack(x, axis=0): + return [lax.index_in_dim(x, i, axis, keepdims=False) for i in range(x.shape[axis])] + +# File: keras-master/keras/src/backend/jax/sparse.py +import functools +import jax.experimental.sparse as jax_sparse +import jax.numpy as jnp +from keras.src.utils import jax_utils + +def axis_shape_dims_for_broadcast_in_dim(axis, input_shape, insert_dims): + if axis is None: + raise ValueError('Received `None` value for `axis`') + if isinstance(axis, int): + axis = (axis,) + if len(set(axis)) != len(axis): + raise ValueError(f'Repeated axis in `axis`: {axis}') + result_dims = len(input_shape) + if insert_dims: + result_dims += len(axis) + canonical_axis = [] + for a in axis: + if not -result_dims <= a < result_dims: + raise ValueError(f'In `axis`, axis {a} is out of bounds for array of dimension {result_dims}') + if a < 0: + a = a + result_dims + canonical_axis.append(a) + if len(set(canonical_axis)) != len(canonical_axis): + raise ValueError(f'Repeated axis in `axis`: {canonical_axis}') + canonical_axis = sorted(canonical_axis) + output_shape = list(input_shape) + for i in canonical_axis: + if insert_dims: + output_shape.insert(i, 1) + else: + output_shape[i] = 1 + broadcast_dims = [i for i in range(result_dims) if i not in canonical_axis] + return (canonical_axis, output_shape, broadcast_dims) + +def bcoo_add_indices(x1, x2, sum_duplicates): + x2_zeros = jnp.zeros(x2.data.shape, x1.data.dtype) + concat_axis = len(x1.indices.shape) - 2 + output_indices = jnp.concatenate([x1.indices, x2.indices], axis=concat_axis) + output_data = jnp.concatenate([x1.data, x2_zeros], axis=concat_axis) + output = jax_sparse.BCOO((output_data, output_indices), shape=x1.shape) + if sum_duplicates: + output = jax_sparse.bcoo_sum_duplicates(output) + return output + +def densifying_unary(func): + + @functools.wraps(func) + def sparse_wrapper(x, *args, **kwargs): + if isinstance(x, jax_sparse.JAXSparse): + x = x.todense() + return func(x, *args, **kwargs) + return 
sparse_wrapper + +def elementwise_unary(linear): + + def wrap_elementwise_unary(func): + + @functools.wraps(func) + def sparse_wrapper(x, *args, **kwargs): + if isinstance(x, jax_sparse.BCOO): + if not linear and (not x.unique_indices): + x = jax_sparse.bcoo_sum_duplicates(x) + return jax_sparse.BCOO((func(x.data, *args, **kwargs), x.indices), shape=x.shape) + else: + return func(x, *args, **kwargs) + return sparse_wrapper + return wrap_elementwise_unary + +def elementwise_binary_union(linear, use_sparsify): + + def wrap_elementwise_binary_union(func): + sparse_func = jax_sparse.sparsify(func) if use_sparsify else None + + @functools.wraps(func) + def sparse_wrapper(x1, x2): + if isinstance(x1, jax_sparse.JAXSparse): + if isinstance(x2, jax_sparse.JAXSparse): + if x1.indices is x2.indices and isinstance(x1, jax_sparse.BCOO) and isinstance(x2, jax_sparse.BCOO): + if not linear and (not x1.unique_indices): + x1 = jax_sparse.bcoo_sum_duplicates(x1) + x2 = jax_sparse.bcoo_sum_duplicates(x2) + return jax_sparse.BCOO((func(x1.data, x2.data), x1.indices), shape=x1.shape, indices_sorted=x1.indices_sorted, unique_indices=x1.unique_indices) + elif use_sparsify: + return sparse_func(x1, x2) + elif isinstance(x1, jax_sparse.BCOO) and isinstance(x2, jax_sparse.BCOO): + x1 = bcoo_add_indices(x1, x2, sum_duplicates=not linear) + x2 = bcoo_add_indices(x2, x1, sum_duplicates=not linear) + return jax_sparse.BCOO((func(x1.data, x2.data), x1.indices), shape=x1.shape, indices_sorted=True, unique_indices=True) + else: + raise ValueError(f'Unsupported sparse format: {x1.__class__} and {x2.__class__}') + else: + x1 = x1.todense() + elif isinstance(x2, jax_sparse.JAXSparse): + x2 = x2.todense() + return func(x1, x2) + return sparse_wrapper + return wrap_elementwise_binary_union + +def elementwise_division(func): + sparse_func = jax_sparse.sparsify(func) + + @functools.wraps(func) + def sparse_wrapper(x1, x2): + if isinstance(x1, jax_sparse.JAXSparse): + if isinstance(x2, jax_sparse.JAXSparse): + x1 = x1.todense() + x2 = x2.todense() + elif isinstance(x1, jax_sparse.BCOO): + if not hasattr(x2, 'shape') or len(x2.shape) == 0: + return jax_sparse.BCOO((func(x1.data, x2), x1.indices), shape=x1.shape, indices_sorted=x1.indices_sorted, unique_indices=x1.unique_indices) + else: + if not jax_utils.is_in_jax_tracing_scope(x2): + x2_zeros_and_nans = jnp.equal(x2, 0) + if not jnp.issubdtype(x2.dtype, jnp.integer): + x2_zeros_and_nans = jnp.logical_or(x2_zeros_and_nans, jnp.isnan(x2)) + x2_zeros_and_nans = jax_sparse.bcoo_fromdense(x2_zeros_and_nans, n_batch=x1.n_batch, n_dense=x1.n_dense, index_dtype=x1.indices.dtype) + x1 = bcoo_add_indices(x1, x2_zeros_and_nans, sum_duplicates=True) + return sparse_func(x1, x2) + else: + raise ValueError(f'Unsupported sparse format: {x1.__class__}') + elif isinstance(x2, jax_sparse.JAXSparse): + x2 = x2.todense() + return func(x1, x2) + return sparse_wrapper + +# File: keras-master/keras/src/backend/jax/trainer.py +import collections +import itertools +from functools import partial +import jax +import numpy as np +from keras.src import backend +from keras.src import callbacks as callbacks_module +from keras.src import optimizers as optimizers_module +from keras.src import tree +from keras.src.backend import distribution_lib as jax_distribution_lib +from keras.src.distribution import distribution_lib +from keras.src.trainers import trainer as base_trainer +from keras.src.trainers.data_adapters import array_slicing +from keras.src.trainers.data_adapters import data_adapter_utils +from 
keras.src.trainers.epoch_iterator import EpochIterator +from keras.src.utils import traceback_utils + +class JAXTrainer(base_trainer.Trainer): + + def __init__(self): + super().__init__() + self.train_function = None + self.test_function = None + self.predict_function = None + self._jax_state_synced = True + + def compute_loss_and_updates(self, trainable_variables, non_trainable_variables, metrics_variables, x, y, sample_weight, training=False, optimizer_variables=None): + kwargs = {} + if self._call_has_training_arg: + kwargs['training'] = training + (y_pred, non_trainable_variables, losses) = self.stateless_call(trainable_variables, non_trainable_variables, x, return_losses=True, **kwargs) + if losses: + self._losses_override.clear() + self._losses_override = losses + (loss, variables) = self.stateless_compute_loss(trainable_variables, non_trainable_variables, metrics_variables, x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=training) + if losses: + self._losses_override.clear() + (trainable_variables, non_trainable_variables, metrics_variables) = variables + unscaled_loss = loss + if training and self.optimizer is not None: + mapping = list(zip(self.optimizer.variables, optimizer_variables)) + with backend.StatelessScope(state_mapping=mapping): + loss = self.optimizer.scale_loss(loss) + return (loss, (unscaled_loss, y_pred, non_trainable_variables, metrics_variables)) + + def train_step(self, state, data): + (trainable_variables, non_trainable_variables, optimizer_variables, metrics_variables) = state + (x, y, sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(data) + grad_fn = jax.value_and_grad(self.compute_loss_and_updates, has_aux=True) + ((loss, aux), grads) = grad_fn(trainable_variables, non_trainable_variables, metrics_variables, x, y, sample_weight, training=True, optimizer_variables=optimizer_variables) + (unscaled_loss, y_pred, non_trainable_variables, metrics_variables) = aux + (trainable_variables, optimizer_variables) = self.optimizer.stateless_apply(optimizer_variables, grads, trainable_variables) + with backend.StatelessScope(state_mapping=[(ref_v, v) for (ref_v, v) in zip(self.metrics_variables, metrics_variables)]) as scope: + self._loss_tracker.update_state(unscaled_loss, sample_weight=tree.flatten(x)[0].shape[0]) + logs = self.compute_metrics(x, y, y_pred, sample_weight) + new_metrics_variables = [] + for ref_v in self.metrics_variables: + new_v = scope.get_current_value(ref_v) + if new_v is None: + new_v = ref_v.value + new_metrics_variables.append(new_v) + metrics_variables = new_metrics_variables + state = self._enforce_jax_state_sharding(trainable_variables, non_trainable_variables, optimizer_variables, metrics_variables) + return (logs, state) + + def test_step(self, state, data): + (trainable_variables, non_trainable_variables, metrics_variables) = state + (x, y, sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(data) + (loss, aux) = self.compute_loss_and_updates(trainable_variables, non_trainable_variables, metrics_variables, x, y, sample_weight, training=False) + (unscaled_loss, y_pred, non_trainable_variables, metrics_variables) = aux + with backend.StatelessScope(state_mapping=[(ref_v, v) for (ref_v, v) in zip(self.metrics_variables, metrics_variables)]) as scope: + self._loss_tracker.update_state(unscaled_loss, sample_weight=tree.flatten(x)[0].shape[0]) + logs = self.compute_metrics(x, y, y_pred, sample_weight) + new_metrics_variables = [] + for ref_v in self.metrics_variables: + new_v = scope.get_current_value(ref_v) + 
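# get_current_value returns None for variables the stateless scope never updated; keep their previous values in that case. +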
if new_v is None: + new_v = ref_v.value + new_metrics_variables.append(new_v) + metrics_variables = new_metrics_variables + (trainable_variables, non_trainable_variables, _, metrics_variables) = self._enforce_jax_state_sharding(trainable_variables=trainable_variables, non_trainable_variables=non_trainable_variables, optimizer_variables=None, metrics_variables=metrics_variables) + state = (trainable_variables, non_trainable_variables, metrics_variables) + return (logs, state) + + def predict_step(self, state, data): + (trainable_variables, non_trainable_variables) = state + kwargs = {} + if self._call_has_training_arg: + kwargs['training'] = False + (x, _, _) = data_adapter_utils.unpack_x_y_sample_weight(data) + (outputs, non_trainable_variables) = self.stateless_call(trainable_variables, non_trainable_variables, x, **kwargs) + (_, non_trainable_variables, _, _) = self._enforce_jax_state_sharding(trainable_variables=None, non_trainable_variables=non_trainable_variables, optimizer_variables=None, metrics_variables=None) + return (outputs, non_trainable_variables) + + def make_train_function(self, force=False): + if self.train_function is not None and (not force): + return + + def one_train_step(state, data): + data = data[0] + return self.train_step(state, data) + + def multi_train_steps(state, data): + for single_step_data in data: + (logs, state) = one_train_step(state, [single_step_data]) + return (logs, state) + if self.steps_per_execution > 1: + train_step = multi_train_steps + else: + train_step = one_train_step + if not self.run_eagerly and self.jit_compile: + + @partial(jax.jit, donate_argnames='state') + def compiled_train_step(state, data): + return train_step(state, data) + self.train_function = compiled_train_step + else: + self.train_function = train_step + + def make_test_function(self, force=False): + if self.test_function is not None and (not force): + return + + def one_test_step(state, data): + data = data[0] + return self.test_step(state, data) + + def multi_test_steps(state, data): + for single_step_data in data: + (logs, state) = one_test_step(state, [single_step_data]) + return (logs, state) + if self.steps_per_execution > 1: + test_step = multi_test_steps + else: + test_step = one_test_step + if not self.run_eagerly and self.jit_compile: + + @partial(jax.jit, donate_argnames='state') + def compiled_test_step(state, data): + return test_step(state, data) + self.test_function = compiled_test_step + else: + self.test_function = test_step + + def make_predict_function(self, force=False): + if self.predict_function is not None and (not force): + return self.predict_function + + def one_predict_step(state, data): + data = data[0] + return self.predict_step(state, data) + + def multi_predict_steps(state, data): + (outputs, trainable_variables) = one_predict_step(state, data[:1]) + for single_step_data in data[1:]: + (step_outputs, trainable_variables) = one_predict_step(state, [single_step_data]) + outputs = tree.map_structure(lambda t1, t2: jax.numpy.concatenate([t1, t2]), outputs, step_outputs) + return (outputs, trainable_variables) + if self.steps_per_execution > 1: + predict_step = multi_predict_steps + else: + predict_step = one_predict_step + if not self.run_eagerly and self.jit_compile: + + @jax.jit + def compiled_predict_step(state, data): + return predict_step(state, data) + self.predict_function = compiled_predict_step + else: + self.predict_function = predict_step + + @traceback_utils.filter_traceback + def fit(self, x=None, y=None, batch_size=None, epochs=1, 
verbose='auto', callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_batch_size=None, validation_freq=1): + self._assert_compile_called('fit') + self._eval_epoch_iterator = None + if validation_split and validation_data is None: + ((x, y, sample_weight), validation_data) = array_slicing.train_validation_split((x, y, sample_weight), validation_split=validation_split) + if validation_data is not None: + (val_x, val_y, val_sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(validation_data) + epoch_iterator = JAXEpochIterator(x=x, y=y, sample_weight=sample_weight, batch_size=batch_size, steps_per_epoch=steps_per_epoch, shuffle=shuffle, class_weight=class_weight, steps_per_execution=self.steps_per_execution) + self._symbolic_build(iterator=epoch_iterator) + if not isinstance(callbacks, callbacks_module.CallbackList): + callbacks = callbacks_module.CallbackList(callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=epochs, steps=epoch_iterator.num_batches, model=self) + self._record_training_state_sharding_spec() + self.make_train_function() + self.stop_training = False + callbacks.on_train_begin() + initial_epoch = self._initial_epoch or initial_epoch + for epoch in range(initial_epoch, epochs): + self.reset_metrics() + callbacks.on_epoch_begin(epoch) + self._jax_state_synced = True + for (step, data) in epoch_iterator.enumerate_epoch(): + callbacks.on_train_batch_begin(step) + if self._jax_state_synced: + state = self._get_jax_state(trainable_variables=True, non_trainable_variables=True, optimizer_variables=True, metrics_variables=True, purge_model_variables=True) + self._jax_state_synced = False + (logs, state) = self.train_function(state, data) + (trainable_variables, non_trainable_variables, optimizer_variables, metrics_variables) = state + self._jax_state = {'trainable_variables': trainable_variables, 'non_trainable_variables': non_trainable_variables, 'optimizer_variables': optimizer_variables, 'metrics_variables': metrics_variables} + logs = self._pythonify_logs(logs) + callbacks.on_train_batch_end(step, logs) + if self.stop_training: + break + self.jax_state_sync() + with jax.spmd_mode('allow_all'): + epoch_logs = dict(self._get_metrics_result_or_logs(logs)) + if validation_data is not None and self._should_eval(epoch, validation_freq): + if getattr(self, '_eval_epoch_iterator', None) is None: + self._eval_epoch_iterator = JAXEpochIterator(x=val_x, y=val_y, sample_weight=val_sample_weight, batch_size=validation_batch_size or batch_size, steps_per_execution=self.steps_per_execution, steps_per_epoch=validation_steps, shuffle=False) + val_logs = self.evaluate(x=val_x, y=val_y, sample_weight=val_sample_weight, batch_size=validation_batch_size or batch_size, steps=validation_steps, callbacks=callbacks, return_dict=True, _use_cached_eval_dataset=True) + val_logs = {'val_' + name: val for (name, val) in val_logs.items()} + epoch_logs.update(val_logs) + callbacks.on_epoch_end(epoch, epoch_logs) + training_logs = epoch_logs + if self.stop_training: + break + if isinstance(self.optimizer, optimizers_module.Optimizer) and epochs > 0: + self.optimizer.finalize_variable_values(self.trainable_weights) + if getattr(self, '_eval_epoch_iterator', None) is not None: + del self._eval_epoch_iterator + callbacks.on_train_end(logs=training_logs) + self._jax_state = None + return self.history + + @traceback_utils.filter_traceback + def 
evaluate(self, x=None, y=None, batch_size=None, verbose='auto', sample_weight=None, steps=None, callbacks=None, return_dict=False, **kwargs): + self._assert_compile_called('evaluate') + use_cached_eval_dataset = kwargs.pop('_use_cached_eval_dataset', False) + if kwargs: + raise ValueError(f'Arguments not recognized: {kwargs}') + if use_cached_eval_dataset: + epoch_iterator = self._eval_epoch_iterator + else: + epoch_iterator = JAXEpochIterator(x=x, y=y, sample_weight=sample_weight, batch_size=batch_size, steps_per_epoch=steps, shuffle=False, steps_per_execution=self.steps_per_execution) + self._symbolic_build(iterator=epoch_iterator) + if not isinstance(callbacks, callbacks_module.CallbackList): + callbacks = callbacks_module.CallbackList(callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=1, steps=epoch_iterator.num_batches, model=self) + self._record_training_state_sharding_spec() + self.make_test_function() + self.stop_evaluating = False + callbacks.on_test_begin() + logs = {} + self.reset_metrics() + self._jax_state_synced = True + for (step, data) in epoch_iterator.enumerate_epoch(): + callbacks.on_test_batch_begin(step) + if self._jax_state_synced: + state = self._get_jax_state(trainable_variables=True, non_trainable_variables=True, metrics_variables=True, purge_model_variables=True) + self._jax_state_synced = False + (logs, state) = self.test_function(state, data) + (trainable_variables, non_trainable_variables, metrics_variables) = state + self._jax_state = {'trainable_variables': trainable_variables, 'non_trainable_variables': non_trainable_variables, 'metrics_variables': metrics_variables} + logs = self._pythonify_logs(logs) + callbacks.on_test_batch_end(step, logs) + if self.stop_evaluating: + break + self.jax_state_sync() + with jax.spmd_mode('allow_all'): + logs = self._get_metrics_result_or_logs(logs) + callbacks.on_test_end(logs) + self._jax_state = None + if return_dict: + return logs + return self._flatten_metrics_in_order(logs) + + @traceback_utils.filter_traceback + def predict(self, x, batch_size=None, verbose='auto', steps=None, callbacks=None): + epoch_iterator = JAXEpochIterator(x=x, batch_size=batch_size, steps_per_epoch=steps, shuffle=False, steps_per_execution=self.steps_per_execution) + if not all((layer.built for layer in self._flatten_layers())): + for (_, data) in epoch_iterator.enumerate_epoch(): + (x, _, _) = data_adapter_utils.unpack_x_y_sample_weight(data[0]) + with backend.StatelessScope(): + self(x) + break + if not isinstance(callbacks, callbacks_module.CallbackList): + callbacks = callbacks_module.CallbackList(callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=1, steps=epoch_iterator.num_batches, model=self) + self._record_training_state_sharding_spec() + self.make_predict_function() + self.stop_predicting = False + callbacks.on_predict_begin() + + def append_to_outputs(batch_outputs, outputs): + if outputs is None: + outputs = tree.map_structure(lambda batch_output: [batch_output], batch_outputs) + else: + tree.map_structure_up_to(batch_outputs, lambda output, batch_output: output.append(batch_output), outputs, batch_outputs) + return outputs + self._jax_state_synced = True + outputs = None + non_trainable_variables = None + for (step, x) in epoch_iterator.enumerate_epoch(): + callbacks.on_predict_batch_begin(step) + if self._jax_state_synced: + state = self._get_jax_state(trainable_variables=True, non_trainable_variables=True) + self._purge_model_variables(non_trainable_variables=True) + 
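+ # The host-side copies were purged above, so until `jax_state_sync()` runs,
+ # the authoritative values live only in `state` / `non_trainable_variables`.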
self._jax_state_synced = False + else: + state = (state[0], non_trainable_variables) + (batch_outputs, non_trainable_variables) = self.predict_function(state, x) + outputs = append_to_outputs(batch_outputs, outputs) + callbacks.on_predict_batch_end(step, {'outputs': batch_outputs}) + if self.stop_predicting: + break + self._jax_state = {'non_trainable_variables': non_trainable_variables} + self.jax_state_sync() + callbacks.on_predict_end() + self._jax_state = None + return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs) + + def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None, return_dict=False): + self._assert_compile_called('train_on_batch') + if class_weight is not None: + if sample_weight is not None: + raise ValueError(f'Arguments `sample_weight` and `class_weight` cannot be specified at the same time. Received: sample_weight={sample_weight}, class_weight={class_weight}') + sample_weight = data_adapter_utils.class_weight_to_sample_weights(y, class_weight) + data = (x, y, sample_weight) + data = _distribute_data(data) + self._symbolic_build(data_batch=data) + self._record_training_state_sharding_spec() + self.make_train_function() + state = self._get_jax_state(trainable_variables=True, non_trainable_variables=True, optimizer_variables=True, metrics_variables=True, purge_model_variables=False) + self._jax_state_synced = False + (logs, state) = self.train_function(state, [data]) + (trainable_variables, non_trainable_variables, optimizer_variables, metrics_variables) = state + self._jax_state = {'trainable_variables': trainable_variables, 'non_trainable_variables': non_trainable_variables, 'optimizer_variables': optimizer_variables, 'metrics_variables': metrics_variables} + self.jax_state_sync() + logs = tree.map_structure(lambda x: np.array(x), logs) + if return_dict: + return logs + return self._flatten_metrics_in_order(logs) + + def test_on_batch(self, x, y=None, sample_weight=None, return_dict=False): + self._assert_compile_called('test_on_batch') + data = (x, y, sample_weight) + data = _distribute_data(data) + self._symbolic_build(data_batch=data) + self._record_training_state_sharding_spec() + self.make_test_function() + state = self._get_jax_state(trainable_variables=True, non_trainable_variables=True, metrics_variables=True, purge_model_variables=False) + self._jax_state_synced = False + (logs, state) = self.test_function(state, [data]) + (trainable_variables, non_trainable_variables, metrics_variables) = state + self._jax_state = {'trainable_variables': trainable_variables, 'non_trainable_variables': non_trainable_variables, 'metrics_variables': metrics_variables} + self.jax_state_sync() + logs = tree.map_structure(lambda x: np.array(x), logs) + if return_dict: + return logs + return self._flatten_metrics_in_order(logs) + + def predict_on_batch(self, x): + if not all((layer.built for layer in self._flatten_layers())): + with backend.StatelessScope(): + self(x) + self._record_training_state_sharding_spec() + self.make_predict_function() + state = self._get_jax_state(trainable_variables=True, non_trainable_variables=True, metrics_variables=False, purge_model_variables=False) + self._jax_state_synced = False + (batch_outputs, non_trainable_variables) = self.predict_function(state, [(x,)]) + self._jax_state = {'non_trainable_variables': non_trainable_variables} + self.jax_state_sync() + batch_outputs = tree.map_structure(lambda x: np.array(x), batch_outputs) + return batch_outputs + + def jax_state_sync(self): + if not getattr(self, '_jax_state', 
None) or self._jax_state_synced: + return + trainable_variables = self._jax_state.get('trainable_variables', None) + non_trainable_variables = self._jax_state.get('non_trainable_variables', None) + optimizer_variables = self._jax_state.get('optimizer_variables', None) + metrics_variables = self._jax_state.get('metrics_variables', None) + if trainable_variables: + for (ref_v, v) in zip(self.trainable_variables, trainable_variables): + ref_v.assign(v) + if non_trainable_variables: + for (ref_v, v) in zip(self.non_trainable_variables, non_trainable_variables): + ref_v.assign(v) + if optimizer_variables: + for (ref_v, v) in zip(self.optimizer.variables, optimizer_variables): + ref_v.assign(v) + if metrics_variables: + for (ref_v, v) in zip(self.metrics_variables, metrics_variables): + ref_v.assign(v) + self._jax_state_synced = True + + def _record_training_state_sharding_spec(self): + self._trainable_variable_shardings = [v.value.sharding for v in self.trainable_variables] + self._non_trainable_variable_shardings = [v.value.sharding for v in self.non_trainable_variables] + if hasattr(self, 'optimizer') and self.optimizer is not None: + self._optimizer_variable_shardings = [v.value.sharding for v in self.optimizer.variables] + else: + self._optimizer_variable_shardings = [] + self._metrics_variable_shardings = [v.value.sharding for v in self.metrics_variables] + + def _enforce_jax_state_sharding(self, trainable_variables=None, non_trainable_variables=None, optimizer_variables=None, metrics_variables=None): + trainable_variables = trainable_variables or [] + non_trainable_variables = non_trainable_variables or [] + optimizer_variables = optimizer_variables or [] + metrics_variables = metrics_variables or [] + for i in range(len(trainable_variables)): + trainable_variables[i] = jax.lax.with_sharding_constraint(trainable_variables[i], self._trainable_variable_shardings[i]) + for i in range(len(non_trainable_variables)): + non_trainable_variables[i] = jax.lax.with_sharding_constraint(non_trainable_variables[i], self._non_trainable_variable_shardings[i]) + for i in range(len(optimizer_variables)): + optimizer_variables[i] = jax.lax.with_sharding_constraint(optimizer_variables[i], self._optimizer_variable_shardings[i]) + for i in range(len(metrics_variables)): + metrics_variables[i] = jax.lax.with_sharding_constraint(metrics_variables[i], self._metrics_variable_shardings[i]) + return (trainable_variables, non_trainable_variables, optimizer_variables, metrics_variables) + + def _purge_model_variables(self, trainable_variables=False, non_trainable_variables=False, optimizer_variables=False, metrics_variables=False): + if trainable_variables: + for v in self.trainable_variables: + v._value = None + if non_trainable_variables: + for v in self.non_trainable_variables: + v._value = None + if optimizer_variables: + for v in self.optimizer.variables: + v._value = None + if metrics_variables: + for v in self.metrics_variables: + v._value = None + + def _get_jax_state(self, trainable_variables=False, non_trainable_variables=False, optimizer_variables=False, metrics_variables=False, purge_model_variables=False): + state = [] + if trainable_variables: + state.append([v.value for v in self.trainable_variables]) + if non_trainable_variables: + state.append([v.value for v in self.non_trainable_variables]) + if optimizer_variables: + state.append([v.value for v in self.optimizer.variables]) + if metrics_variables: + state.append([v.value for v in self.metrics_variables]) + if purge_model_variables: + 
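+ # The raw values were captured into `state` above; clearing the model
+ # variables avoids keeping a second copy of every buffer alive.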
self._purge_model_variables(trainable_variables=trainable_variables, non_trainable_variables=non_trainable_variables, optimizer_variables=optimizer_variables, metrics_variables=metrics_variables) + return tuple(state) + +def _distribute_data(data, layouts=None): + distribution = distribution_lib.distribution() + if distribution is not None: + if layouts is None: + layouts = tree.map_structure(lambda d: distribution.get_data_layout(d.shape), data) + return tree.map_structure(jax_distribution_lib.distribute_data_input, data, layouts) + return tree.map_structure(jax.device_put, data) + +class JAXEpochIterator(EpochIterator): + + def _get_iterator(self): + distribution = distribution_lib.distribution() + if distribution is not None: + return self._get_distributed_iterator(distribution) + return self._prefetch_numpy_iterator(self.data_adapter.get_jax_iterator()) + + def _get_distributed_iterator(self, distribution): + layouts = None + for data in self.data_adapter.get_jax_iterator(): + if layouts is None: + layouts = tree.map_structure(lambda d: jax_distribution_lib._to_jax_layout(distribution.get_data_layout(d.shape)), data) + yield _distribute_data(data, layouts) + + def _prefetch_numpy_iterator(self, numpy_iterator): + queue = collections.deque() + + def enqueue(n=2): + for data in itertools.islice(numpy_iterator, n): + queue.append(_distribute_data(data)) + enqueue(n=2) + while queue: + yield queue.popleft() + enqueue(1) + +# File: keras-master/keras/src/backend/numpy/__init__.py +from keras.src.backend.numpy import core +from keras.src.backend.numpy import image +from keras.src.backend.numpy import linalg +from keras.src.backend.numpy import math +from keras.src.backend.numpy import nn +from keras.src.backend.numpy import numpy +from keras.src.backend.numpy import random +from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS +from keras.src.backend.numpy.core import Variable +from keras.src.backend.numpy.core import cast +from keras.src.backend.numpy.core import compute_output_spec +from keras.src.backend.numpy.core import cond +from keras.src.backend.numpy.core import convert_to_numpy +from keras.src.backend.numpy.core import convert_to_tensor +from keras.src.backend.numpy.core import is_tensor +from keras.src.backend.numpy.core import random_seed_dtype +from keras.src.backend.numpy.core import shape +from keras.src.backend.numpy.core import vectorized_map +from keras.src.backend.numpy.rnn import cudnn_ok +from keras.src.backend.numpy.rnn import gru +from keras.src.backend.numpy.rnn import lstm +from keras.src.backend.numpy.rnn import rnn + +# File: keras-master/keras/src/backend/numpy/core.py +import builtins +import functools +import warnings +import numpy as np +from keras.src import tree +from keras.src.backend.common import KerasVariable +from keras.src.backend.common import standardize_dtype +from keras.src.backend.common.backend_utils import slice_along_axis +from keras.src.backend.common.dtypes import result_type +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.backend.common.symbolic_scope import SymbolicScope +SUPPORTS_SPARSE_TENSORS = False + +class Variable(KerasVariable): + + def _initialize(self, value): + self._value = np.array(value, dtype=self._dtype) + + def _direct_assign(self, value): + self._value = np.array(value, dtype=self._dtype) + + def _convert_to_tensor(self, value, dtype=None): + return convert_to_tensor(value, dtype=dtype) + + def __array__(self): + 
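+ # Lets NumPy consume the variable directly, e.g. np.mean(variable).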
return self.value + +def convert_to_tensor(x, dtype=None, sparse=None): + if sparse: + raise ValueError('`sparse=True` is not supported with numpy backend') + if dtype is not None: + dtype = standardize_dtype(dtype) + if isinstance(x, Variable): + if dtype and dtype != x.dtype: + return x.value.astype(dtype) + return x.value + if not is_tensor(x) and standardize_dtype(dtype) == 'bfloat16': + return np.asarray(x).astype(dtype) + if dtype is None: + dtype = result_type(*[getattr(item, 'dtype', type(item)) for item in tree.flatten(x)]) + return np.array(x, dtype=dtype) + +def convert_to_numpy(x): + return np.array(x) + +def is_tensor(x): + return isinstance(x, (np.generic, np.ndarray)) + +def shape(x): + return x.shape + +def cast(x, dtype): + return convert_to_tensor(x, dtype=dtype) + +def cond(pred, true_fn, false_fn): + if pred: + return true_fn() + return false_fn() + +def vectorized_map(function, elements): + if not isinstance(elements, (list, tuple)): + return np.stack([function(x) for x in elements]) + else: + batch_size = elements[0].shape[0] + output_store = [] + for index in range(batch_size): + output_store.append(function([x[index] for x in elements])) + return np.stack(output_store) + +def compute_output_spec(fn, *args, **kwargs): + with StatelessScope(), SymbolicScope(): + + def has_none_shape(x): + if isinstance(x, KerasTensor): + return None in x.shape + return False + none_in_shape = any(builtins.map(has_none_shape, tree.flatten((args, kwargs)))) + + def convert_keras_tensor_to_numpy(x, fill_value=None): + if isinstance(x, KerasTensor): + shape = list(x.shape) + if fill_value: + for (i, e) in enumerate(shape): + if e is None: + shape[i] = fill_value + return np.empty(shape=shape, dtype=x.dtype) + return x + (args_1, kwargs_1) = tree.map_structure(lambda x: convert_keras_tensor_to_numpy(x, fill_value=83), (args, kwargs)) + outputs_1 = fn(*args_1, **kwargs_1) + outputs = outputs_1 + if none_in_shape: + (args_2, kwargs_2) = tree.map_structure(lambda x: convert_keras_tensor_to_numpy(x, fill_value=89), (args, kwargs)) + outputs_2 = fn(*args_2, **kwargs_2) + flat_out_1 = tree.flatten(outputs_1) + flat_out_2 = tree.flatten(outputs_2) + flat_out = [] + for (x1, x2) in zip(flat_out_1, flat_out_2): + shape = list(x1.shape) + for (i, e) in enumerate(x2.shape): + if e != shape[i]: + shape[i] = None + flat_out.append(KerasTensor(shape, standardize_dtype(x1.dtype))) + outputs = tree.pack_sequence_as(outputs_1, flat_out) + + def convert_numpy_to_keras_tensor(x): + if is_tensor(x): + return KerasTensor(x.shape, standardize_dtype(x.dtype)) + return x + output_spec = tree.map_structure(convert_numpy_to_keras_tensor, outputs) + return output_spec + +def map(f, xs): + + def g(_, x): + return ((), f(x)) + (_, ys) = scan(g, (), xs) + return ys + +def scan(f, init, xs=None, length=None, reverse=False, unroll=1): + if not callable(f): + raise TypeError(f'`f` should be a callable. Received: f={f}') + if not isinstance(unroll, bool): + if not isinstance(unroll, int) or unroll < 1: + raise ValueError(f'`unroll` must be a positive integer or boolean. 
Received: unroll={unroll}') + if xs is None and length is None: + raise ValueError('Got no `xs` to scan over and `length` not provided.') + input_is_sequence = tree.is_nested(xs) + output_is_sequence = tree.is_nested(init) + + def pack_input(x): + return tree.pack_sequence_as(xs, x) if input_is_sequence else x[0] + + def pack_output(x): + return tree.pack_sequence_as(init, x) if output_is_sequence else x[0] + if xs is None: + xs_flat = [] + n = int(length) + else: + xs_flat = tree.flatten(xs) + xs_flat = [convert_to_tensor(elem) for elem in xs_flat] + n = int(length) if length is not None else shape(xs_flat[0])[0] + init_flat = tree.flatten(init) + init_flat = [convert_to_tensor(init) for init in init_flat] + init = pack_output(init_flat) + dummy_y = [np.zeros_like(init) for init in init_flat] + carry = init + ys = [] + maybe_reversed = reversed if reverse else lambda x: x + for i in maybe_reversed(range(n)): + xs_slice = [x[i] for x in xs_flat] + packed_xs = pack_input(xs_slice) if len(xs_slice) > 0 else None + (carry, y) = f(carry, packed_xs) + ys.append(y if y is not None else dummy_y) + stacked_y = tree.map_structure(lambda *ys: np.stack(ys), *maybe_reversed(ys)) + return (carry, stacked_y) + +def associative_scan(f, elems, reverse=False, axis=0): + if not callable(f): + raise TypeError(f'`f` should be a callable. Received: f={f}') + elems_flat = tree.flatten(elems) + elems_flat = [convert_to_tensor(elem) for elem in elems_flat] + if reverse: + elems_flat = [np.flip(elem, (axis,)) for elem in elems_flat] + + def _combine(a_flat, b_flat): + a = tree.pack_sequence_as(elems, a_flat) + b = tree.pack_sequence_as(elems, b_flat) + c = f(a, b) + c_flat = tree.flatten(c) + return c_flat + num_elems = int(elems_flat[0].shape[axis]) + if not all((int(elem.shape[axis]) == num_elems for elem in elems_flat[1:])): + raise ValueError('Array inputs to associative_scan must have the same first dimension. 
(saw: {})'.format([elem.shape for elem in elems_flat])) + + def _interleave(a, b, axis): + assert a.shape[axis] == b.shape[axis] or a.shape[axis] == b.shape[axis] + 1 + a_shape = list(a.shape) + a_shape[axis] = a.shape[axis] * 2 - 1 + b_shape = list(b.shape) + b_shape[axis] = b.shape[axis] * 2 - 1 + a_dil = np.zeros(a_shape) + np.copyto(slice_along_axis(a_dil, 0, None, 2, axis), a) + b_dil = np.zeros(b_shape) + np.copyto(slice_along_axis(b_dil, 0, None, 2, axis), b) + a_pad = [[0, 0] for _ in range(a.ndim)] + a_pad[axis][-1] = 1 if a.shape[axis] == b.shape[axis] else 0 + b_pad = [[0, 0] for _ in range(b.ndim)] + b_pad[axis] = [1, 0] if a.shape[axis] == b.shape[axis] else [1, 1] + op = np.bitwise_or if a.dtype == np.bool_ else np.add + return op(np.pad(a_dil, a_pad), np.pad(b_dil, b_pad)) + + def _scan(elems): + num_elems = elems[0].shape[axis] + if num_elems < 2: + return elems + reduced_elems = _combine([slice_along_axis(elem, 0, -1, step=2, axis=axis) for elem in elems], [slice_along_axis(elem, 1, None, step=2, axis=axis) for elem in elems]) + odd_elems = _scan(reduced_elems) + if num_elems % 2 == 0: + even_elems = _combine([slice_along_axis(e, 0, -1, axis=axis) for e in odd_elems], [slice_along_axis(e, 2, None, step=2, axis=axis) for e in elems]) + else: + even_elems = _combine(odd_elems, [slice_along_axis(e, 2, None, step=2, axis=axis) for e in elems]) + even_elems = [np.concatenate([slice_along_axis(elem, 0, 1, axis=axis), result], axis=axis) for (elem, result) in zip(elems, even_elems)] + return list(builtins.map(functools.partial(_interleave, axis=axis), even_elems, odd_elems)) + scans = _scan(elems_flat) + if reverse: + scans = [np.flip(scanned, (axis,)) for scanned in scans] + return tree.pack_sequence_as(elems, scans) + +def scatter(indices, values, shape): + indices = convert_to_tensor(indices) + values = convert_to_tensor(values) + zeros = np.zeros(shape, dtype=values.dtype) + index_length = indices.shape[-1] + value_shape = shape[index_length:] + indices = np.reshape(indices, [-1, index_length]) + values = np.reshape(values, [-1] + list(value_shape)) + for i in range(indices.shape[0]): + index = indices[i] + zeros[tuple(index)] += values[i] + return zeros + +def scatter_update(inputs, indices, updates): + indices = np.array(indices) + indices = np.transpose(indices) + inputs[tuple(indices)] = updates + return inputs + +def slice(inputs, start_indices, lengths): + assert len(start_indices) == len(lengths) + indices = [np.arange(start, start + length) for (start, length) in zip(start_indices, lengths)] + mesh = np.ix_(*indices) + return inputs[mesh] + +def slice_update(inputs, start_indices, updates): + indices = [np.arange(start, start + length) for (start, length) in zip(start_indices, updates.shape)] + mesh = np.ix_(*indices) + inputs[mesh] = updates + return inputs + +def switch(index, branches, *operands): + index = convert_to_tensor(index, 'int32') + index = np.clip(index, 0, len(branches) - 1) + return branches[index](*operands) + +def while_loop(cond, body, loop_vars, maximum_iterations=None): + current_iter = 0 + iteration_check = lambda iter: maximum_iterations is None or iter < maximum_iterations + is_tuple = isinstance(loop_vars, (tuple, list)) + loop_vars = tuple(loop_vars) if is_tuple else (loop_vars,) + loop_vars = tree.map_structure(convert_to_tensor, loop_vars) + while cond(*loop_vars) and iteration_check(current_iter): + loop_vars = body(*loop_vars) + if not isinstance(loop_vars, (list, tuple)): + loop_vars = (loop_vars,) + loop_vars = tuple(loop_vars) + 
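+ # The body may return a bare value; it was re-wrapped above so that
+ # `*loop_vars` unpacking keeps working on the next iteration.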
current_iter += 1 + return loop_vars if is_tuple else loop_vars[0] + +def fori_loop(lower, upper, body_fun, init_val): + val = init_val + for i in range(lower, upper): + val = body_fun(i, val) + return val + +def stop_gradient(x): + return x + +def unstack(x, num=None, axis=0): + x = np.moveaxis(x, axis, 0) + return [x[i] for i in range(x.shape[0])] + +def random_seed_dtype(): + return 'uint32' + +class custom_gradient: + + def __init__(self, fun): + warnings.warn('`custom_gradient` for the numpy backend acts as a pass-through to support the forward pass. No gradient computation or modification takes place.') + self.fun = fun + + def __call__(self, *args, **kwargs): + (outputs, _) = self.fun(*args, **kwargs) + return outputs + +# File: keras-master/keras/src/backend/numpy/image.py +import jax +import numpy as np +from keras.src import backend +from keras.src.backend.numpy.core import convert_to_tensor +from keras.src.utils.module_utils import scipy +RESIZE_INTERPOLATIONS = ('bilinear', 'nearest', 'lanczos3', 'lanczos5', 'bicubic') + +def rgb_to_grayscale(images, data_format=None): + images = convert_to_tensor(images) + data_format = backend.standardize_data_format(data_format) + channels_axis = -1 if data_format == 'channels_last' else -3 + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + original_dtype = images.dtype + compute_dtype = backend.result_type(images.dtype, float) + images = images.astype(compute_dtype) + rgb_weights = np.array([0.2989, 0.587, 0.114], dtype=images.dtype) + grayscales = np.tensordot(images, rgb_weights, axes=(channels_axis, -1)) + grayscales = np.expand_dims(grayscales, axis=channels_axis) + return grayscales.astype(original_dtype) + +def rgb_to_hsv(images, data_format=None): + images = convert_to_tensor(images) + dtype = images.dtype + data_format = backend.standardize_data_format(data_format) + channels_axis = -1 if data_format == 'channels_last' else -3 + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if not backend.is_float_dtype(dtype): + raise ValueError(f'Invalid images dtype: expected float dtype. 
Received: images.dtype={backend.standardize_dtype(dtype)}') + eps = np.finfo(dtype).eps + images = np.where(np.abs(images) < eps, 0.0, images) + (red, green, blue) = np.split(images, 3, channels_axis) + red = np.squeeze(red, channels_axis) + green = np.squeeze(green, channels_axis) + blue = np.squeeze(blue, channels_axis) + + def rgb_planes_to_hsv_planes(r, g, b): + value = np.maximum(np.maximum(r, g), b) + minimum = np.minimum(np.minimum(r, g), b) + range_ = value - minimum + safe_value = np.where(value > 0, value, 1.0) + safe_range = np.where(range_ > 0, range_, 1.0) + saturation = np.where(value > 0, range_ / safe_value, 0.0) + norm = 1.0 / (6.0 * safe_range) + hue = np.where(value == g, norm * (b - r) + 2.0 / 6.0, norm * (r - g) + 4.0 / 6.0) + hue = np.where(value == r, norm * (g - b), hue) + hue = np.where(range_ > 0, hue, 0.0) + (hue < 0.0).astype(hue.dtype) + return (hue, saturation, value) + images = np.stack(rgb_planes_to_hsv_planes(red, green, blue), axis=channels_axis) + return images.astype(dtype) + +def hsv_to_rgb(images, data_format=None): + images = convert_to_tensor(images) + dtype = images.dtype + data_format = backend.standardize_data_format(data_format) + channels_axis = -1 if data_format == 'channels_last' else -3 + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if not backend.is_float_dtype(dtype): + raise ValueError(f'Invalid images dtype: expected float dtype. Received: images.dtype={backend.standardize_dtype(dtype)}') + (hue, saturation, value) = np.split(images, 3, channels_axis) + hue = np.squeeze(hue, channels_axis) + saturation = np.squeeze(saturation, channels_axis) + value = np.squeeze(value, channels_axis) + + def hsv_planes_to_rgb_planes(hue, saturation, value): + dh = np.mod(hue, 1.0) * 6.0 + dr = np.clip(np.abs(dh - 3.0) - 1.0, 0.0, 1.0) + dg = np.clip(2.0 - np.abs(dh - 2.0), 0.0, 1.0) + db = np.clip(2.0 - np.abs(dh - 4.0), 0.0, 1.0) + one_minus_s = 1.0 - saturation + red = value * (one_minus_s + saturation * dr) + green = value * (one_minus_s + saturation * dg) + blue = value * (one_minus_s + saturation * db) + return (red, green, blue) + images = np.stack(hsv_planes_to_rgb_planes(hue, saturation, value), axis=channels_axis) + return images.astype(dtype) + +def resize(images, size, interpolation='bilinear', antialias=False, crop_to_aspect_ratio=False, pad_to_aspect_ratio=False, fill_mode='constant', fill_value=0.0, data_format=None): + data_format = backend.standardize_data_format(data_format) + if interpolation not in RESIZE_INTERPOLATIONS: + raise ValueError(f'Invalid value for argument `interpolation`. Expected one of {RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}') + if fill_mode != 'constant': + raise ValueError(f"Invalid value for argument `fill_mode`. Only `'constant'` is supported. Received: fill_mode={fill_mode}") + if pad_to_aspect_ratio and crop_to_aspect_ratio: + raise ValueError('Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` can be `True`.') + if len(size) != 2: + raise ValueError(f'Argument `size` must be a tuple of two elements (height, width). 
Received: size={size}') + size = tuple(size) + (target_height, target_width) = size + if len(images.shape) == 4: + if data_format == 'channels_last': + size = (images.shape[0],) + size + (images.shape[-1],) + else: + size = (images.shape[0], images.shape[1]) + size + elif len(images.shape) == 3: + if data_format == 'channels_last': + size = size + (images.shape[-1],) + else: + size = (images.shape[0],) + size + else: + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if crop_to_aspect_ratio: + shape = images.shape + if data_format == 'channels_last': + (height, width) = (shape[-3], shape[-2]) + else: + (height, width) = (shape[-2], shape[-1]) + crop_height = int(float(width * target_height) / target_width) + crop_height = max(min(height, crop_height), 1) + crop_width = int(float(height * target_width) / target_height) + crop_width = max(min(width, crop_width), 1) + crop_box_hstart = int(float(height - crop_height) / 2) + crop_box_wstart = int(float(width - crop_width) / 2) + if data_format == 'channels_last': + if len(images.shape) == 4: + images = images[:, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width, :] + else: + images = images[crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width, :] + elif len(images.shape) == 4: + images = images[:, :, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width] + else: + images = images[:, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width] + elif pad_to_aspect_ratio: + shape = images.shape + batch_size = images.shape[0] + if data_format == 'channels_last': + (height, width, channels) = (shape[-3], shape[-2], shape[-1]) + else: + (channels, height, width) = (shape[-3], shape[-2], shape[-1]) + pad_height = int(float(width * target_height) / target_width) + pad_height = max(height, pad_height) + pad_width = int(float(height * target_width) / target_height) + pad_width = max(width, pad_width) + img_box_hstart = int(float(pad_height - height) / 2) + img_box_wstart = int(float(pad_width - width) / 2) + if data_format == 'channels_last': + if len(images.shape) == 4: + padded_img = np.ones((batch_size, pad_height + height, pad_width + width, channels), dtype=images.dtype) * fill_value + padded_img[:, img_box_hstart:img_box_hstart + height, img_box_wstart:img_box_wstart + width, :] = images + else: + padded_img = np.ones((pad_height + height, pad_width + width, channels), dtype=images.dtype) * fill_value + padded_img[img_box_hstart:img_box_hstart + height, img_box_wstart:img_box_wstart + width, :] = images + elif len(images.shape) == 4: + padded_img = np.ones((batch_size, channels, pad_height + height, pad_width + width), dtype=images.dtype) * fill_value + padded_img[:, :, img_box_hstart:img_box_hstart + height, img_box_wstart:img_box_wstart + width] = images + else: + padded_img = np.ones((channels, pad_height + height, pad_width + width), dtype=images.dtype) * fill_value + padded_img[:, img_box_hstart:img_box_hstart + height, img_box_wstart:img_box_wstart + width] = images + images = padded_img + return np.array(jax.image.resize(images, size, method=interpolation, antialias=antialias)) +AFFINE_TRANSFORM_INTERPOLATIONS = {'nearest': 0, 'bilinear': 1} +AFFINE_TRANSFORM_FILL_MODES = {'constant', 'nearest', 'wrap', 'mirror', 'reflect'} + +def affine_transform(images, transform, 
interpolation='bilinear', fill_mode='constant', fill_value=0, data_format=None): + data_format = backend.standardize_data_format(data_format) + if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS: + raise ValueError(f'Invalid value for argument `interpolation`. Expected one of {set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: interpolation={interpolation}') + if fill_mode not in AFFINE_TRANSFORM_FILL_MODES: + raise ValueError(f'Invalid value for argument `fill_mode`. Expected one of {AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}') + transform = convert_to_tensor(transform) + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if len(transform.shape) not in (1, 2): + raise ValueError(f'Invalid transform rank: expected rank 1 (single transform) or rank 2 (batch of transforms). Received input with shape: transform.shape={transform.shape}') + input_dtype = images.dtype + if input_dtype == 'float16': + images = images.astype('float32') + need_squeeze = False + if len(images.shape) == 3: + images = np.expand_dims(images, axis=0) + need_squeeze = True + if len(transform.shape) == 1: + transform = np.expand_dims(transform, axis=0) + if data_format == 'channels_first': + images = np.transpose(images, (0, 2, 3, 1)) + batch_size = images.shape[0] + meshgrid = np.meshgrid(*[np.arange(size) for size in images.shape[1:]], indexing='ij') + indices = np.concatenate([np.expand_dims(x, axis=-1) for x in meshgrid], axis=-1) + indices = np.tile(indices, (batch_size, 1, 1, 1, 1)) + a0 = transform[:, 0].copy() + a2 = transform[:, 2].copy() + b1 = transform[:, 4].copy() + b2 = transform[:, 5].copy() + transform[:, 0] = b1 + transform[:, 2] = b2 + transform[:, 4] = a0 + transform[:, 5] = a2 + transform = np.pad(transform, pad_width=[[0, 0], [0, 1]], constant_values=1) + transform = np.reshape(transform, (batch_size, 3, 3)) + offset = transform[:, 0:2, 2].copy() + offset = np.pad(offset, pad_width=[[0, 0], [0, 1]]) + transform[:, 0:2, 2] = 0 + coordinates = np.einsum('Bhwij, Bjk -> Bhwik', indices, transform) + coordinates = np.moveaxis(coordinates, source=-1, destination=1) + coordinates += np.reshape(a=offset, newshape=(*offset.shape, 1, 1, 1)) + affined = np.stack([map_coordinates(images[i], coordinates[i], order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation], fill_mode=fill_mode, fill_value=fill_value) for i in range(batch_size)], axis=0) + if data_format == 'channels_first': + affined = np.transpose(affined, (0, 3, 1, 2)) + if need_squeeze: + affined = np.squeeze(affined, axis=0) + if input_dtype == 'float16': + affined = affined.astype(input_dtype) + return affined +MAP_COORDINATES_FILL_MODES = {'constant', 'nearest', 'wrap', 'mirror', 'reflect'} + +def map_coordinates(inputs, coordinates, order, fill_mode='constant', fill_value=0.0): + inputs = convert_to_tensor(inputs) + coordinates = convert_to_tensor(coordinates) + if coordinates.shape[0] != len(inputs.shape): + raise ValueError(f'First dim of `coordinates` must be the same as the rank of `inputs`. Received inputs with shape: {inputs.shape} and coordinate leading dim of {coordinates.shape[0]}') + if len(coordinates.shape) < 2: + raise ValueError(f'Invalid coordinates rank: expected at least rank 2. Received input with shape: {coordinates.shape}') + if fill_mode not in MAP_COORDINATES_FILL_MODES: + raise ValueError(f'Invalid value for argument `fill_mode`. 
Expected one of {MAP_COORDINATES_FILL_MODES}. Received: fill_mode={fill_mode}') + if order not in range(2): + raise ValueError(f'Invalid value for argument `order`. Expected one of {[0, 1]}. Received: order={order}') + padding = [(max(-np.floor(c.min()).astype(int) + 1, 0), max(np.ceil(c.max()).astype(int) + 1 - size, 0)) for (c, size) in zip(coordinates, inputs.shape)] + shifted_coords = [c + p[0] for (p, c) in zip(padding, coordinates)] + pad_mode = {'nearest': 'edge', 'mirror': 'reflect', 'reflect': 'symmetric'}.get(fill_mode, fill_mode) + if fill_mode == 'constant': + padded = np.pad(inputs, padding, mode=pad_mode, constant_values=fill_value) + else: + padded = np.pad(inputs, padding, mode=pad_mode) + result = scipy.ndimage.map_coordinates(padded, shifted_coords, order=order, mode=fill_mode, cval=fill_value) + return result + +# File: keras-master/keras/src/backend/numpy/linalg.py +import numpy as np +import scipy.linalg as sl +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.numpy.core import convert_to_tensor + +def cholesky(a): + return np.linalg.cholesky(a) + +def det(a): + return np.linalg.det(a) + +def eig(a): + return np.linalg.eig(a) + +def eigh(a): + return np.linalg.eigh(a) + +def inv(a): + return np.linalg.inv(a) + +def lu_factor(a): + if a.ndim == 2: + return sl.lu_factor(a) + (m, n) = a.shape[-2:] + signature = '(m,n) -> (m,n), ' + signature += '(m)' if m <= n else '(n)' + _lu_factor_gufunc = np.vectorize(sl.lu_factor, signature=signature) + return _lu_factor_gufunc(a) + +def norm(x, ord=None, axis=None, keepdims=False): + x = convert_to_tensor(x) + dtype = standardize_dtype(x.dtype) + if 'int' in dtype or dtype == 'bool': + dtype = dtypes.result_type(x.dtype, 'float32') + return np.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims).astype(dtype) + +def qr(x, mode='reduced'): + if mode not in {'reduced', 'complete'}: + raise ValueError(f"`mode` argument value not supported. Expected one of {{'reduced', 'complete'}}. 
Received: mode={mode}") + return np.linalg.qr(x, mode=mode) + +def solve(a, b): + return np.linalg.solve(a, b) + +def solve_triangular(a, b, lower=False): + if a.ndim == 2: + return sl.solve_triangular(a, b, lower=lower) + _vectorized_solve_triangular = np.vectorize(lambda a, b: sl.solve_triangular(a, b, lower=lower), signature='(n,n),(n,m)->(n,m)') + if b.ndim == a.ndim - 1: + b = np.expand_dims(b, axis=-1) + return _vectorized_solve_triangular(a, b).squeeze(axis=-1) + return _vectorized_solve_triangular(a, b) + +def svd(x, full_matrices=True, compute_uv=True): + return np.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv) + +def lstsq(a, b, rcond=None): + a = convert_to_tensor(a) + b = convert_to_tensor(b) + return np.linalg.lstsq(a, b, rcond=rcond)[0] + +# File: keras-master/keras/src/backend/numpy/math.py +import numpy as np +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.jax.math import fft as jax_fft +from keras.src.backend.jax.math import fft2 as jax_fft2 +from keras.src.backend.numpy.core import convert_to_tensor +from keras.src.utils.module_utils import scipy + +def _segment_reduction_fn(data, segment_ids, reduction_method, num_segments, sorted): + if num_segments is None: + num_segments = np.amax(segment_ids) + 1 + valid_indices = segment_ids >= 0 + valid_data = data[valid_indices] + valid_segment_ids = segment_ids[valid_indices] + data_shape = list(valid_data.shape) + data_shape[0] = num_segments + if reduction_method == np.maximum: + result = np.ones(data_shape, dtype=valid_data.dtype) * -np.inf + else: + result = np.zeros(data_shape, dtype=valid_data.dtype) + if sorted: + reduction_method.at(result, valid_segment_ids, valid_data) + else: + sort_indices = np.argsort(valid_segment_ids) + sorted_segment_ids = valid_segment_ids[sort_indices] + sorted_data = valid_data[sort_indices] + reduction_method.at(result, sorted_segment_ids, sorted_data) + return result + +def segment_sum(data, segment_ids, num_segments=None, sorted=False): + return _segment_reduction_fn(data, segment_ids, np.add, num_segments, sorted) + +def segment_max(data, segment_ids, num_segments=None, sorted=False): + return _segment_reduction_fn(data, segment_ids, np.maximum, num_segments, sorted) + +def top_k(x, k, sorted=False): + if sorted: + sorted_indices = np.argsort(x, axis=-1)[..., ::-1] + sorted_values = np.take_along_axis(x, sorted_indices, axis=-1) + top_k_values = sorted_values[..., :k] + top_k_indices = sorted_indices[..., :k] + else: + top_k_indices = np.argpartition(x, -k, axis=-1)[..., -k:] + top_k_values = np.take_along_axis(x, top_k_indices, axis=-1) + return (top_k_values, top_k_indices) + +def in_top_k(targets, predictions, k): + targets = targets[:, None] + topk_values = top_k(predictions, k)[0] + targets_values = np.take_along_axis(predictions, targets, axis=-1) + mask = targets_values >= topk_values + return np.any(mask, axis=-1) + +def logsumexp(x, axis=None, keepdims=False): + max_x = np.max(x, axis=axis, keepdims=True) + result = np.log(np.sum(np.exp(x - max_x), axis=axis, keepdims=True)) + max_x + return np.squeeze(result) if not keepdims else result + +def qr(x, mode='reduced'): + if mode not in {'reduced', 'complete'}: + raise ValueError(f"`mode` argument value not supported. Expected one of {{'reduced', 'complete'}}. 
Received: mode={mode}") + return np.linalg.qr(x, mode=mode) + +def extract_sequences(x, sequence_length, sequence_stride): + (*batch_shape, _) = x.shape + batch_shape = list(batch_shape) + shape = x.shape[:-1] + ((x.shape[-1] - (sequence_length - sequence_stride)) // sequence_stride, sequence_length) + strides = x.strides[:-1] + (sequence_stride * x.strides[-1], x.strides[-1]) + x = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides) + return np.reshape(x, (*batch_shape, *x.shape[-2:])) + +def _get_complex_tensor_from_tuple(x): + if not isinstance(x, (tuple, list)) or len(x) != 2: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Received: x={x}') + (real, imag) = x + if real.shape != imag.shape: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Both the real and imaginary parts should have the same shape. Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}') + if not np.issubdtype(real.dtype, np.floating) or not np.issubdtype(imag.dtype, np.floating): + raise ValueError(f'At least one tensor in input `x` is not of type float. Received: x={x}.') + complex_input = real + 1j * imag + return complex_input + +def fft(x): + (real, imag) = jax_fft(x) + return (np.array(real), np.array(imag)) + +def fft2(x): + (real, imag) = jax_fft2(x) + return (np.array(real), np.array(imag)) + +def rfft(x, fft_length=None): + complex_output = np.fft.rfft(x, n=fft_length, axis=-1, norm='backward') + return (np.real(complex_output).astype(x.dtype), np.imag(complex_output).astype(x.dtype)) + +def irfft(x, fft_length=None): + complex_input = _get_complex_tensor_from_tuple(x) + return np.fft.irfft(complex_input, n=fft_length, axis=-1, norm='backward').astype(x[0].dtype) + +def stft(x, sequence_length, sequence_stride, fft_length, window='hann', center=True): + if standardize_dtype(x.dtype) not in {'float32', 'float64'}: + raise TypeError(f'Invalid input type. Expected `float32` or `float64`. Received: input type={x.dtype}') + if fft_length < sequence_length: + raise ValueError(f'`fft_length` must be equal to or larger than `sequence_length`. Received: sequence_length={sequence_length}, fft_length={fft_length}') + if isinstance(window, str): + if window not in {'hann', 'hamming'}: + raise ValueError(f'If a string is passed to `window`, it must be one of `"hann"`, `"hamming"`. 
Received: window={window}') + x = convert_to_tensor(x) + ori_dtype = x.dtype + if center: + pad_width = [(0, 0) for _ in range(len(x.shape))] + pad_width[-1] = (fft_length // 2, fft_length // 2) + x = np.pad(x, pad_width, mode='reflect') + l_pad = (fft_length - sequence_length) // 2 + r_pad = fft_length - sequence_length - l_pad + if window is not None: + if isinstance(window, str): + win = convert_to_tensor(scipy.signal.get_window(window, sequence_length), dtype=x.dtype) + else: + win = convert_to_tensor(window, dtype=x.dtype) + if len(win.shape) != 1 or win.shape[-1] != sequence_length: + raise ValueError(f'The shape of `window` must be equal to [sequence_length]. Received: window shape={win.shape}') + win = np.pad(win, [[l_pad, r_pad]]) + else: + win = np.ones(sequence_length + l_pad + r_pad, dtype=x.dtype) + x = scipy.signal.stft(x, fs=1.0, window=win, nperseg=sequence_length + l_pad + r_pad, noverlap=sequence_length + l_pad + r_pad - sequence_stride, nfft=fft_length, boundary=None, padded=False)[-1] + x = x / np.sqrt(1.0 / win.sum() ** 2) + x = np.swapaxes(x, -2, -1) + return (np.real(x).astype(ori_dtype), np.imag(x).astype(ori_dtype)) + +def istft(x, sequence_length, sequence_stride, fft_length, length=None, window='hann', center=True): + x = _get_complex_tensor_from_tuple(x) + dtype = np.real(x).dtype + expected_output_len = fft_length + sequence_stride * (x.shape[-2] - 1) + l_pad = (fft_length - sequence_length) // 2 + r_pad = fft_length - sequence_length - l_pad + if window is not None: + if isinstance(window, str): + win = convert_to_tensor(scipy.signal.get_window(window, sequence_length), dtype=dtype) + else: + win = convert_to_tensor(window, dtype=dtype) + if len(win.shape) != 1 or win.shape[-1] != sequence_length: + raise ValueError(f'The shape of `window` must be equal to [sequence_length]. Received: window shape={win.shape}') + win = np.pad(win, [[l_pad, r_pad]]) + else: + win = np.ones(sequence_length + l_pad + r_pad, dtype=dtype) + x = scipy.signal.istft(x, fs=1.0, window=win, nperseg=sequence_length + l_pad + r_pad, noverlap=sequence_length + l_pad + r_pad - sequence_stride, nfft=fft_length, boundary=False, time_axis=-2, freq_axis=-1)[-1] + x = x / win.sum() if window is not None else x / sequence_stride + start = 0 if center is False else fft_length // 2 + if length is not None: + end = start + length + elif center is True: + end = -(fft_length // 2) + else: + end = expected_output_len + return x[..., start:end] + +def rsqrt(x): + return 1.0 / np.sqrt(x) + +def erf(x): + return np.array(scipy.special.erf(x)) + +def erfinv(x): + return np.array(scipy.special.erfinv(x)) + +def solve(a, b): + a = convert_to_tensor(a) + b = convert_to_tensor(b) + return np.linalg.solve(a, b) + +def norm(x, ord=None, axis=None, keepdims=False): + x = convert_to_tensor(x) + dtype = standardize_dtype(x.dtype) + if 'int' in dtype or dtype == 'bool': + dtype = dtypes.result_type(x.dtype, 'float32') + return np.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims).astype(dtype) + +def logdet(x): + from keras.src.backend.numpy.numpy import slogdet + return slogdet(x)[1] + +# File: keras-master/keras/src/backend/numpy/nn.py +import jax +import numpy as np +from jax import lax +from keras.src import backend +from keras.src.backend.common.backend_utils import compute_conv_transpose_padding_args_for_jax +from keras.src.backend.numpy.core import cast +from keras.src.backend.numpy.core import convert_to_tensor +from keras.src.backend.numpy.core import is_tensor +from keras.src.utils.module_utils import 
scipy + +def relu(x): + x = convert_to_tensor(x) + return np.maximum(x, np.array(0.0, x.dtype)) + +def relu6(x): + x = convert_to_tensor(x) + return np.minimum(np.maximum(x, np.array(0.0, x.dtype)), np.array(6.0, x.dtype)) + +def sigmoid(x): + x = convert_to_tensor(x) + return np.array(1.0, x.dtype) / (np.array(1.0, x.dtype) + np.exp(-x)) + +def tanh(x): + return np.tanh(x) + +def softplus(x): + x = convert_to_tensor(x) + return np.logaddexp(x, np.array(0.0, x.dtype)) + +def softsign(x): + x = convert_to_tensor(x) + return x / (np.array(1.0, x.dtype) + np.abs(x)) + +def silu(x): + x = convert_to_tensor(x) + return x * sigmoid(x) + +def log_sigmoid(x): + x = convert_to_tensor(x) + return -softplus(-x) + +def leaky_relu(x, negative_slope=0.2): + x = convert_to_tensor(x) + return np.maximum(x, np.array(negative_slope, x.dtype) * x) + +def hard_sigmoid(x): + x = x / np.array(6.0, x.dtype) + np.array(0.5, x.dtype) + return np.where(x <= 0.0, np.array(0.0, x.dtype), np.where(x >= 1.0, np.array(1.0, x.dtype), x)) + +def hard_silu(x): + return x * hard_sigmoid(x) + +def elu(x, alpha=1.0): + x = convert_to_tensor(x) + return np.where(x >= np.array(0.0, x.dtype), x, np.array(alpha, x.dtype) * np.expm1(x)) + +def selu(x, alpha=1.6732632423543772, scale=1.0507009873554805): + x = convert_to_tensor(x) + return np.array(scale, x.dtype) * elu(x, alpha) + +def gelu(x, approximate=True): + x = convert_to_tensor(x) + if approximate: + sqrt_2_over_pi = np.sqrt(2 / np.pi).astype(x.dtype) + cdf = np.array(0.5, x.dtype) * (np.array(1.0, x.dtype) + np.tanh(sqrt_2_over_pi * (x + np.array(0.044715, x.dtype) * (x ** 3).astype(x.dtype)))) + return x * cdf + else: + sqrt_2 = np.sqrt(2).astype(x.dtype) + return x * (scipy.special.erf(x / sqrt_2) + 1).astype(x.dtype) / np.array(2, x.dtype) + +def softmax(x, axis=None): + exp_x = np.exp(x - np.max(x, axis=axis, keepdims=True)) + return exp_x / np.sum(exp_x, axis=axis, keepdims=True) + +def log_softmax(x, axis=None): + max_x = np.max(x, axis=axis, keepdims=True) + logsumexp = np.log(np.exp(x - max_x).sum(axis=axis, keepdims=True)) + return x - max_x - logsumexp + +def _convert_to_spatial_operand(x, num_spatial_dims, data_format='channels_last', include_batch_and_channels=True): + x = (x,) * num_spatial_dims if isinstance(x, int) else x + if not include_batch_and_channels: + return x + if data_format == 'channels_last': + x = (1,) + x + (1,) + else: + x = (1,) + (1,) + x + return x + +def _pool(inputs, initial_value, reduce_fn, pool_size, strides=None, padding='valid'): + if padding not in ('same', 'valid'): + raise ValueError(f"Invalid padding '{padding}', must be 'same' or 'valid'.") + padding = padding.upper() + return np.array(lax.reduce_window(inputs, initial_value, reduce_fn, pool_size, strides, padding)) + +def max_pool(inputs, pool_size, strides=None, padding='valid', data_format=None): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = inputs.ndim - 2 + pool_size = _convert_to_spatial_operand(pool_size, num_spatial_dims, data_format) + strides = pool_size if strides is None else strides + strides = _convert_to_spatial_operand(strides, num_spatial_dims, data_format) + return _pool(inputs, -np.inf, lax.max, pool_size, strides, padding) + +def average_pool(inputs, pool_size, strides, padding, data_format=None): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = inputs.ndim - 2 + pool_size = _convert_to_spatial_operand(pool_size, num_spatial_dims, data_format) + strides = pool_size if strides is None else 
strides + strides = _convert_to_spatial_operand(strides, num_spatial_dims, data_format) + pooled = _pool(inputs, 0.0, lax.add, pool_size, strides, padding) + if padding == 'valid': + return pooled / np.prod(pool_size) + else: + shape = [a if b != 1 else 1 for (a, b) in zip(inputs.shape, pool_size)] + window_counts = _pool(np.ones(shape, inputs.dtype), 0.0, lax.add, pool_size, strides, padding) + return pooled / window_counts + +def _convert_to_lax_conv_dimension_numbers(num_spatial_dims, data_format='channels_last', transpose=False): + num_dims = num_spatial_dims + 2 + if data_format == 'channels_last': + spatial_dims = tuple(range(1, num_dims - 1)) + inputs_dn = (0, num_dims - 1) + spatial_dims + else: + spatial_dims = tuple(range(2, num_dims)) + inputs_dn = (0, 1) + spatial_dims + if transpose: + kernel_dn = (num_dims - 2, num_dims - 1) + tuple(range(num_dims - 2)) + else: + kernel_dn = (num_dims - 1, num_dims - 2) + tuple(range(num_dims - 2)) + return lax.ConvDimensionNumbers(lhs_spec=inputs_dn, rhs_spec=kernel_dn, out_spec=inputs_dn) + +def conv(inputs, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = inputs.ndim - 2 + dimension_numbers = _convert_to_lax_conv_dimension_numbers(num_spatial_dims, data_format, transpose=False) + strides = _convert_to_spatial_operand(strides, num_spatial_dims, data_format, include_batch_and_channels=False) + dilation_rate = _convert_to_spatial_operand(dilation_rate, num_spatial_dims, data_format, include_batch_and_channels=False) + if data_format == 'channels_last': + channels = inputs.shape[-1] + else: + channels = inputs.shape[1] + kernel_in_channels = kernel.shape[-2] + if channels % kernel_in_channels > 0: + raise ValueError(f"The number of input channels must be evenly divisible by kernel's in_channels. Received input channels {channels} and kernel in_channels {kernel_in_channels}. 
") + feature_group_count = channels // kernel_in_channels + return np.array(jax.lax.conv_general_dilated(inputs, kernel if is_tensor(kernel) else kernel.numpy(), strides, padding, rhs_dilation=dilation_rate, dimension_numbers=dimension_numbers, feature_group_count=feature_group_count)) + +def depthwise_conv(inputs, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = inputs.ndim - 2 + dimension_numbers = _convert_to_lax_conv_dimension_numbers(num_spatial_dims, data_format, transpose=False) + strides = _convert_to_spatial_operand(strides, num_spatial_dims, data_format, include_batch_and_channels=False) + dilation_rate = _convert_to_spatial_operand(dilation_rate, num_spatial_dims, data_format, include_batch_and_channels=False) + feature_group_count = inputs.shape[-1] if data_format == 'channels_last' else inputs.shape[1] + kernel = np.reshape(kernel if is_tensor(kernel) else kernel.numpy(), kernel.shape[:-2] + (1, feature_group_count * kernel.shape[-1])) + return np.array(jax.lax.conv_general_dilated(inputs, kernel, strides, padding, rhs_dilation=dilation_rate, dimension_numbers=dimension_numbers, feature_group_count=feature_group_count)) + +def separable_conv(inputs, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + data_format = backend.standardize_data_format(data_format) + depthwise_conv_output = depthwise_conv(inputs, depthwise_kernel, strides, padding, data_format, dilation_rate) + return conv(depthwise_conv_output, pointwise_kernel, strides=1, padding='valid', data_format=data_format, dilation_rate=dilation_rate) + +def conv_transpose(inputs, kernel, strides=1, padding='valid', output_padding=None, data_format=None, dilation_rate=1): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = inputs.ndim - 2 + padding_values = compute_conv_transpose_padding_args_for_jax(input_shape=inputs.shape, kernel_shape=kernel.shape, strides=strides, padding=padding, output_padding=output_padding, dilation_rate=dilation_rate) + dimension_numbers = _convert_to_lax_conv_dimension_numbers(num_spatial_dims, data_format, transpose=False) + strides = _convert_to_spatial_operand(strides, num_spatial_dims, data_format, include_batch_and_channels=False) + dilation_rate = _convert_to_spatial_operand(dilation_rate, num_spatial_dims, data_format, include_batch_and_channels=False) + return np.array(jax.lax.conv_transpose(inputs, kernel if is_tensor(kernel) else kernel.numpy(), strides, padding=padding_values, rhs_dilation=dilation_rate, dimension_numbers=dimension_numbers, transpose_kernel=True)) + +def one_hot(x, num_classes, axis=-1, dtype='float32', sparse=False): + if sparse: + raise ValueError('Unsupported value `sparse=True` with numpy backend') + x = convert_to_tensor(x) + input_shape = x.shape + x = x.reshape(-1) + if not num_classes: + num_classes = np.max(x) + 1 + batch_size = x.shape[0] + categorical = np.zeros((batch_size, num_classes), dtype=dtype) + valid_indices = x >= 0 + categorical[np.arange(batch_size)[valid_indices], x[valid_indices]] = 1 + output_shape = input_shape + (num_classes,) + categorical = np.reshape(categorical, output_shape) + if axis != -1: + categorical = np.moveaxis(categorical, -1, axis) + return categorical + +def multi_hot(x, num_classes, axis=-1, dtype='float32', sparse=False): + if sparse: + raise ValueError('Unsupported value `sparse=True` with numpy backend') + x = convert_to_tensor(x) + 
+    reduction_axis = 1 if len(x.shape) > 1 else 0
+    outputs = np.max(one_hot(cast(x, 'int32'), num_classes, axis=axis, dtype=dtype), axis=reduction_axis)
+    return outputs
+
+def categorical_crossentropy(target, output, from_logits=False, axis=-1):
+    target = np.array(target)
+    output = np.array(output)
+    if target.shape != output.shape:
+        raise ValueError(f'Arguments `target` and `output` must have the same shape. Received: target.shape={target.shape}, output.shape={output.shape}')
+    if len(target.shape) < 1:
+        raise ValueError(f'Arguments `target` and `output` must be at least rank 1. Received: target.shape={target.shape}, output.shape={output.shape}')
+    if from_logits:
+        log_prob = log_softmax(output, axis=axis)
+    else:
+        output = output / np.sum(output, axis, keepdims=True)
+        output = np.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
+        log_prob = np.log(output)
+    return -np.sum(target * log_prob, axis=axis)
+
+def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
+    target = np.array(target, dtype='int32')
+    output = np.array(output)
+    if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
+        target = np.squeeze(target, axis=-1)
+    if len(output.shape) < 1:
+        raise ValueError(f'Argument `output` must be at least rank 1. Received: output.shape={output.shape}')
+    if target.shape != output.shape[:-1]:
+        raise ValueError(f'Arguments `target` and `output` must have the same shape up until the last dimension: target.shape={target.shape}, output.shape={output.shape}')
+    if from_logits:
+        log_prob = log_softmax(output, axis=axis)
+    else:
+        output = output / np.sum(output, axis, keepdims=True)
+        output = np.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
+        log_prob = np.log(output)
+    target = one_hot(target, output.shape[axis], axis=axis)
+    return -np.sum(target * log_prob, axis=axis)
+
+def binary_crossentropy(target, output, from_logits=False):
+    target = np.array(target)
+    output = np.array(output)
+    if target.shape != output.shape:
+        raise ValueError(f'Arguments `target` and `output` must have the same shape. Received: target.shape={target.shape}, output.shape={output.shape}')
+    if from_logits:
+        output = sigmoid(output)
+    output = np.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
+    bce = target * np.log(output)
+    bce += (1.0 - target) * np.log(1.0 - output)
+    return -bce
+
+def moments(x, axes, keepdims=False, synchronized=False):
+    if synchronized:
+        raise NotImplementedError('Argument synchronized=True is not supported with NumPy.')
+    axes = tuple(axes) if isinstance(axes, list) else axes
+    need_cast = False
+    ori_dtype = backend.standardize_dtype(x.dtype)
+    if ori_dtype == 'float16':
+        need_cast = True
+        x = cast(x, 'float32')
+    mean = np.mean(x, axes, keepdims=True)
+    variance = np.mean(np.square(x), axis=axes, keepdims=True) - np.square(mean)
+    if not keepdims:
+        mean = np.squeeze(mean, axes)
+        variance = np.squeeze(variance, axes)
+    if need_cast:
+        mean = np.clip(mean, np.finfo(np.float16).min, np.finfo(np.float16).max)
+        variance = np.clip(variance, np.finfo(np.float16).min, np.finfo(np.float16).max)
+        mean = cast(mean, ori_dtype)
+        variance = cast(variance, ori_dtype)
+    return (mean, variance)
+
+def batch_normalization(x, mean, variance, axis, offset=None, scale=None, epsilon=0.001):
+    shape = [1] * len(x.shape)
+    shape[axis] = mean.shape[0]
+    mean = np.reshape(mean, shape)
+    variance = np.reshape(variance, shape)
+    inv = 1.0 / np.sqrt(variance + epsilon)
+    if scale is not None:
+        scale = np.reshape(scale, shape)
+        inv = inv * scale
+    res = -mean * inv
+    if offset is not None:
+        offset = np.reshape(offset, shape)
+        res = res + offset
+    return x * inv + res
+
+def ctc_loss(target, output, target_length, output_length, mask_index=0):
+    target = convert_to_tensor(target, dtype='int32')
+    output = convert_to_tensor(output)
+    target_length = convert_to_tensor(target_length, 'int32')
+    output_length = convert_to_tensor(output_length, 'int32')
+    (batch_size, max_input_length, num_classes) = output.shape
+    (batch_size, max_label_length) = target.shape
+    log_epsilon = -100000.0
+    dtype = backend.result_type(output.dtype, 'float32')
+    output = output.astype(dtype)
+
+    def _lengths_to_paddings(lengths, max_length):
+        indices = np.arange(max_length).reshape((1,) * lengths.ndim + (max_length,))
+        lengths = np.expand_dims(lengths, axis=-1)
+        elem_valid = indices < lengths
+        return np.logical_not(elem_valid)
+    target_paddings = _lengths_to_paddings(target_length, max_label_length)
+    output_paddings = _lengths_to_paddings(output_length, max_input_length)
+    target_paddings = target_paddings.astype(output.dtype)
+    output_paddings = output_paddings.astype(output.dtype)
+    logprobs = log_softmax(output, axis=-1)
+    label_lengths = max_label_length - np.sum(target_paddings, axis=1).astype(np.int32)
+    repeat = (target[:, :-1] == target[:, 1:]).astype(np.float32)
+    repeat = np.pad(repeat, ((0, 0), (0, 1)))
+    logprobs_phi = logprobs[:, :, mask_index:mask_index + 1]
+    logprobs_phi = np.transpose(logprobs_phi, (1, 0, 2))
+    _one_hot = one_hot(target, num_classes=num_classes)
+    logprobs_emit = np.einsum('btk,bnk->btn', logprobs, _one_hot)
+    logprobs_emit = np.transpose(logprobs_emit, (1, 0, 2))
+    logalpha_phi_init = np.ones((batch_size, max_label_length + 1), dtype=output.dtype) * log_epsilon
+    logalpha_phi_init[:, 0] = 0.0
+    logalpha_emit_init = np.ones((batch_size, max_label_length), dtype=output.dtype) * log_epsilon
+
+    def update_phi_score(phi, added_score):
+        return np.concatenate([phi[:, :1], np.logaddexp(phi[:, 1:], added_score)], axis=-1)
+
+    def loop_body(prev, x):
+        (prev_phi, prev_emit) = prev
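+        # [Editor's note, not part of the Keras source] One frame of the CTC
+        # forward recursion in log space: `phi` scores alignments sitting on
+        # the blank, `emit` scores alignments that just emitted a label,
+        # `np.logaddexp` accumulates path probabilities, and `repeat` blocks
+        # the direct emit-to-emit transition between identical consecutive
+        # labels. The `pad` blend at the end of the loop carries the scores
+        # of padded frames through unchanged.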
+ prev_phi_orig = prev_phi + prev_phi = update_phi_score(prev_phi, prev_emit + log_epsilon * repeat) + (logprob_emit, logprob_phi, pad) = x + next_emit = np.logaddexp(prev_phi[:, :-1] + logprob_emit, prev_emit + logprob_emit) + next_phi = prev_phi + logprob_phi + next_phi = update_phi_score(next_phi, prev_emit + logprob_phi + log_epsilon * (1.0 - repeat)) + pad = pad.reshape((batch_size, 1)) + next_emit = pad * prev_emit + (1.0 - pad) * next_emit + next_phi = pad * prev_phi_orig + (1.0 - pad) * next_phi + return ((next_phi, next_emit), (next_phi, next_emit)) + + def np_scan(f, init, xs): + carry = init + ys = [] + for x in zip(*xs): + (carry, y) = f(carry, x) + ys.append(y) + result = [] + for i in range(len(ys[0])): + result.append(np.stack([y[i] for y in ys])) + return (carry, result) + xs = (logprobs_emit, logprobs_phi, output_paddings.transpose((1, 0))) + (_, (logalpha_phi, logalpha_emit)) = np_scan(loop_body, (logalpha_phi_init, logalpha_emit_init), xs) + logalpha_phi_last = update_phi_score(logalpha_phi[-1], logalpha_emit[-1]) + logalpha_phi[-1] = logalpha_phi_last + _one_hot = one_hot(label_lengths, num_classes=max_label_length + 1) + per_seq_loss = -np.einsum('bn,bn->b', logalpha_phi_last, _one_hot) + return per_seq_loss + +def _ctc_greedy_decode(inputs, sequence_lengths, merge_repeated=True, mask_index=None): + inputs = convert_to_tensor(inputs) + sequence_lengths = convert_to_tensor(sequence_lengths, dtype='int32') + (batch_size, max_length, num_classes) = inputs.shape + if mask_index is None: + mask_index = num_classes - 1 + indices = np.argmax(inputs, axis=-1).astype('int32') + scores = np.max(inputs, axis=-1) + seqlen_mask = np.arange(max_length)[None, :] + seqlen_mask = seqlen_mask >= sequence_lengths[:, None] + indices = np.where(seqlen_mask, mask_index, indices) + scores = np.where(seqlen_mask, 0.0, scores) + if merge_repeated: + repeat_mask = indices[:, 1:] == indices[:, :-1] + repeat_mask = np.pad(repeat_mask, ((0, 0), (1, 0))) + indices = np.where(repeat_mask, mask_index, indices) + invalid_mask = indices == mask_index + indices = np.where(invalid_mask, -1, indices) + order = np.expand_dims(np.arange(max_length), axis=0) + order = np.tile(order, (batch_size, 1)) + order = np.where(invalid_mask, max_length, order) + order = np.argsort(order, axis=-1) + indices = np.take_along_axis(indices, order, axis=-1) + scores = -np.sum(scores, axis=1)[:, None] + indices = np.expand_dims(indices, axis=0) + return (indices, scores) + +def _ctc_beam_search_decode(inputs, sequence_lengths, beam_width=100, top_paths=1, mask_index=None): + inputs = convert_to_tensor(inputs) + sequence_lengths = convert_to_tensor(sequence_lengths) + (batch_size, max_seq_len, num_classes) = inputs.shape + inputs = log_softmax(inputs, axis=-1) + seqlen_mask = np.arange(max_seq_len)[None, :] >= sequence_lengths[:, None] + if mask_index is None: + mask_index = num_classes - 1 + inputs = np.flip(inputs, axis=2) + mask_index = num_classes - mask_index - 1 + _pad = -1 + init_paths = np.full((batch_size, 2 * beam_width, max_seq_len), _pad, dtype=np.int32) + num_init_paths = np.min(np.array([num_classes, beam_width])) + max_classes = np.argsort(inputs[:, 0], axis=1)[:, -num_init_paths:] + init_classes = np.where(max_classes == mask_index, _pad, max_classes) + init_paths[:, :num_init_paths, 0] = init_classes + init_scores = np.full((batch_size, 2 * beam_width), -np.inf, dtype=inputs.dtype) + init_scores[:, :num_init_paths] = np.take_along_axis(inputs[:, 0], max_classes, axis=1) + init_masked = init_paths[:, :, 0] == 
_pad + + def _extend_paths(paths, scores, masked, x): + paths = np.repeat(paths, num_classes, axis=0) + scores = np.repeat(scores, num_classes) + masked = np.repeat(masked, num_classes) + path_tail_index = np.argmax(paths == _pad, axis=1) + paths_arange = np.arange(2 * beam_width * num_classes) + path_tails = paths[paths_arange, path_tail_index - 1] + path_tails = np.where(path_tail_index == 0, _pad, path_tails) + classes = np.arange(num_classes) + classes[mask_index] = _pad + classes = np.tile(classes, 2 * beam_width) + prev_masked = masked + masked = classes == _pad + masked_repeat = ~prev_masked & (path_tails == classes) + classes = np.where(masked_repeat, _pad, classes) + paths[paths_arange, path_tail_index] = classes + x = np.tile(x, 2 * beam_width) + scores = scores + x + return (paths, scores, masked) + + def _merge_scores(unique_inverse, scores): + scores_max = np.max(scores) + scores_exp = np.exp(scores - scores_max) + scores = np.zeros_like(scores) + for (i, u) in enumerate(unique_inverse): + scores[u] += scores_exp[i] + scores = np.log(scores) + scores_max + return scores + + def _prune_paths(paths, scores, masked): + (paths, unique_inverse) = np.unique(paths, return_inverse=True, axis=0) + pad_size = 2 * num_classes * beam_width - len(paths) + if pad_size > 0: + paths = np.pad(paths, [[0, pad_size], [0, 0]], constant_values=_pad) + paths = paths[:2 * num_classes * beam_width] + if len(unique_inverse.shape) >= 2: + unique_inverse = np.squeeze(unique_inverse, axis=1) + emit_scores = np.where(masked, -np.inf, scores) + mask_scores = np.where(masked, scores, -np.inf) + emit_scores = _merge_scores(unique_inverse, emit_scores) + mask_scores = _merge_scores(unique_inverse, mask_scores) + total_scores = np.logaddexp(emit_scores, mask_scores) + top_indices = np.argsort(total_scores, kind='stable')[-beam_width:] + paths = paths[top_indices] + emit_scores = emit_scores[top_indices] + mask_scores = mask_scores[top_indices] + paths = np.tile(paths, (2, 1)) + scores = np.concatenate([emit_scores, mask_scores]) + masked = np.concatenate([np.zeros(beam_width, bool), np.ones(beam_width, bool)]) + return (paths, scores, masked) + + def _decode_step(paths, scores, masked, x): + (paths, scores, masked) = _extend_paths(paths, scores, masked, x) + (paths, scores, masked) = _prune_paths(paths, scores, masked) + return (paths, scores, masked) + + def _step(prev, x): + (paths, scores, masked) = prev + (x, seqlen_mask) = x + if not seqlen_mask: + (paths, scores, masked) = _decode_step(paths, scores, masked, x) + return ((paths, scores, masked), None) + + def _decode_batch(init_paths, init_scores, init_masked, inputs, seqlen_mask): + + def np_scan_only_carry(f, init, xs): + carry = init + for x in zip(*xs): + (carry, y) = f(carry, x) + return (carry, None) + ((paths, scores, masked), _) = np_scan_only_carry(_step, (init_paths, init_scores, init_masked), (inputs[1:], seqlen_mask[1:])) + (paths, unique_inverse) = np.unique(paths, return_inverse=True, axis=0) + pad_size = 2 * num_classes * beam_width - len(paths) + if pad_size > 0: + paths = np.pad(paths, [[0, pad_size], [0, 0]], constant_values=_pad) + paths = paths[:2 * num_classes * beam_width] + if len(unique_inverse.shape) >= 2: + unique_inverse = np.squeeze(unique_inverse, axis=1) + scores = _merge_scores(unique_inverse, scores) + top_indices = np.argsort(scores)[-top_paths:][::-1] + paths = paths[top_indices] + scores = scores[top_indices] + return (paths, scores) + results = [_decode_batch(p, s, m, i, sm) for (p, s, m, i, sm) in zip(init_paths, 
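+    # [Editor's note, not part of the Keras source] Each batch element is
+    # decoded independently by this Python-level list comprehension; the
+    # NumPy backend has no batched scan primitive to vectorize over the
+    # batch.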
init_scores, init_masked, inputs, seqlen_mask)] + paths = np.stack([r[0] for r in results]) + scores = np.stack([r[1] for r in results]) + paths = np.where(paths == _pad, _pad, num_classes - paths - 1) + paths = np.transpose(paths, [1, 0, 2]) + return (paths, scores) + +def ctc_decode(inputs, sequence_lengths, strategy='greedy', beam_width=100, top_paths=1, merge_repeated=True, mask_index=0): + inputs = convert_to_tensor(inputs) + dtype = backend.result_type(inputs.dtype, 'float32') + inputs = cast(inputs, dtype) + if strategy == 'greedy': + return _ctc_greedy_decode(inputs, sequence_lengths, merge_repeated=merge_repeated, mask_index=mask_index) + elif strategy == 'beam_search': + return _ctc_beam_search_decode(inputs, sequence_lengths, beam_width=beam_width, top_paths=top_paths, mask_index=mask_index) + else: + raise ValueError(f"Invalid strategy {strategy}. Supported values are 'greedy' and 'beam_search'.") + +def psnr(x1, x2, max_val): + if x1.shape != x2.shape: + raise ValueError(f'Input shapes {x1.shape} and {x2.shape} must match for PSNR calculation. ') + max_val = convert_to_tensor(max_val, dtype=x2.dtype) + mse = np.mean(np.square(x1 - x2)) + psnr = 20 * np.log10(max_val) - 10 * np.log10(mse) + return psnr + +# File: keras-master/keras/src/backend/numpy/numpy.py +import numpy as np +from keras.src import tree +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.common.backend_utils import standardize_axis_for_numpy +from keras.src.backend.numpy.core import convert_to_tensor + +def add(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return np.add(x1, x2) + +def einsum(subscripts, *operands, **kwargs): + operands = tree.map_structure(convert_to_tensor, operands) + dtypes_to_resolve = list(set((standardize_dtype(x.dtype) for x in operands))) + if len(dtypes_to_resolve) == 1 and dtypes_to_resolve[0] == 'int8': + compute_dtype = 'int32' + result_dtype = 'int32' + else: + result_dtype = dtypes.result_type(*dtypes_to_resolve) + compute_dtype = result_dtype + if compute_dtype == 'bfloat16': + compute_dtype = 'float32' + operands = tree.map_structure(lambda x: x.astype(compute_dtype), operands) + return np.einsum(subscripts, *operands, **kwargs).astype(result_dtype) + +def subtract(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return np.subtract(x1, x2) + +def matmul(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + x1_dtype = standardize_dtype(x1.dtype) + x2_dtype = standardize_dtype(x2.dtype) + if x1_dtype == 'int8' and x2_dtype == 'int8': + dtype = 'int32' + else: + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = x1.astype(dtype) + x2 = x2.astype(dtype) + return np.matmul(x1, x2).astype(dtype) + +def multiply(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) 
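+    # [Editor's note, not part of the Keras source] As in `add` and
+    # `subtract` above, `getattr(x, 'dtype', type(x))` lets plain Python
+    # scalars join dtype promotion as weak types, so `dtypes.result_type`
+    # fixes the result dtype before NumPy sees the operands. Expected (not
+    # recorded) behavior:
+    #   >>> multiply(np.ones(2, 'float32'), 2.0).dtype
+    #   dtype('float32')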
+ x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return np.multiply(x1, x2) + +def mean(x, axis=None, keepdims=False): + axis = standardize_axis_for_numpy(axis) + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + result_dtype = dtypes.result_type(x.dtype, 'float32') + else: + result_dtype = ori_dtype + return np.mean(x, axis=axis, keepdims=keepdims).astype(result_dtype) + +def max(x, axis=None, keepdims=False, initial=None): + axis = standardize_axis_for_numpy(axis) + return np.max(x, axis=axis, keepdims=keepdims, initial=initial) + +def ones(shape, dtype=None): + dtype = dtype or config.floatx() + return np.ones(shape, dtype=dtype) + +def zeros(shape, dtype=None): + dtype = dtype or config.floatx() + return np.zeros(shape, dtype=dtype) + +def absolute(x): + return np.absolute(x) + +def abs(x): + return absolute(x) + +def all(x, axis=None, keepdims=False): + axis = standardize_axis_for_numpy(axis) + return np.all(x, axis=axis, keepdims=keepdims) + +def any(x, axis=None, keepdims=False): + axis = standardize_axis_for_numpy(axis) + return np.any(x, axis=axis, keepdims=keepdims) + +def amax(x, axis=None, keepdims=False): + axis = standardize_axis_for_numpy(axis) + return np.amax(x, axis=axis, keepdims=keepdims) + +def amin(x, axis=None, keepdims=False): + axis = standardize_axis_for_numpy(axis) + return np.amin(x, axis=axis, keepdims=keepdims) + +def append(x1, x2, axis=None): + axis = standardize_axis_for_numpy(axis) + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = x1.astype(dtype) + x2 = x2.astype(dtype) + return np.append(x1, x2, axis=axis) + +def arange(start, stop=None, step=None, dtype=None): + if dtype is None: + dtypes_to_resolve = [getattr(start, 'dtype', type(start)), getattr(step, 'dtype', type(step))] + if stop is not None: + dtypes_to_resolve.append(getattr(stop, 'dtype', type(stop))) + dtype = dtypes.result_type(*dtypes_to_resolve) + return np.arange(start, stop, step=step, dtype=dtype) + +def arccos(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.arccos(x) + +def arccosh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.arccosh(x) + +def arcsin(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.arcsin(x) + +def arcsinh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.arcsinh(x) + +def arctan(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.arctan(x) + +def arctan2(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype, float) + x1 = x1.astype(dtype) + x2 = x2.astype(dtype) + return np.arctan2(x1, x2) + +def arctanh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = 
x.astype(dtype) + return np.arctanh(x) + +def argmax(x, axis=None, keepdims=False): + axis = standardize_axis_for_numpy(axis) + return np.argmax(x, axis=axis, keepdims=keepdims).astype('int32') + +def argmin(x, axis=None, keepdims=False): + axis = standardize_axis_for_numpy(axis) + return np.argmin(x, axis=axis, keepdims=keepdims).astype('int32') + +def argsort(x, axis=-1): + axis = standardize_axis_for_numpy(axis) + return np.argsort(x, axis=axis).astype('int32') + +def array(x, dtype=None): + return convert_to_tensor(x, dtype=dtype) + +def average(x, axis=None, weights=None): + axis = standardize_axis_for_numpy(axis) + x = convert_to_tensor(x) + dtypes_to_resolve = [x.dtype, float] + if weights is not None: + weights = convert_to_tensor(weights) + dtypes_to_resolve.append(weights.dtype) + dtype = dtypes.result_type(*dtypes_to_resolve) + x = x.astype(dtype) + if weights is not None: + weights = weights.astype(dtype) + return np.average(x, weights=weights, axis=axis) + +def bincount(x, weights=None, minlength=0, sparse=False): + if sparse: + raise ValueError('Unsupported value `sparse=True` with numpy backend') + x = convert_to_tensor(x) + dtypes_to_resolve = [x.dtype] + if weights is not None: + weights = convert_to_tensor(weights) + dtypes_to_resolve.append(weights.dtype) + dtype = dtypes.result_type(*dtypes_to_resolve) + else: + dtype = 'int32' + if len(x.shape) == 2: + if weights is None: + + def bincount_fn(arr): + return np.bincount(arr, minlength=minlength) + bincounts = list(map(bincount_fn, x)) + else: + + def bincount_fn(arr_w): + return np.bincount(arr_w[0], weights=arr_w[1], minlength=minlength) + bincounts = list(map(bincount_fn, zip(x, weights))) + return np.stack(bincounts).astype(dtype) + return np.bincount(x, weights, minlength).astype(dtype) + +def bitwise_and(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return np.bitwise_and(x, y) + +def bitwise_invert(x): + x = convert_to_tensor(x) + return np.bitwise_not(x) + +def bitwise_not(x): + return bitwise_invert(x) + +def bitwise_or(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return np.bitwise_or(x, y) + +def bitwise_xor(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return np.bitwise_xor(x, y) + +def bitwise_left_shift(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return np.left_shift(x, y) + +def left_shift(x, y): + return bitwise_left_shift(x, y) + +def bitwise_right_shift(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return np.right_shift(x, y) + +def right_shift(x, y): + return bitwise_right_shift(x, y) + +def broadcast_to(x, shape): + return np.broadcast_to(x, shape) + +def ceil(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.ceil(x) + +def clip(x, x_min, x_max): + x = convert_to_tensor(x) + dtype = standardize_dtype(x.dtype) + if dtype == 'bool': + dtype = 'int32' + return np.clip(x, x_min, x_max).astype(dtype) + +def concatenate(xs, axis=0): + axis = standardize_axis_for_numpy(axis) + dtype_set = set([getattr(x, 'dtype', type(x)) for x in xs]) + if len(dtype_set) > 1: + dtype = dtypes.result_type(*dtype_set) + xs = tree.map_structure(lambda x: convert_to_tensor(x).astype(dtype), xs) + return np.concatenate(xs, axis=axis) + +def conjugate(x): + return np.conjugate(x) + +def conj(x): + return conjugate(x) + +def copy(x): + return np.copy(x) + +def cos(x): + x = convert_to_tensor(x) + if 
standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.cos(x) + +def cosh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.cosh(x) + +def count_nonzero(x, axis=None): + axis = standardize_axis_for_numpy(axis) + return convert_to_tensor(np.count_nonzero(x, axis=axis)).astype('int32') + +def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None): + axis = standardize_axis_for_numpy(axis) + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = x1.astype(dtype) + x2 = x2.astype(dtype) + return np.cross(x1, x2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis) + +def cumprod(x, axis=None, dtype=None): + axis = standardize_axis_for_numpy(axis) + dtype = dtypes.result_type(dtype or x.dtype) + if dtype == 'bool': + dtype = 'int32' + return np.cumprod(x, axis=axis, dtype=dtype) + +def cumsum(x, axis=None, dtype=None): + axis = standardize_axis_for_numpy(axis) + dtype = dtypes.result_type(dtype or x.dtype) + if dtype == 'bool': + dtype = 'int32' + return np.cumsum(x, axis=axis, dtype=dtype) + +def diag(x, k=0): + return np.diag(x, k=k) + +def diagonal(x, offset=0, axis1=0, axis2=1): + axis1 = standardize_axis_for_numpy(axis1) + axis2 = standardize_axis_for_numpy(axis2) + return np.diagonal(x, offset=offset, axis1=axis1, axis2=axis2) + +def diff(a, n=1, axis=-1): + return np.diff(a, n=n, axis=axis) + +def digitize(x, bins): + return np.digitize(x, bins).astype(np.int32) + +def dot(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + dtype = dtypes.result_type(x.dtype, y.dtype) + x = x.astype(dtype) + y = y.astype(dtype) + return np.dot(x, y) + +def empty(shape, dtype=None): + dtype = dtype or config.floatx() + return np.empty(shape, dtype=dtype) + +def equal(x1, x2): + return np.equal(x1, x2) + +def exp(x): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + x = x.astype(config.floatx()) + return np.exp(x) + +def expand_dims(x, axis): + axis = standardize_axis_for_numpy(axis) + return np.expand_dims(x, axis) + +def expm1(x): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + x = x.astype(config.floatx()) + return np.expm1(x) + +def flip(x, axis=None): + axis = standardize_axis_for_numpy(axis) + return np.flip(x, axis=axis) + +def floor(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.floor(x) + +def full(shape, fill_value, dtype=None): + dtype = dtype or config.floatx() + return np.full(shape, fill_value, dtype=dtype) + +def full_like(x, fill_value, dtype=None): + return np.full_like(x, fill_value, dtype=dtype) + +def greater(x1, x2): + return np.greater(x1, x2) + +def greater_equal(x1, x2): + return np.greater_equal(x1, x2) + +def hstack(xs): + dtype_set = set([getattr(x, 'dtype', type(x)) for x in xs]) + if len(dtype_set) > 1: + dtype = dtypes.result_type(*dtype_set) + xs = tree.map_structure(lambda x: convert_to_tensor(x).astype(dtype), xs) + return np.hstack(xs) + +def identity(n, dtype=None): + dtype = dtype or config.floatx() + return np.identity(n, dtype=dtype) + +def imag(x): + return np.imag(x) + +def isclose(x1, x2, rtol=1e-05, atol=1e-08, 
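+# [Editor's note, not part of the Keras source] `isclose` forwards directly
+# to NumPy, whose test is |x1 - x2| <= atol + rtol * |x2|; the defaults in
+# this signature are NumPy's own: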
equal_nan=False): + return np.isclose(x1, x2, rtol, atol, equal_nan) + +def isfinite(x): + return np.isfinite(x) + +def isinf(x): + return np.isinf(x) + +def isnan(x): + return np.isnan(x) + +def less(x1, x2): + return np.less(x1, x2) + +def less_equal(x1, x2): + return np.less_equal(x1, x2) + +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): + axis = standardize_axis_for_numpy(axis) + if dtype is None: + dtypes_to_resolve = [getattr(start, 'dtype', type(start)), getattr(stop, 'dtype', type(stop)), float] + dtype = dtypes.result_type(*dtypes_to_resolve) + return np.linspace(start, stop, num=num, endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis) + +def log(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + return np.log(x, dtype=dtype) + +def log10(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + return np.log10(x, dtype=dtype) + +def log1p(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + return np.log1p(x, dtype=dtype) + +def log2(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + return np.log2(x, dtype=dtype) + +def logaddexp(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype, float) + x1 = x1.astype(dtype) + x2 = x2.astype(dtype) + return np.logaddexp(x1, x2) + +def logical_and(x1, x2): + return np.logical_and(x1, x2) + +def logical_not(x): + return np.logical_not(x) + +def logical_or(x1, x2): + return np.logical_or(x1, x2) + +def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0): + if dtype is None: + dtypes_to_resolve = [getattr(start, 'dtype', type(start)), getattr(stop, 'dtype', type(stop)), float] + dtype = dtypes.result_type(*dtypes_to_resolve) + return np.logspace(start, stop, num=num, endpoint=endpoint, base=base, dtype=dtype, axis=axis) + +def maximum(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return np.maximum(x1, x2) + +def median(x, axis=None, keepdims=False): + dtype = dtypes.result_type(x.dtype, float) + return np.median(x, axis=axis, keepdims=keepdims).astype(dtype) + +def meshgrid(*x, indexing='xy'): + return np.meshgrid(*x, indexing=indexing) + +def min(x, axis=None, keepdims=False, initial=None): + axis = standardize_axis_for_numpy(axis) + return np.min(x, axis=axis, keepdims=keepdims, initial=initial) + +def minimum(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return np.minimum(x1, x2) + +def mod(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + if dtype == 'bool': + dtype = 'int32' + x1 = x1.astype(dtype) + x2 = x2.astype(dtype) + return np.mod(x1, x2) + +def moveaxis(x, source, 
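+# [Editor's note, not part of the Keras source] `moveaxis`, `nan_to_num` and
+# `ndim` below are thin pass-throughs to the NumPy functions of the same
+# name: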
destination): + return np.moveaxis(x, source=source, destination=destination) + +def nan_to_num(x, nan=0.0, posinf=None, neginf=None): + return np.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) + +def ndim(x): + return np.ndim(x) + +def nonzero(x): + return tuple((indices.astype('int32') for indices in np.nonzero(x))) + +def not_equal(x1, x2): + return np.not_equal(x1, x2) + +def zeros_like(x, dtype=None): + return np.zeros_like(x, dtype=dtype) + +def ones_like(x, dtype=None): + return np.ones_like(x, dtype=dtype) + +def outer(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = x1.astype(dtype) + x2 = x2.astype(dtype) + return np.outer(x1, x2) + +def pad(x, pad_width, mode='constant', constant_values=None): + kwargs = {} + if constant_values is not None: + if mode != 'constant': + raise ValueError(f"Argument `constant_values` can only be provided when `mode == 'constant'`. Received: mode={mode}") + kwargs['constant_values'] = constant_values + return np.pad(x, pad_width, mode=mode, **kwargs) + +def prod(x, axis=None, keepdims=False, dtype=None): + axis = standardize_axis_for_numpy(axis) + x = convert_to_tensor(x) + if dtype is None: + dtype = dtypes.result_type(x.dtype) + if dtype in ('bool', 'int8', 'int16'): + dtype = 'int32' + elif dtype in ('uint8', 'uint16'): + dtype = 'uint32' + return np.prod(x, axis=axis, keepdims=keepdims, dtype=dtype) + +def quantile(x, q, axis=None, method='linear', keepdims=False): + axis = standardize_axis_for_numpy(axis) + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if ori_dtype == 'bool': + x = x.astype(config.floatx()) + if ori_dtype == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + return np.quantile(x, q, axis=axis, method=method, keepdims=keepdims).astype(dtype) + +def ravel(x): + return np.ravel(x) + +def real(x): + return np.real(x) + +def reciprocal(x): + return np.reciprocal(x) + +def repeat(x, repeats, axis=None): + return np.repeat(x, repeats, axis=axis) + +def reshape(x, newshape): + return np.reshape(x, newshape) + +def roll(x, shift, axis=None): + return np.roll(x, shift, axis=axis) + +def searchsorted(sorted_sequence, values, side='left'): + if ndim(sorted_sequence) != 1: + raise ValueError(f'`searchsorted` only supports 1-D sorted sequences. You can use `keras.ops.vectorized_map` to extend it to N-D sequences. 
Received: sorted_sequence.shape={sorted_sequence.shape}') + out_type = 'int32' if len(sorted_sequence) <= np.iinfo(np.int32).max else 'int64' + return np.searchsorted(sorted_sequence, values, side=side).astype(out_type) + +def sign(x): + return np.sign(x) + +def sin(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.sin(x) + +def sinh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.sinh(x) + +def size(x): + return np.size(x) + +def sort(x, axis=-1): + axis = standardize_axis_for_numpy(axis) + return np.sort(x, axis=axis) + +def split(x, indices_or_sections, axis=0): + axis = standardize_axis_for_numpy(axis) + return np.split(x, indices_or_sections, axis=axis) + +def stack(x, axis=0): + axis = standardize_axis_for_numpy(axis) + dtype_set = set([getattr(a, 'dtype', type(a)) for a in x]) + if len(dtype_set) > 1: + dtype = dtypes.result_type(*dtype_set) + x = tree.map_structure(lambda a: convert_to_tensor(a).astype(dtype), x) + return np.stack(x, axis=axis) + +def std(x, axis=None, keepdims=False): + axis = standardize_axis_for_numpy(axis) + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + x = x.astype(config.floatx()) + return np.std(x, axis=axis, keepdims=keepdims) + +def swapaxes(x, axis1, axis2): + return np.swapaxes(x, axis1=axis1, axis2=axis2) + +def take(x, indices, axis=None): + axis = standardize_axis_for_numpy(axis) + return np.take(x, indices, axis=axis) + +def take_along_axis(x, indices, axis=None): + axis = standardize_axis_for_numpy(axis) + return np.take_along_axis(x, indices, axis=axis) + +def tan(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.tan(x) + +def tanh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = x.astype(dtype) + return np.tanh(x) + +def tensordot(x1, x2, axes=2): + axes = tuple(axes) if isinstance(axes, list) else axes + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = x1.astype(dtype) + x2 = x2.astype(dtype) + return np.tensordot(x1, x2, axes=axes) + +def round(x, decimals=0): + return np.round(x, decimals=decimals) + +def tile(x, repeats): + return np.tile(x, repeats) + +def trace(x, offset=0, axis1=0, axis2=1): + axis1 = standardize_axis_for_numpy(axis1) + axis2 = standardize_axis_for_numpy(axis2) + x = convert_to_tensor(x) + dtype = standardize_dtype(x.dtype) + if dtype not in ('int64', 'uint32', 'uint64'): + dtype = dtypes.result_type(dtype, 'int32') + return np.trace(x, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype) + +def tri(N, M=None, k=0, dtype=None): + dtype = dtype or config.floatx() + return np.tri(N, M=M, k=k, dtype=dtype) + +def tril(x, k=0): + return np.tril(x, k=k) + +def triu(x, k=0): + return np.triu(x, k=k) + +def trunc(x): + x = convert_to_tensor(x) + dtype = standardize_dtype(x.dtype) + if 'int' in dtype or 'bool' == dtype: + return x + return np.trunc(x) + +def vdot(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = 
x1.astype(dtype) + x2 = x2.astype(dtype) + return np.vdot(x1, x2) + +def vstack(xs): + dtype_set = set([getattr(x, 'dtype', type(x)) for x in xs]) + if len(dtype_set) > 1: + dtype = dtypes.result_type(*dtype_set) + xs = tree.map_structure(lambda x: convert_to_tensor(x).astype(dtype), xs) + return np.vstack(xs) + +def vectorize(pyfunc, *, excluded=None, signature=None): + return np.vectorize(pyfunc, excluded=excluded, signature=signature) + +def where(condition, x1, x2): + if x1 is not None and x2 is not None: + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return np.where(condition, x1, x2) + else: + return np.where(condition) + +def divide(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2)), float) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return np.divide(x1, x2) + +def divide_no_nan(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2)), float) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return np.where(x2 == 0, 0, np.divide(x1, x2)) + +def true_divide(x1, x2): + return divide(x1, x2) + +def power(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return np.power(x1, x2) + +def negative(x): + return np.negative(x) + +def square(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'bool': + x = x.astype('int32') + return np.square(x) + +def sqrt(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + return np.sqrt(x, dtype=dtype) + +def squeeze(x, axis=None): + axis = standardize_axis_for_numpy(axis) + return np.squeeze(x, axis=axis) + +def transpose(x, axes=None): + axes = tuple(axes) if isinstance(axes, list) else axes + return np.transpose(x, axes=axes) + +def var(x, axis=None, keepdims=False): + axis = standardize_axis_for_numpy(axis) + x = convert_to_tensor(x) + compute_dtype = dtypes.result_type(x.dtype, 'float32') + result_dtype = dtypes.result_type(x.dtype, float) + return np.var(x, axis=axis, keepdims=keepdims, dtype=compute_dtype).astype(result_dtype) + +def sum(x, axis=None, keepdims=False): + axis = standardize_axis_for_numpy(axis) + dtype = standardize_dtype(x.dtype) + if dtype in ('bool', 'int8', 'int16'): + dtype = 'int32' + elif dtype in ('uint8', 'uint16'): + dtype = 'uint32' + return np.sum(x, axis=axis, keepdims=keepdims).astype(dtype) + +def eye(N, M=None, k=0, dtype=None): + dtype = dtype or config.floatx() + return np.eye(N, M=M, k=k, dtype=dtype) + +def floor_divide(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = 
dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return np.floor_divide(x1, x2) + +def logical_xor(x1, x2): + return np.logical_xor(x1, x2) + +def correlate(x1, x2, mode='valid'): + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + if dtype == 'int64': + dtype = 'float64' + elif dtype not in ['bfloat16', 'float16', 'float64']: + dtype = 'float32' + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return np.correlate(x1, x2, mode) + +def select(condlist, choicelist, default=0): + return np.select(condlist, choicelist, default=default) + +def slogdet(x): + return tuple(np.linalg.slogdet(x)) + +def argpartition(x, kth, axis=-1): + return np.argpartition(x, kth, axis).astype('int32') + +# File: keras-master/keras/src/backend/numpy/random.py +import numpy as np +from keras.src.backend.config import floatx +from keras.src.backend.numpy.nn import softmax +from keras.src.random.seed_generator import SeedGenerator +from keras.src.random.seed_generator import draw_seed +from keras.src.random.seed_generator import make_default_seed + +def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + dtype = dtype or floatx() + seed = draw_seed(seed) + rng = np.random.default_rng(seed) + return rng.normal(size=shape, loc=mean, scale=stddev).astype(dtype) + +def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): + dtype = dtype or floatx() + seed = draw_seed(seed) + rng = np.random.default_rng(seed) + return rng.uniform(size=shape, low=minval, high=maxval).astype(dtype) + +def categorical(logits, num_samples, dtype='int64', seed=None): + seed = draw_seed(seed) + rng = np.random.default_rng(seed) + output = [] + for logits_instance in logits: + probabilities = softmax(logits_instance) + classes = np.arange(logits_instance.shape[-1]) + samples = rng.choice(classes, size=num_samples, p=probabilities) + output.append(samples) + return np.array(output).astype(dtype) + +def randint(shape, minval, maxval, dtype='int32', seed=None): + seed = draw_seed(seed) + rng = np.random.default_rng(seed) + output = rng.integers(low=minval, high=maxval, size=shape, dtype=dtype) + return output + +def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + dtype = dtype or floatx() + seed = draw_seed(seed) + rng = np.random.default_rng(seed) + lower_bound = mean - 2 * stddev + upper_bound = mean + 2 * stddev + flat_shape = np.prod(shape) + random_numbers = np.empty(0) + while random_numbers.shape[0] < flat_shape: + batch = rng.normal(loc=mean, scale=stddev, size=flat_shape) + valid = batch[(batch >= lower_bound) & (batch <= upper_bound)] + random_numbers = np.append(random_numbers, valid) + return random_numbers[:flat_shape].astype(dtype).reshape(shape) + +def dropout(inputs, rate, noise_shape=None, seed=None): + dtype = inputs.dtype + seed = draw_seed(seed) + keep_prob = 1.0 - rate + if noise_shape is None: + noise_shape = inputs.shape + else: + noise_shape = [n if n is not None else inputs.shape[i] for (i, n) in enumerate(noise_shape)] + rng = np.random.default_rng(seed) + mask = rng.uniform(size=noise_shape) < keep_prob + mask = np.broadcast_to(mask, inputs.shape) + return np.where(mask, (inputs / keep_prob).astype(dtype), np.zeros_like(inputs)) + +def shuffle(x, axis=0, seed=None): + seed = draw_seed(seed) + rng = np.random.default_rng(seed) + return rng.permuted(x, axis=axis) + +def gamma(shape, alpha, dtype=None, 
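+# [Editor's note, not part of the Keras source] Like every sampler in this
+# file, `gamma` resolves the Keras seed via `draw_seed` and builds a fresh
+# `np.random.default_rng`, so identical seeds reproduce identical draws: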
seed=None): + dtype = dtype or floatx() + seed = draw_seed(seed) + rng = np.random.default_rng(seed) + return rng.gamma(alpha, scale=1.0, size=shape).astype(dtype) + +def binomial(shape, counts, probabilities, dtype=None, seed=None): + dtype = dtype or floatx() + seed = draw_seed(seed) + rng = np.random.default_rng(seed) + sample = rng.binomial(n=counts, p=probabilities, size=shape).astype(dtype) + return sample + +def beta(shape, alpha, beta, dtype=None, seed=None): + dtype = dtype or floatx() + seed = draw_seed(seed) + rng = np.random.default_rng(seed) + sample = rng.beta(a=alpha, b=beta, size=shape).astype(dtype) + return sample + +# File: keras-master/keras/src/backend/numpy/rnn.py +import numpy as np +from keras.src import tree + +def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None, time_major=False, zero_output_for_mask=False, return_all_outputs=True): + + def swap_batch_timestep(input_t): + axes = list(range(len(input_t.shape))) + (axes[0], axes[1]) = (1, 0) + return np.transpose(input_t, axes) + if not time_major: + inputs = tree.map_structure(swap_batch_timestep, inputs) + flattened_inputs = tree.flatten(inputs) + time_steps = flattened_inputs[0].shape[0] + if mask is not None: + if mask.dtype != 'bool': + mask = mask.astype('bool') + if len(mask.shape) == 2: + mask = np.expand_dims(mask, axis=-1) + if not time_major: + mask = swap_batch_timestep(mask) + if constants is None: + constants = [] + + def _expand_mask(mask_t, input_t, fixed_dim=1): + if tree.is_nested(mask_t): + raise ValueError(f'mask_t is expected to be tensor, but got {mask_t}') + if tree.is_nested(input_t): + raise ValueError(f'input_t is expected to be tensor, but got {input_t}') + rank_diff = len(input_t.shape) - len(mask_t.shape) + for _ in range(rank_diff): + mask_t = np.expand_dims(mask_t, -1) + multiples = [1] * fixed_dim + list(input_t.shape[fixed_dim:]) + return np.tile(mask_t, multiples) + if unroll: + if not time_steps: + raise ValueError('Unrolling requires a fixed number of timesteps.') + states = tuple(initial_states) + successive_states = [] + successive_outputs = [] + + def _process_single_input_t(input_t): + input_t = unstack(input_t) + if go_backwards: + input_t.reverse() + return input_t + if tree.is_nested(inputs): + processed_input = tree.map_structure(_process_single_input_t, inputs) + else: + processed_input = (_process_single_input_t(inputs),) + + def _get_input_tensor(time): + inp = [t_[time] for t_ in processed_input] + return tree.pack_sequence_as(inputs, inp) + if mask is not None: + mask_list = unstack(mask) + if go_backwards: + mask_list.reverse() + for i in range(time_steps): + inp = _get_input_tensor(i) + mask_t = mask_list[i] + (output, new_states) = step_function(inp, tuple(states) + tuple(constants)) + tiled_mask_t = _expand_mask(mask_t, output) + if not successive_outputs: + prev_output = np.zeros_like(output) + else: + prev_output = successive_outputs[-1] + output = np.where(tiled_mask_t, output, prev_output) + flat_states = tree.flatten(states) + flat_new_states = tree.flatten(new_states) + tiled_mask_t = tuple((_expand_mask(mask_t, s) for s in flat_states)) + flat_final_states = tuple((np.where(m, s, ps) for (m, s, ps) in zip(tiled_mask_t, flat_new_states, flat_states))) + states = tree.pack_sequence_as(states, flat_final_states) + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: + successive_outputs = [output] + successive_states = [states] + 
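+            # [Editor's note, not part of the Keras source] In this unrolled,
+            # masked branch a masked timestep re-emits the previous output
+            # and carries the previous states forward via the `np.where`
+            # blends above, so padding never perturbs the recurrence.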
last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = np.stack(successive_outputs) + else: + for i in range(time_steps): + inp = _get_input_tensor(i) + (output, states) = step_function(inp, tuple(states) + tuple(constants)) + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: + successive_outputs = [output] + successive_states = [states] + last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = np.stack(successive_outputs) + else: + if mask is not None: + + def _step(states, current_input): + (current_input, current_mask) = current_input + is_masked = np.all(np.logical_not(current_mask), axis=-1, keepdims=True) + (output_t, new_states) = step_function(current_input, states) + if zero_output_for_mask: + masked_outs = np.where(is_masked, np.zeros_like(output_t), output_t) + else: + output_tm1 = states[0] + masked_outs = np.where(is_masked, output_tm1, output_t) + new_states = [np.where(is_masked, s, ns) for (s, ns) in zip(states, new_states)] + return (new_states, masked_outs) + scan_xs = (inputs, mask) + else: + + def _step(states, current_input): + (output_t, new_states) = step_function(current_input, states) + return (new_states, output_t) + scan_xs = inputs + (new_states, outputs) = numpy_scan(f=_step, init=initial_states, xs=scan_xs, reverse=go_backwards, mask=mask) + if go_backwards: + outputs = np.flip(outputs, axis=0) + last_output = outputs[-1] + if not time_major: + outputs = tree.map_structure(swap_batch_timestep, outputs) + return (last_output, outputs, new_states) + +def lstm(*args, **kwargs): + raise NotImplementedError + +def gru(*args, **kwargs): + raise NotImplementedError + +def unstack(x, axis=0): + return [x.take(i, axis) for i in range(x.shape[axis])] + +def numpy_scan(f, init, xs, reverse=False, mask=None): + states = init + outputs = [] + if mask is not None: + (x, mask) = xs + x = np.flip(x, axis=0) if reverse else x + mask = np.flip(mask, axis=0) if reverse else mask + for (each_x, each_mask) in zip(x, mask): + (states, output) = f(states, (each_x, each_mask)) + outputs.append(output) + else: + xs = np.flip(xs, axis=0) if reverse else xs + for x in xs: + (states, output) = f(states, x) + outputs.append(output) + outputs = np.array(outputs) + if reverse: + outputs = np.flip(outputs, axis=0) + return (states, outputs) + +def cudnn_ok(*args, **kwargs): + return False + +# File: keras-master/keras/src/backend/numpy/trainer.py +import numpy as np +from keras.src import backend +from keras.src import callbacks as callbacks_module +from keras.src import tree +from keras.src.backend.common import standardize_dtype +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.numpy.core import is_tensor +from keras.src.trainers import trainer as base_trainer +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.epoch_iterator import EpochIterator +from keras.src.utils import traceback_utils + +class NumpyTrainer(base_trainer.Trainer): + + def __init__(self): + super().__init__() + self.test_function = None + self.predict_function = None + + def test_step(self, data): + (x, y, sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(data) + if self._call_has_training_arg: + y_pred = self(x, training=False) + else: + y_pred = self(x) + loss = self._compute_loss(x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=False) + self._loss_tracker.update_state(loss, 
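+            # [Editor's note, not part of the Keras source] The batch size
+            # (first dimension of the first flattened input) is used as the
+            # sample weight, so the tracked loss is a size-weighted mean over
+            # batches: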
sample_weight=tree.flatten(x)[0].shape[0]) + return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight) + + def predict_step(self, data): + (x, _, _) = data_adapter_utils.unpack_x_y_sample_weight(data) + if self._call_has_training_arg: + y_pred = self(x, training=False) + else: + y_pred = self(x) + return y_pred + + def make_test_function(self, force=False): + if self.test_function is not None and (not force): + return self.test_function + + def one_test_step(data): + data = data[0] + return self.test_step(data) + + def multi_test_steps(data): + for single_step_data in data: + logs = one_test_step([single_step_data]) + return logs + if self.steps_per_execution > 1: + test_step = multi_test_steps + else: + test_step = one_test_step + self.test_function = test_step + + def make_predict_function(self, force=False): + if self.predict_function is not None and (not force): + return self.predict_function + + def one_predict_step(data): + data = data[0] + return self.predict_step(data) + + def multi_predict_steps(data): + outputs = one_predict_step(data[:1]) + for single_step_data in data[1:]: + step_outputs = one_predict_step([single_step_data]) + outputs = tree.map_structure(lambda t1, t2: np.concatenate([t1, t2]), outputs, step_outputs) + return outputs + if self.steps_per_execution > 1: + predict_step = multi_predict_steps + else: + predict_step = one_predict_step + self.predict_function = predict_step + + def _symbolic_build(self, data_batch): + model_unbuilt = not all((layer.built for layer in self._flatten_layers())) + compile_metrics_unbuilt = self._compile_metrics is not None and (not self._compile_metrics.built) + compile_loss_unbuilt = self._compile_loss is not None and (not self._compile_loss.built) + if model_unbuilt or compile_metrics_unbuilt or compile_loss_unbuilt: + + def to_symbolic_input(v): + if is_tensor(v): + return KerasTensor(v.shape, standardize_dtype(v.dtype)) + return v + data_batch = tree.map_structure(to_symbolic_input, data_batch) + (x, y, sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(data_batch) + try: + y_pred = backend.compute_output_spec(self, x) + except: + raise RuntimeError("Unable to automatically build the model. Please build it yourself before calling fit/evaluate/predict. A model is 'built' when its variables have been created and its `self.built` attribute is True. 
Usually, calling the model on a batch of data is the right way to build it.") + if compile_metrics_unbuilt: + backend.compute_output_spec(self.compute_metrics, x, y, y_pred, sample_weight=sample_weight) + if compile_loss_unbuilt: + backend.compute_output_spec(self._compute_loss, x, y, y_pred, sample_weight=sample_weight) + self._post_build() + + def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose='auto', callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_batch_size=None, validation_freq=1): + raise NotImplementedError('fit not implemented for NumPy backend.') + + @traceback_utils.filter_traceback + def predict(self, x, batch_size=None, verbose='auto', steps=None, callbacks=None): + epoch_iterator = EpochIterator(x=x, batch_size=batch_size, steps_per_epoch=steps, shuffle=False, steps_per_execution=self.steps_per_execution) + if not isinstance(callbacks, callbacks_module.CallbackList): + callbacks = callbacks_module.CallbackList(callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=1, steps=epoch_iterator.num_batches, model=self) + + def append_to_outputs(batch_outputs, outputs): + if outputs is None: + outputs = tree.map_structure(lambda batch_output: [batch_output], batch_outputs) + else: + tree.map_structure_up_to(batch_outputs, lambda output, batch_output: output.append(batch_output), outputs, batch_outputs) + return outputs + self.make_predict_function() + self.stop_predicting = False + callbacks.on_predict_begin() + outputs = None + for (step, data) in epoch_iterator.enumerate_epoch(): + callbacks.on_predict_batch_begin(step) + batch_outputs = self.predict_function(data) + outputs = append_to_outputs(batch_outputs, outputs) + callbacks.on_predict_batch_end(step, {'outputs': batch_outputs}) + if self.stop_predicting: + break + callbacks.on_predict_end() + return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs) + + @traceback_utils.filter_traceback + def evaluate(self, x=None, y=None, batch_size=None, verbose='auto', sample_weight=None, steps=None, callbacks=None, return_dict=False, **kwargs): + use_cached_eval_dataset = kwargs.pop('_use_cached_eval_dataset', False) + if kwargs: + raise ValueError(f'Arguments not recognized: {kwargs}') + if use_cached_eval_dataset: + epoch_iterator = self._eval_epoch_iterator + else: + epoch_iterator = EpochIterator(x=x, y=y, sample_weight=sample_weight, batch_size=batch_size, steps_per_epoch=steps, shuffle=False, steps_per_execution=self.steps_per_execution) + if not all((layer.built for layer in self._flatten_layers())): + for (_, data) in epoch_iterator.enumerate_epoch(): + data_batch = data[0] + self._symbolic_build(data_batch) + break + if not isinstance(callbacks, callbacks_module.CallbackList): + callbacks = callbacks_module.CallbackList(callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=1, steps=epoch_iterator.num_batches, model=self) + self.make_test_function() + self.stop_evaluating = False + callbacks.on_test_begin() + logs = {} + self.reset_metrics() + for (step, data) in epoch_iterator.enumerate_epoch(): + callbacks.on_test_batch_begin(step) + logs = self.test_function(data) + logs = self._pythonify_logs(logs) + callbacks.on_test_batch_end(step, logs) + if self.stop_evaluating: + break + logs = self._get_metrics_result_or_logs(logs) + callbacks.on_test_end(logs) + if return_dict: + return logs + return 
self._flatten_metrics_in_order(logs) + + def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None, return_dict=False): + raise NotImplementedError('train_on_batch not implemented for NumPy backend.') + + def test_on_batch(self, x, y=None, sample_weight=None, return_dict=False): + self._assert_compile_called('test_on_batch') + data = (x, y, sample_weight) + self._symbolic_build(data) + self.make_test_function() + logs = self.test_function([data]) + logs = tree.map_structure(lambda x: np.array(x), logs) + if return_dict: + return logs + return self._flatten_metrics_in_order(logs) + + def predict_on_batch(self, x): + self.make_predict_function() + batch_outputs = self.predict_function([(x,)]) + batch_outputs = tree.map_structure(backend.convert_to_numpy, batch_outputs) + return batch_outputs + +# File: keras-master/keras/src/backend/tensorflow/__init__.py +from keras.src.backend.tensorflow import core +from keras.src.backend.tensorflow import distribution_lib +from keras.src.backend.tensorflow import image +from keras.src.backend.tensorflow import linalg +from keras.src.backend.tensorflow import math +from keras.src.backend.tensorflow import nn +from keras.src.backend.tensorflow import numpy +from keras.src.backend.tensorflow import random +from keras.src.backend.tensorflow import tensorboard +from keras.src.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS +from keras.src.backend.tensorflow.core import Variable +from keras.src.backend.tensorflow.core import cast +from keras.src.backend.tensorflow.core import compute_output_spec +from keras.src.backend.tensorflow.core import cond +from keras.src.backend.tensorflow.core import convert_to_numpy +from keras.src.backend.tensorflow.core import convert_to_tensor +from keras.src.backend.tensorflow.core import device_scope +from keras.src.backend.tensorflow.core import is_tensor +from keras.src.backend.tensorflow.core import name_scope +from keras.src.backend.tensorflow.core import random_seed_dtype +from keras.src.backend.tensorflow.core import scatter +from keras.src.backend.tensorflow.core import shape +from keras.src.backend.tensorflow.core import stop_gradient +from keras.src.backend.tensorflow.core import vectorized_map +from keras.src.backend.tensorflow.rnn import cudnn_ok +from keras.src.backend.tensorflow.rnn import gru +from keras.src.backend.tensorflow.rnn import lstm +from keras.src.backend.tensorflow.rnn import rnn + +# File: keras-master/keras/src/backend/tensorflow/core.py +import builtins +import numpy as np +import tensorflow as tf +from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice +from keras.src import tree +from keras.src.backend.common import KerasVariable +from keras.src.backend.common import global_state +from keras.src.backend.common import standardize_dtype +from keras.src.backend.common.backend_utils import slice_along_axis +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.name_scope import name_scope as base_name_scope +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.backend.common.stateless_scope import in_stateless_scope +from keras.src.backend.common.symbolic_scope import SymbolicScope +from keras.src.backend.tensorflow.sparse import sparse_to_dense +from keras.src.utils.naming import auto_name +SUPPORTS_SPARSE_TENSORS = True + +class Variable(KerasVariable, tf.__internal__.types.Tensor, tf.__internal__.tracking.Trackable): + _should_act_as_resource_variable = True + + @property + def handle(self): + 
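+        # [Editor's note, not part of the Keras source] Exposes the resource
+        # handle of the wrapped `tf.Variable`, which lets this class act as a
+        # TF resource variable (see `_should_act_as_resource_variable`
+        # above).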
return self.value.handle + + def _initialize(self, value): + self._value = tf.Variable(value, dtype=self._dtype, trainable=self.trainable, name=self.name) + + def _initialize_with_initializer(self, initializer): + self._value = tf.Variable(lambda : initializer(self._shape, dtype=self._dtype), dtype=self._dtype, trainable=self.trainable, name=self.name) + + def _deferred_initialize(self): + if self._value is not None: + raise ValueError(f'Variable {self.path} is already initialized.') + if in_stateless_scope(): + raise ValueError('You are attempting to initialize a variable while in a stateless scope. This is disallowed. Make sure that all variables are initialized before you start using your layer/model objects.') + with tf.init_scope(): + self._initialize_with_initializer(self._initializer) + self._initializer = None + + def _direct_assign(self, value): + self._value.assign(tf.cast(value, self._value.dtype)) + + def _convert_to_tensor(self, value, dtype=None): + return convert_to_tensor(value, dtype=dtype) + + def numpy(self): + return self.value.numpy() + + @property + def shape(self): + return tf.TensorShape(super().shape) + + def __tf_tensor__(self, dtype=None, name=None): + return tf.convert_to_tensor(self.value, dtype=dtype, name=name) + + @property + def _shared_name(self): + return self.value._shared_name + + def _serialize_to_tensors(self): + try: + return self.value._serialize_to_tensors() + except NotImplementedError: + return {'VARIABLE_VALUE': self.value} + + def _restore_from_tensors(self, restored_tensors): + try: + return self.value._restore_from_tensors(restored_tensors) + except NotImplementedError: + self.assign(restored_tensors['VARIABLE_VALUE']) + return self.value + + def _copy_trackable_to_cpu(self, object_map): + self.value._copy_trackable_to_cpu(object_map) + object_map[self] = tf.Variable(object_map[self.value]) + + def _export_to_saved_model_graph(self, object_map, tensor_map, options, **kwargs): + resource_list = self.value._export_to_saved_model_graph(object_map, tensor_map, options, **kwargs) + object_map[self] = tf.Variable(object_map[self.value]) + return resource_list + + def _write_object_proto(self, proto, options): + return self.value._write_object_proto(proto, options) + +def convert_to_tensor(x, dtype=None, sparse=None): + if isinstance(x, tf.SparseTensor) and sparse is not None and (not sparse): + x = sparse_to_dense(x) + if dtype is not None: + dtype = standardize_dtype(dtype) + if not tf.is_tensor(x): + if dtype == 'bool': + x = tf.convert_to_tensor(x) + return tf.cast(x, dtype) + return tf.convert_to_tensor(x, dtype=dtype) + elif dtype is not None and (not x.dtype == dtype): + if isinstance(x, tf.SparseTensor): + x_shape = x.shape + x = tf.cast(x, dtype) + x.set_shape(x_shape) + return x + return tf.cast(x, dtype=dtype) + return x + +def convert_to_numpy(x): + if isinstance(x, tf.SparseTensor): + x = sparse_to_dense(x) + elif isinstance(x, tf.IndexedSlices): + x = tf.convert_to_tensor(x) + elif isinstance(x, tf.RaggedTensor): + x = x.to_tensor() + return np.array(x) + +def is_tensor(x): + return tf.is_tensor(x) + +def shape(x): + if isinstance(x, KerasTensor): + return x.shape + if not tf.is_tensor(x): + x = tf.convert_to_tensor(x) + if x.shape == tf.TensorShape(None): + raise ValueError(f'All tensors passed to `ops.shape` must have a statically known rank. 
Received: x={x} with unknown rank.') + shape = x.shape.as_list() + dynamic = tf.shape(x) + for i in range(len(shape)): + if shape[i] is None: + try: + shape[i] = dynamic[i] + except Exception: + pass + return tuple(shape) + +def cast(x, dtype): + dtype = standardize_dtype(dtype) + if isinstance(x, tf.SparseTensor): + x_shape = x.shape + x = tf.cast(x, dtype) + x.set_shape(x_shape) + return x + else: + return tf.cast(x, dtype=dtype) + +def compute_output_spec(fn, *args, **kwargs): + with StatelessScope(), SymbolicScope(): + graph_name = auto_name('scratch_graph') + with tf.__internal__.FuncGraph(graph_name).as_default(): + + def convert_keras_tensor_to_tf(x): + if isinstance(x, KerasTensor): + if x.sparse: + return tf.compat.v1.sparse_placeholder(shape=x.shape, dtype=x.dtype) + else: + return tf.compat.v1.placeholder(shape=x.shape, dtype=x.dtype) + return x + (args, kwargs) = tree.map_structure(convert_keras_tensor_to_tf, (args, kwargs)) + tf_out = fn(*args, **kwargs) + + def convert_tf_to_keras_tensor(x): + if tf.is_tensor(x): + return KerasTensor(x.shape, x.dtype, sparse=isinstance(x, tf.SparseTensor)) + return x + output_spec = tree.map_structure(convert_tf_to_keras_tensor, tf_out) + return output_spec + +def cond(pred, true_fn, false_fn): + if isinstance(pred, tf.Variable): + return tf.cond(pred, true_fn=true_fn, false_fn=false_fn) + return tf.__internal__.smart_cond.smart_cond(pred, true_fn=true_fn, false_fn=false_fn) + +def vectorized_map(function, elements): + return tf.vectorized_map(function, elements) + +def map(f, xs): + xs = tree.map_structure(convert_to_tensor, xs) + + def get_fn_output_signature(x): + out = f(x) + return tree.map_structure(tf.TensorSpec.from_tensor, out) + if tree.is_nested(xs): + input = tree.pack_sequence_as(xs, [x[0] for x in tree.flatten(xs)]) + fn_output_signature = get_fn_output_signature(input) + return tf.map_fn(f, xs, fn_output_signature=fn_output_signature) + else: + fn_output_signature = get_fn_output_signature(xs[0]) + return tf.map_fn(f, xs, fn_output_signature=fn_output_signature) + +def scan(f, init, xs=None, length=None, reverse=False, unroll=1): + if not callable(f): + raise TypeError(f'`f` should be a callable. Received: f={f}') + if not isinstance(unroll, bool): + if not isinstance(unroll, int) or unroll < 1: + raise ValueError(f'`unroll` must be a positive integer or boolean. 
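# --- Example (editor's sketch; not from the Keras sources) ------------------
# With the TensorFlow backend active, `keras.ops.scan` dispatches to the
# `scan` implementation above. A minimal cumulative-sum sketch; values are
# illustrative only.
import numpy as np
from keras import ops

def step(carry, x):
    total = carry + x  # the running sum is both the new carry and the output
    return total, total

final, ys = ops.scan(step, init=0.0, xs=np.array([1.0, 2.0, 3.0], "float32"))
# final == 6.0; ys == [1., 3., 6.]
# ----------------------------------------------------------------------------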
Received: unroll={unroll}') + if xs is None and length is None: + raise ValueError('Got no `xs` to scan over and `length` not provided.') + input_is_sequence = tree.is_nested(xs) + output_is_sequence = tree.is_nested(init) + + def pack_input(x): + return tree.pack_sequence_as(xs, x) if input_is_sequence else x[0] + + def pack_output(x): + return tree.pack_sequence_as(init, x) if output_is_sequence else x[0] + if xs is None: + xs_flat = [] + n = int(length) + else: + xs_flat = tree.flatten(xs) + xs_flat = [tf.convert_to_tensor(elem) for elem in xs_flat] + n = int(length) if length is not None else tf.shape(xs_flat[0])[0] + xs_array = [tf.TensorArray(dtype=x.dtype, size=n, dynamic_size=False, element_shape=x.shape[1:], infer_shape=True) for x in xs_flat] + xs_array = [x_a.unstack(x) for (x_a, x) in zip(xs_array, xs_flat)] + init_flat = tree.flatten(init) + carry_flat = [tf.convert_to_tensor(init) for init in init_flat] + ys_array = [tf.TensorArray(dtype=carry.dtype, size=n, dynamic_size=False, element_shape=carry.shape, infer_shape=True) for carry in carry_flat] + carry_array = [tf.TensorArray(dtype=carry.dtype, size=1, dynamic_size=False, clear_after_read=False, element_shape=carry.shape, infer_shape=True) for carry in carry_flat] + carry_array = [carry.write(0, c) for (carry, c) in zip(carry_array, carry_flat)] + + def loop_body(i, carry_array, ys_array): + packed_xs = pack_input([xs.read(i) for xs in xs_array]) if len(xs_array) > 0 else None + packed_carry = pack_output([carry.read(0) for carry in carry_array]) + (carry, ys) = f(packed_carry, packed_xs) + if ys is not None: + flat_ys = tree.flatten(ys) + ys_array = [ys.write(i, v) for (ys, v) in zip(ys_array, flat_ys)] + if carry is not None: + flat_carry = tree.flatten(carry) + carry_array = [carry.write(0, v) for (carry, v) in zip(carry_array, flat_carry)] + next_i = i + 1 if not reverse else i - 1 + return (next_i, carry_array, ys_array) + if isinstance(unroll, bool): + unroll = max(n, 1) if unroll else 1 + (_, carry_array, ys_array) = tf.while_loop(lambda i, _1, _2: i >= 0 if reverse else i < n, loop_body, (n - 1 if reverse else 0, carry_array, ys_array), parallel_iterations=unroll) + ys_flat = [ys.stack() for ys in ys_array] + carry_flat = [carry.read(0) for carry in carry_array] + if xs is not None: + n_static = xs_flat[0].get_shape().with_rank_at_least(1)[0] + if not isinstance(n_static, int): + for x in xs_flat[1:]: + n_static.assert_is_compatible_with(x.get_shape().with_rank_at_least(1)[0]) + for r in ys_flat: + r.set_shape(tf.TensorShape(n_static).concatenate(r.get_shape()[1:])) + return (pack_output(carry_flat), pack_output(ys_flat)) + +def associative_scan(f, elems, reverse=False, axis=0): + if not callable(f): + raise TypeError(f'`f` should be a callable. Received: f={f}') + elems_flat = tree.flatten(elems) + elems_flat = [tf.convert_to_tensor(elem) for elem in elems_flat] + if reverse: + elems_flat = [tf.reverse(elem, [axis]) for elem in elems_flat] + + def _combine(a_flat, b_flat): + a = tree.pack_sequence_as(elems, a_flat) + b = tree.pack_sequence_as(elems, b_flat) + c = f(a, b) + c_flat = tree.flatten(c) + return c_flat + + def _get_dim(x): + return shape(x)[axis] + num_elems = _get_dim(elems_flat[0]) + if not all((_get_dim(elem) == num_elems for elem in elems_flat[1:])): + raise ValueError('Array inputs to associative_scan must have the same first dimension. 
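# --- Example (editor's sketch; not from the Keras sources) ------------------
# The recursive halving below implements an inclusive parallel prefix scan;
# assuming a Keras version that exposes `keras.ops.associative_scan`, addition
# gives a prefix sum.
import numpy as np
from keras import ops

xs = np.array([1.0, 2.0, 3.0, 4.0], "float32")
print(ops.associative_scan(lambda a, b: a + b, xs))  # -> [ 1.  3.  6. 10.]
# ----------------------------------------------------------------------------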
(saw: {})'.format([tf.shape(elem) for elem in elems_flat])) + + def _interleave(a, b, axis): + num_elems_a = _get_dim(a) + num_elems_b = _get_dim(b) + axis = tf.where(axis >= 0, axis, tf.rank(a) + axis) + axis = int(axis) if tf.get_static_value(axis) is not None else axis + + def _interleave_with_b(a): + return tf.reshape(tf.concat([tf.expand_dims(a, axis=axis + 1), tf.expand_dims(b, axis=axis + 1)], axis=axis + 1), tf.concat([a.get_shape()[:axis], [2 * num_elems_b], a.get_shape()[axis + 1:]], axis=0)) + return tf.cond(tf.equal(num_elems_a, num_elems_b + 1), lambda : tf.concat([_interleave_with_b(slice_along_axis(a, None, -1, axis=axis)), slice_along_axis(a, -1, None, axis=axis)], axis=axis), lambda : _interleave_with_b(a)) + + def _scan(elems): + elem_length = _get_dim(elems[0]) + a = [slice_along_axis(elem, 0, -1, step=2, axis=axis) for elem in elems] + b = [slice_along_axis(elem, 1, None, step=2, axis=axis) for elem in elems] + reduced_elems = _combine(a, b) + + def _handle_base_case_elem_length_two(): + return [tf.concat([slice_along_axis(elem, 0, 1, axis=axis), reduced_elem], axis=axis) for (reduced_elem, elem) in zip(reduced_elems, elems)] + + def _handle_base_case_elem_length_three(): + reduced_reduced_elems = _combine(reduced_elems, [slice_along_axis(elem, 2, 3, axis=axis) for elem in elems]) + return [tf.concat([slice_along_axis(elem, 0, 1, axis=axis), reduced_elem, reduced_reduced_elem], axis=axis) for (reduced_reduced_elem, reduced_elem, elem) in zip(reduced_reduced_elems, reduced_elems, elems)] + at_base_case = tf.logical_or(tf.equal(elem_length, 2), tf.equal(elem_length, 3)) + + def _base_case(): + return tf.cond(tf.equal(elem_length, 2), _handle_base_case_elem_length_two, _handle_base_case_elem_length_three) + + def _recursive_case(): + odd_elems = _scan(reduced_elems) + + def _even_length_case(): + return _combine([slice_along_axis(odd_elem, 0, -1, axis=axis) for odd_elem in odd_elems], [slice_along_axis(elem, 2, None, 2, axis=axis) for elem in elems]) + + def _odd_length_case(): + return _combine([odd_elem for odd_elem in odd_elems], [slice_along_axis(elem, 2, None, 2, axis=axis) for elem in elems]) + results = tf.cond(tf.equal(elem_length % 2, 0), _even_length_case, _odd_length_case) + even_elems = [tf.concat([slice_along_axis(elem, 0, 1, axis=axis), result], axis=axis) for (elem, result) in zip(elems, results)] + return list(builtins.map(lambda a, b: _interleave(a, b, axis=axis), even_elems, odd_elems)) + return tf.cond(at_base_case, _base_case, _recursive_case) + scans = _scan(elems_flat) + if reverse: + scans = [tf.reverse(scanned, [axis]) for scanned in scans] + return tree.pack_sequence_as(elems, scans) + +def scatter(indices, values, shape): + return tf.scatter_nd(indices, values, shape) + +def scatter_update(inputs, indices, updates): + return tf.tensor_scatter_nd_update(inputs, indices, updates) + +def slice(inputs, start_indices, shape): + return tf.slice(inputs, start_indices, shape) + +def slice_update(inputs, start_indices, updates): + return dynamic_update_slice(inputs, updates, start_indices) + +def switch(index, branches, *operands): + index = convert_to_tensor(index, 'int32') + index = tf.clip_by_value(index, 0, len(branches) - 1) + + def gen_fn(i): + return lambda : branches[i](*operands) + branch_fns = [gen_fn(i) for i in range(len(branches))] + return tf.switch_case(index, branch_fns) + +def while_loop(cond, body, loop_vars, maximum_iterations=None): + is_tuple = isinstance(loop_vars, (tuple, list)) + loop_vars = tuple(loop_vars) if is_tuple else 
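# --- Example (editor's sketch; not from the Keras sources) ------------------
# The scatter/slice helpers above are exposed through `keras.ops`; shapes and
# values here are illustrative.
import numpy as np
from keras import ops

# Length-4 vector with ones placed at indices 0 and 2.
print(ops.scatter(indices=[[0], [2]], values=[1.0, 1.0], shape=(4,)))  # -> [1. 0. 1. 0.]
# Overwrite the 2x2 block of a zero matrix whose top-left corner is (1, 1).
print(ops.slice_update(np.zeros((4, 4), "float32"), (1, 1), np.ones((2, 2), "float32")))
# ----------------------------------------------------------------------------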
(loop_vars,) + + def _body(*args): + outputs = body(*args) + return tuple(outputs) if is_tuple else (outputs,) + outputs = tf.while_loop(cond, _body, loop_vars, maximum_iterations=maximum_iterations) + return outputs if is_tuple else outputs[0] + +def fori_loop(lower, upper, body_fun, init_val): + return tf.while_loop(lambda i, val: i < upper, lambda i, val: (i + 1, body_fun(i, val)), (lower, init_val))[1] + +def stop_gradient(variable): + return tf.stop_gradient(variable) + +def unstack(x, num=None, axis=0): + return tf.unstack(x, num=num, axis=axis) + +def random_seed_dtype(): + return 'int64' + +def custom_gradient(fun): + return tf.custom_gradient(f=fun) + +class name_scope(base_name_scope): + + def __init__(self, name, **kwargs): + super().__init__(name, **kwargs) + self._tf_name_scope = tf.name_scope(name) + + def __enter__(self): + name_scope_stack = global_state.get_global_attribute('name_scope_stack', default=[], set_to_default=True) + if self.deduplicate and name_scope_stack: + parent_caller = name_scope_stack[-1].caller + parent_name = name_scope_stack[-1].name + if self.caller is not None and self.caller is parent_caller and (self.name == parent_name): + return self + name_scope_stack.append(self) + self._pop_on_exit = True + self._tf_name_scope.__enter__() + return self + + def __exit__(self, *args, **kwargs): + super().__exit__(*args, **kwargs) + if self._pop_on_exit: + self._tf_name_scope.__exit__(*args, **kwargs) + +def device_scope(device_name): + return tf.device(device_name) + +# File: keras-master/keras/src/backend/tensorflow/distribution_lib.py +"""""" +import tensorflow as tf +from tensorflow.experimental import dtensor + +def list_devices(device_type=None): + device_type = device_type.upper() if device_type else None + tf_devices = tf.config.list_logical_devices(device_type=device_type) + cpu_devices = [] + other_devices = [] + for device in tf_devices: + if device.device_type.lower() == 'cpu': + cpu_devices.append(device) + else: + other_devices.append(device) + if device_type is None: + tf_devices = other_devices if len(other_devices) > 0 else cpu_devices + return [f"{device.device_type.lower()}:{device.name.split(':')[-1]}" for device in tf_devices] + +def distribute_value(value, tensor_layout): + pass + +def _to_dtensor_mesh(device_mesh): + mesh_dims = list(zip(device_mesh.axis_names, device_mesh.shape)) + return dtensor.create_distributed_mesh(mesh_dims=mesh_dims, local_devices=device_mesh.devices.flatten()) + +def _to_dtensor_layout(tensor_layout): + if tensor_layout.device_mesh is None: + raise ValueError('Cannot create sharding when device mesh is not set for TensorLayout.') + sharding_specs = [axis if axis else dtensor.UNSHARDED for axis in tensor_layout.axes] + dtensor_mesh = _to_dtensor_mesh(tensor_layout.device_mesh) + return dtensor.Layout(sharding_specs=sharding_specs, mesh=dtensor_mesh) + +# File: keras-master/keras/src/backend/tensorflow/image.py +import functools +import itertools +import operator +import tensorflow as tf +from keras.src import backend +from keras.src.backend.tensorflow.core import convert_to_tensor +RESIZE_INTERPOLATIONS = ('bilinear', 'nearest', 'lanczos3', 'lanczos5', 'bicubic', 'area') + +def rgb_to_grayscale(images, data_format=None): + images = convert_to_tensor(images) + data_format = backend.standardize_data_format(data_format) + channels_axis = -1 if data_format == 'channels_last' else -3 + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of 
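# --- Example (editor's sketch; not from the Keras sources) ------------------
# The image routines in this file back `keras.ops.image`. A minimal grayscale
# conversion on a random channels-last batch.
import numpy as np
from keras import ops

images = np.random.uniform(size=(2, 8, 8, 3)).astype("float32")  # (batch, H, W, RGB)
gray = ops.image.rgb_to_grayscale(images)  # applies the 0.2989/0.587/0.114 weights
print(ops.shape(gray))  # -> (2, 8, 8, 1)
# ----------------------------------------------------------------------------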
images). Received input with shape: images.shape={images.shape}') + original_dtype = images.dtype + compute_dtype = backend.result_type(images.dtype, float) + images = tf.cast(images, compute_dtype) + rgb_weights = convert_to_tensor([0.2989, 0.587, 0.114], dtype=images.dtype) + images = tf.tensordot(images, rgb_weights, axes=(channels_axis, -1)) + images = tf.expand_dims(images, axis=channels_axis) + return tf.cast(images, original_dtype) + +def rgb_to_hsv(images, data_format=None): + images = convert_to_tensor(images) + dtype = images.dtype + data_format = backend.standardize_data_format(data_format) + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if not backend.is_float_dtype(dtype): + raise ValueError(f'Invalid images dtype: expected float dtype. Received: images.dtype={backend.standardize_dtype(dtype)}') + if data_format == 'channels_first': + if len(images.shape) == 4: + images = tf.transpose(images, (0, 2, 3, 1)) + else: + images = tf.transpose(images, (1, 2, 0)) + images = tf.image.rgb_to_hsv(images) + if data_format == 'channels_first': + if len(images.shape) == 4: + images = tf.transpose(images, (0, 3, 1, 2)) + elif len(images.shape) == 3: + images = tf.transpose(images, (2, 0, 1)) + return images + +def hsv_to_rgb(images, data_format=None): + images = convert_to_tensor(images) + dtype = images.dtype + data_format = backend.standardize_data_format(data_format) + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if not backend.is_float_dtype(dtype): + raise ValueError(f'Invalid images dtype: expected float dtype. Received: images.dtype={backend.standardize_dtype(dtype)}') + if data_format == 'channels_first': + if len(images.shape) == 4: + images = tf.transpose(images, (0, 2, 3, 1)) + else: + images = tf.transpose(images, (1, 2, 0)) + images = tf.image.hsv_to_rgb(images) + if data_format == 'channels_first': + if len(images.shape) == 4: + images = tf.transpose(images, (0, 3, 1, 2)) + elif len(images.shape) == 3: + images = tf.transpose(images, (2, 0, 1)) + return images + +def resize(images, size, interpolation='bilinear', antialias=False, crop_to_aspect_ratio=False, pad_to_aspect_ratio=False, fill_mode='constant', fill_value=0.0, data_format=None): + data_format = backend.standardize_data_format(data_format) + if interpolation not in RESIZE_INTERPOLATIONS: + raise ValueError(f'Invalid value for argument `interpolation`. Expected of one {RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}') + if fill_mode != 'constant': + raise ValueError(f"Invalid value for argument `fill_mode`. Only `'constant'` is supported. Received: fill_mode={fill_mode}") + if pad_to_aspect_ratio and crop_to_aspect_ratio: + raise ValueError('Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` can be `True`.') + if not len(size) == 2: + raise ValueError(f'Argument `size` must be a tuple of two elements (height, width). Received: size={size}') + size = tuple(size) + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). 
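# --- Example (editor's sketch; not from the Keras sources) ------------------
# Sketch of `keras.ops.image.resize`, which routes to the `resize` below on
# the TensorFlow backend; the target size and interpolation are illustrative.
import numpy as np
from keras import ops

images = np.random.uniform(size=(2, 32, 48, 3)).astype("float32")
out = ops.image.resize(images, size=(64, 64), interpolation="bilinear")
print(ops.shape(out))  # -> (2, 64, 64, 3)
# ----------------------------------------------------------------------------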
Received input with shape: images.shape={images.shape}') + if data_format == 'channels_first': + if len(images.shape) == 4: + images = tf.transpose(images, (0, 2, 3, 1)) + else: + images = tf.transpose(images, (1, 2, 0)) + if crop_to_aspect_ratio: + shape = tf.shape(images) + (height, width) = (shape[-3], shape[-2]) + (target_height, target_width) = size + crop_height = tf.cast(tf.cast(width * target_height, 'float32') / target_width, 'int32') + crop_height = tf.maximum(tf.minimum(height, crop_height), 1) + crop_height = tf.cast(crop_height, 'int32') + crop_width = tf.cast(tf.cast(height * target_width, 'float32') / target_height, 'int32') + crop_width = tf.maximum(tf.minimum(width, crop_width), 1) + crop_width = tf.cast(crop_width, 'int32') + crop_box_hstart = tf.cast(tf.cast(height - crop_height, 'float32') / 2, 'int32') + crop_box_wstart = tf.cast(tf.cast(width - crop_width, 'float32') / 2, 'int32') + if len(images.shape) == 4: + images = images[:, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width, :] + else: + images = images[crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width, :] + elif pad_to_aspect_ratio: + shape = tf.shape(images) + (height, width) = (shape[-3], shape[-2]) + (target_height, target_width) = size + pad_height = tf.cast(tf.cast(width * target_height, 'float32') / target_width, 'int32') + pad_height = tf.maximum(height, pad_height) + pad_height = tf.cast(pad_height, 'int32') + pad_width = tf.cast(tf.cast(height * target_width, 'float32') / target_height, 'int32') + pad_width = tf.maximum(width, pad_width) + pad_width = tf.cast(pad_width, 'int32') + img_box_hstart = tf.cast(tf.cast(pad_height - height, 'float32') / 2, 'int32') + img_box_wstart = tf.cast(tf.cast(pad_width - width, 'float32') / 2, 'int32') + if len(images.shape) == 4: + batch_size = tf.shape(images)[0] + channels = tf.shape(images)[3] + padded_img = tf.cond(img_box_hstart > 0, lambda : tf.concat([tf.ones((batch_size, img_box_hstart, width, channels), dtype=images.dtype) * fill_value, images, tf.ones((batch_size, img_box_hstart, width, channels), dtype=images.dtype) * fill_value], axis=1), lambda : images) + padded_img = tf.cond(img_box_wstart > 0, lambda : tf.concat([tf.ones((batch_size, height, img_box_wstart, channels), dtype=images.dtype) * fill_value, padded_img, tf.ones((batch_size, height, img_box_wstart, channels), dtype=images.dtype) * fill_value], axis=2), lambda : padded_img) + else: + channels = tf.shape(images)[2] + padded_img = tf.cond(img_box_hstart > 0, lambda : tf.concat([tf.ones((img_box_hstart, width, channels), dtype=images.dtype) * fill_value, images, tf.ones((img_box_hstart, width, channels), dtype=images.dtype) * fill_value], axis=0), lambda : images) + padded_img = tf.cond(img_box_wstart > 0, lambda : tf.concat([tf.ones((height, img_box_wstart, channels), dtype=images.dtype) * fill_value, padded_img, tf.ones((height, img_box_wstart, channels), dtype=images.dtype) * fill_value], axis=1), lambda : padded_img) + images = padded_img + resized = tf.image.resize(images, size, method=interpolation, antialias=antialias) + if data_format == 'channels_first': + if len(images.shape) == 4: + resized = tf.transpose(resized, (0, 3, 1, 2)) + elif len(images.shape) == 3: + resized = tf.transpose(resized, (2, 0, 1)) + return resized +AFFINE_TRANSFORM_INTERPOLATIONS = ('nearest', 'bilinear') +AFFINE_TRANSFORM_FILL_MODES = ('constant', 'nearest', 'wrap', 'reflect') + +def affine_transform(images, transform, 
interpolation='bilinear', fill_mode='constant', fill_value=0, data_format=None): + data_format = backend.standardize_data_format(data_format) + if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS: + raise ValueError(f'Invalid value for argument `interpolation`. Expected of one {AFFINE_TRANSFORM_INTERPOLATIONS}. Received: interpolation={interpolation}') + if fill_mode not in AFFINE_TRANSFORM_FILL_MODES: + raise ValueError(f'Invalid value for argument `fill_mode`. Expected of one {AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}') + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if len(transform.shape) not in (1, 2): + raise ValueError(f'Invalid transform rank: expected rank 1 (single transform) or rank 2 (batch of transforms). Received input with shape: transform.shape={transform.shape}') + need_squeeze = False + if len(images.shape) == 3: + images = tf.expand_dims(images, axis=0) + need_squeeze = True + if len(transform.shape) == 1: + transform = tf.expand_dims(transform, axis=0) + if data_format == 'channels_first': + images = tf.transpose(images, (0, 2, 3, 1)) + affined = tf.raw_ops.ImageProjectiveTransformV3(images=images, transforms=tf.cast(transform, dtype=tf.float32), output_shape=tf.shape(images)[1:-1], fill_value=fill_value, interpolation=interpolation.upper(), fill_mode=fill_mode.upper()) + affined = tf.ensure_shape(affined, images.shape) + if data_format == 'channels_first': + affined = tf.transpose(affined, (0, 3, 1, 2)) + if need_squeeze: + affined = tf.squeeze(affined, axis=0) + return affined + +def _mirror_index_fixer(index, size): + s = size - 1 + return tf.abs((index + s) % (2 * s) - s) + +def _reflect_index_fixer(index, size): + return tf.math.floordiv(_mirror_index_fixer(2 * index + 1, 2 * size + 1) - 1, 2) +_INDEX_FIXERS = {'constant': lambda index, size: index, 'nearest': lambda index, size: tf.clip_by_value(index, 0, size - 1), 'wrap': lambda index, size: index % size, 'mirror': _mirror_index_fixer, 'reflect': _reflect_index_fixer} + +def _nearest_indices_and_weights(coordinate): + coordinate = coordinate if coordinate.dtype.is_integer else tf.round(coordinate) + index = tf.cast(coordinate, tf.int32) + weight = tf.constant(1, coordinate.dtype) + return [(index, weight)] + +def _linear_indices_and_weights(coordinate): + lower = tf.floor(coordinate) + upper_weight = coordinate - lower + lower_weight = 1 - upper_weight + index = tf.cast(lower, tf.int32) + return [(index, lower_weight), (index + 1, upper_weight)] + +def map_coordinates(inputs, coordinates, order, fill_mode='constant', fill_value=0.0): + input_arr = convert_to_tensor(inputs) + coordinate_arrs = convert_to_tensor(coordinates) + if coordinate_arrs.shape[0] != len(input_arr.shape): + raise ValueError(f'First dim of `coordinates` must be the same as the rank of `inputs`. Received inputs with shape: {input_arr.shape} and coordinate leading dim of {coordinate_arrs.shape[0]}') + if len(coordinate_arrs.shape) < 2: + raise ValueError(f'Invalid coordinates rank: expected at least rank 2. Received input with shape: {coordinate_arrs.shape}') + coordinate_arrs = tf.unstack(coordinate_arrs, axis=0) + fill_value = convert_to_tensor(tf.cast(fill_value, input_arr.dtype)) + index_fixer = _INDEX_FIXERS.get(fill_mode) + if index_fixer is None: + raise ValueError(f'Invalid value for argument `fill_mode`. Expected one of {set(_INDEX_FIXERS.keys())}. 
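# --- Example (editor's sketch; not from the Keras sources) ------------------
# Sketch of `keras.ops.image.affine_transform`. The transform is the 8-vector
# [a0, a1, a2, b0, b1, b2, c0, c1]: each output pixel (x, y) is sampled from
# the input at (a0*x + a1*y + a2, b0*x + b1*y + b2); here a pure translation.
import numpy as np
from keras import ops

image = np.random.uniform(size=(16, 16, 3)).astype("float32")
translate = np.array([1.0, 0.0, 3.0, 0.0, 1.0, 2.0, 0.0, 0.0], "float32")
out = ops.image.affine_transform(image, translate, fill_mode="constant", fill_value=0.0)
print(ops.shape(out))  # -> (16, 16, 3)
# ----------------------------------------------------------------------------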
Received: fill_mode={fill_mode}') + + def is_valid(index, size): + if fill_mode == 'constant': + return (0 <= index) & (index < size) + else: + return True + if order == 0: + interp_fun = _nearest_indices_and_weights + elif order == 1: + interp_fun = _linear_indices_and_weights + else: + raise NotImplementedError('map_coordinates currently requires order<=1') + valid_1d_interpolations = [] + for (coordinate, size) in zip(coordinate_arrs, input_arr.shape): + interp_nodes = interp_fun(coordinate) + valid_interp = [] + for (index, weight) in interp_nodes: + fixed_index = index_fixer(index, size) + valid = is_valid(index, size) + valid_interp.append((fixed_index, valid, weight)) + valid_1d_interpolations.append(valid_interp) + outputs = [] + for items in itertools.product(*valid_1d_interpolations): + (indices, validities, weights) = zip(*items) + indices = tf.transpose(tf.stack(indices)) + + def fast_path(): + return tf.transpose(tf.gather_nd(input_arr, indices)) + + def slow_path(): + all_valid = functools.reduce(operator.and_, validities) + return tf.where(all_valid, tf.transpose(tf.gather_nd(input_arr, indices)), fill_value) + contribution = tf.cond(tf.reduce_all(validities), fast_path, slow_path) + outputs.append(functools.reduce(operator.mul, weights) * tf.cast(contribution, weights[0].dtype)) + result = functools.reduce(operator.add, outputs) + if input_arr.dtype.is_integer: + result = result if result.dtype.is_integer else tf.round(result) + return tf.cast(result, input_arr.dtype) + +# File: keras-master/keras/src/backend/tensorflow/layer.py +import tensorflow as tf +from keras.src import tree +from keras.src.backend.tensorflow.trackable import KerasAutoTrackable +from keras.src.utils import tf_utils +from keras.src.utils import tracking + +class TFLayer(KerasAutoTrackable): + + def __init__(self, *args, **kwargs): + self._saved_model_inputs_spec = None + self._saved_model_arg_spec = None + self._tracked = [] + + @tf.__internal__.tracking.no_automatic_dependency_tracking + def _set_save_spec(self, inputs, args=None, kwargs=None): + if self._saved_model_inputs_spec is not None: + return + inputs_spec = tree.map_structure(tf_utils.get_tensor_spec, inputs) + args_spec = tree.map_structure(tf_utils.get_tensor_spec, args or []) + kwargs_spec = {} + for (key, kwarg) in kwargs.items(): + flat_kwarg = tree.flatten(kwarg) + flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_kwarg] + if any((s is None for s in flat_specs)): + continue + kwargs_spec[key] = tree.pack_sequence_as(kwarg, flat_specs) + self._saved_model_inputs_spec = inputs_spec + self._saved_model_arg_spec = ([inputs_spec] + list(args_spec), kwargs_spec) + + def _trackable_children(self, save_type='checkpoint', **kwargs): + if save_type == 'savedmodel': + train_function = getattr(self, 'train_function', None) + test_function = getattr(self, 'test_function', None) + predict_function = getattr(self, 'predict_function', None) + self.train_function = None + self.test_function = None + self.predict_function = None + children = super()._trackable_children(save_type, **kwargs) + if save_type == 'savedmodel': + self.train_function = train_function + self.test_function = test_function + self.predict_function = predict_function + for tracked_attr in self._tracked: + tracked_item = getattr(self, tracked_attr) + if isinstance(tracked_item, tracking.TrackedList): + children[tracked_attr] = list(tracked_item) + if isinstance(tracked_item, tracking.TrackedDict): + children[tracked_attr] = dict(tracked_item) + if isinstance(tracked_item, 
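# --- Example (editor's sketch; not from the Keras sources) ------------------
# Sketch of `keras.ops.image.map_coordinates`: samples `inputs` at fractional
# coordinates, with nearest (order=0) or bilinear (order=1) interpolation.
# The leading axis of `coordinates` indexes the input dimensions.
import numpy as np
from keras import ops

grid = np.arange(16, dtype="float32").reshape(4, 4)      # grid[r, c] = 4*r + c
coords = np.array([[0.5, 1.5], [0.5, 2.5]], "float32")   # rows, then cols
print(ops.image.map_coordinates(grid, coords, order=1))  # -> [2.5 8.5]
# ----------------------------------------------------------------------------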
tracking.TrackedSet): + children[tracked_attr] = list(tracked_item) + return children + + @property + def _default_save_signature(self): + from keras.src.models.functional import Functional + from keras.src.models.model import Model + from keras.src.models.sequential import Sequential + if not isinstance(self, Model): + return None + inputs = None + if isinstance(self, Sequential) and getattr(self, '_functional', None) is not None: + inputs = self._functional.input + elif isinstance(self, Functional): + inputs = self.input + if inputs is not None: + input_signature = (tree.map_structure(lambda x: tf.TensorSpec(x.shape, x.dtype), inputs),) + else: + input_signature = tuple((tree.map_shape_structure(lambda s: tf.TensorSpec(s, self.input_dtype), value) for value in self._build_shapes_dict.values())) + + @tf.function(input_signature=input_signature) + def serving_default(inputs): + return self(inputs) + return serving_default + +# File: keras-master/keras/src/backend/tensorflow/linalg.py +import tensorflow as tf +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.tensorflow.core import cast +from keras.src.backend.tensorflow.core import convert_to_tensor + +def cholesky(a): + out = tf.linalg.cholesky(a) + return tf.debugging.check_numerics(out, 'Cholesky') + +def det(a): + return tf.linalg.det(a) + +def eig(a): + return tf.linalg.eig(a) + +def eigh(a): + return tf.linalg.eigh(a) + +def inv(a): + return tf.linalg.inv(a) + +def lu_factor(a): + (lu, p) = tf.linalg.lu(a) + return (lu, tf.math.invert_permutation(p)) + +def norm(x, ord=None, axis=None, keepdims=False): + from keras.src.backend.tensorflow.numpy import moveaxis + x = convert_to_tensor(x) + x_shape = x.shape + ndim = x_shape.rank + if axis is None: + axis = tuple(range(ndim)) + elif isinstance(axis, int): + axis = (axis,) + if any((a < -ndim or a >= ndim for a in axis)): + raise ValueError(f'All `axis` values must be in the range [-ndim, ndim). Received inputs with ndim={ndim}, while axis={axis}') + axis = axis[0] if len(axis) == 1 else axis + num_axes = 1 if isinstance(axis, int) else len(axis) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + if num_axes == 1: + if ord is None or ord == 2: + return tf.sqrt(tf.reduce_sum(x * tf.math.conj(x), axis=axis, keepdims=keepdims)) + elif ord == float('inf'): + return tf.math.reduce_max(tf.math.abs(x), axis=axis, keepdims=keepdims) + elif ord == float('-inf'): + return tf.math.reduce_min(tf.math.abs(x), axis=axis, keepdims=keepdims) + elif ord == 0: + return tf.math.reduce_sum(tf.cast(tf.not_equal(x, 0), dtype=x.dtype), axis=axis, keepdims=keepdims) + elif isinstance(ord, str): + raise ValueError(f'Invalid `ord` argument for vector norm. 
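# --- Example (editor's sketch; not from the Keras sources) ------------------
# The linalg routines below are surfaced through `keras.ops`; a short norm
# sketch (`ord=None` gives the 2-norm for vectors, Frobenius for matrices).
import numpy as np
from keras import ops

v = np.array([3.0, 4.0], "float32")
m = np.eye(3, dtype="float32")
print(ops.norm(v))                          # -> 5.0
print(ops.norm(m, ord="fro", axis=(0, 1)))  # -> 1.7320508 (sqrt(3))
# ----------------------------------------------------------------------------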
Received: ord={ord}') + else: + ord = convert_to_tensor(ord, dtype=x.dtype) + out = tf.math.reduce_sum(tf.pow(tf.math.abs(x), ord), axis=axis, keepdims=keepdims) + return tf.pow(out, 1.0 / ord) + elif num_axes == 2: + (row_axis, col_axis) = (axis[0], axis[1]) + row_axis = row_axis + ndim if row_axis < 0 else row_axis + col_axis = col_axis + ndim if col_axis < 0 else col_axis + if ord is None or ord == 'fro': + return tf.sqrt(tf.reduce_sum(x * tf.math.conj(x), axis=axis, keepdims=keepdims)) + elif ord == 1: + if not keepdims and col_axis > row_axis: + col_axis -= 1 + x = tf.math.reduce_max(tf.reduce_sum(tf.math.abs(x), axis=row_axis, keepdims=keepdims), axis=col_axis, keepdims=keepdims) + elif ord == -1: + if not keepdims and col_axis > row_axis: + col_axis -= 1 + x = tf.math.reduce_min(tf.reduce_sum(tf.math.abs(x), axis=row_axis, keepdims=keepdims), axis=col_axis, keepdims=keepdims) + elif ord == float('inf'): + if not keepdims and row_axis > col_axis: + row_axis -= 1 + x = tf.math.reduce_max(tf.reduce_sum(tf.math.abs(x), axis=col_axis, keepdims=keepdims), axis=row_axis, keepdims=keepdims) + elif ord == float('-inf'): + if not keepdims and row_axis > col_axis: + row_axis -= 1 + x = tf.math.reduce_min(tf.reduce_sum(tf.math.abs(x), axis=col_axis, keepdims=keepdims), axis=row_axis, keepdims=keepdims) + elif ord in ('nuc', 2, -2): + x = moveaxis(x, axis, (-2, -1)) + if ord == -2: + x = tf.math.reduce_min(tf.linalg.svd(x, compute_uv=False), axis=-1) + elif ord == 2: + x = tf.math.reduce_max(tf.linalg.svd(x, compute_uv=False), axis=-1) + else: + x = tf.math.reduce_sum(tf.linalg.svd(x, compute_uv=False), axis=-1) + if keepdims: + x = tf.expand_dims(x, axis[0]) + x = tf.expand_dims(x, axis[1]) + else: + raise ValueError(f'Invalid `ord` argument for matrix norm. Received: ord={ord}') + return x + else: + raise ValueError(f'Invalid axis values. Received: axis={axis}') + +def qr(x, mode='reduced'): + if mode not in {'reduced', 'complete'}: + raise ValueError(f"`mode` argument value not supported. Expected one of {{'reduced', 'complete'}}. Received: mode={mode}") + if mode == 'reduced': + return tf.linalg.qr(x) + return tf.linalg.qr(x, full_matrices=True) + +def solve(a, b): + if tf.rank(b) == tf.rank(a) - 1: + b = tf.expand_dims(b, axis=-1) + return tf.squeeze(tf.linalg.solve(a, b), axis=-1) + return tf.linalg.solve(a, b) + +def solve_triangular(a, b, lower=False): + if b.shape.ndims == a.shape.ndims - 1: + b = tf.expand_dims(b, axis=-1) + return tf.squeeze(tf.linalg.triangular_solve(a, b, lower=lower), axis=-1) + return tf.linalg.triangular_solve(a, b, lower=lower) + +def svd(x, full_matrices=True, compute_uv=True): + if compute_uv is False: + return tf.linalg.svd(x, full_matrices=full_matrices, compute_uv=False) + (s, u, v) = tf.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv) + return (u, s, tf.linalg.adjoint(v)) + +def lstsq(a, b, rcond=None): + a = convert_to_tensor(a) + b = convert_to_tensor(b) + if a.shape[0] != b.shape[0]: + raise ValueError('Leading dimensions of input arrays must match') + b_orig_ndim = b.ndim + if b_orig_ndim == 1: + b = b[:, None] + if a.ndim != 2: + raise TypeError(f'{a.ndim}-dimensional array given. Array must be two-dimensional') + if b.ndim != 2: + raise TypeError(f'{b.ndim}-dimensional array given. 
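# --- Example (editor's sketch; not from the Keras sources) ------------------
# Sketch of `keras.ops.solve` for the square system A x = b; the backend
# function above expands rank-1 right-hand sides automatically.
import numpy as np
from keras import ops

a = np.array([[3.0, 1.0], [1.0, 2.0]], "float32")
b = np.array([9.0, 8.0], "float32")
print(ops.solve(a, b))  # -> [2. 3.]  (3*2 + 3 = 9, 2 + 2*3 = 8)
# ----------------------------------------------------------------------------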
Array must be one or two-dimensional') + (m, n) = a.shape + dtype = a.dtype + eps = tf.experimental.numpy.finfo(dtype).eps + if a.shape == (): + s = tf.zeros(0, dtype=a.dtype) + x = tf.zeros((n, *b.shape[1:]), dtype=a.dtype) + else: + if rcond is None: + rcond = eps * max(n, m) + else: + rcond = tf.where(rcond < 0, eps, rcond) + (u, s, vt) = svd(a, full_matrices=False) + mask = s >= tf.convert_to_tensor(rcond, dtype=s.dtype) * s[0] + safe_s = tf.cast(tf.where(mask, s, 1), dtype=a.dtype) + s_inv = tf.where(mask, 1 / safe_s, 0)[:, tf.newaxis] + u_t_b = tf.matmul(tf.transpose(tf.math.conj(u)), b) + x = tf.matmul(tf.transpose(tf.math.conj(vt)), s_inv * u_t_b) + if b_orig_ndim == 1: + x = tf.reshape(x, [-1]) + return x + +# File: keras-master/keras/src/backend/tensorflow/math.py +import tensorflow as tf +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.tensorflow.core import cast +from keras.src.backend.tensorflow.core import convert_to_tensor + +def segment_sum(data, segment_ids, num_segments=None, sorted=False): + if sorted: + if num_segments is not None: + raise ValueError(f'Argument `num_segments` cannot be set when `sorted=True` with the tensorflow backend. Received: num_segments={num_segments}, sorted={sorted}.') + return tf.math.segment_sum(data, segment_ids) + else: + if num_segments is None: + (unique_segment_ids, _) = tf.unique(segment_ids) + num_segments = tf.shape(unique_segment_ids)[0] + return tf.math.unsorted_segment_sum(data, segment_ids, num_segments) + +def segment_max(data, segment_ids, num_segments=None, sorted=False): + if sorted: + if num_segments is not None: + raise ValueError(f'Argument `num_segments` cannot be set when `sorted=True` with the tensorflow backend. Received: num_segments={num_segments}, sorted={sorted}.') + return tf.math.segment_max(data, segment_ids) + else: + if num_segments is None: + (unique_segment_ids, _) = tf.unique(segment_ids) + num_segments = tf.shape(unique_segment_ids)[0] + return tf.math.unsorted_segment_max(data, segment_ids, num_segments) + +def top_k(x, k, sorted=True): + return tf.math.top_k(x, k, sorted=sorted) + +def in_top_k(targets, predictions, k): + return tf.math.in_top_k(targets, predictions, k) + +def logsumexp(x, axis=None, keepdims=False): + return tf.math.reduce_logsumexp(x, axis=axis, keepdims=keepdims) + +def qr(x, mode='reduced'): + if mode not in {'reduced', 'complete'}: + raise ValueError(f"`mode` argument value not supported. Expected one of {{'reduced', 'complete'}}. Received: mode={mode}") + if mode == 'reduced': + return tf.linalg.qr(x) + return tf.linalg.qr(x, full_matrices=True) + +def extract_sequences(x, sequence_length, sequence_stride): + return tf.signal.frame(x, frame_length=sequence_length, frame_step=sequence_stride, axis=-1, pad_end=False) + +def _get_complex_tensor_from_tuple(x): + if not isinstance(x, (tuple, list)) or len(x) != 2: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Received: x={x}') + (real, imag) = x + real = convert_to_tensor(real) + imag = convert_to_tensor(imag) + if real.shape != imag.shape: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Both the real and imaginary parts should have the same shape. 
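# --- Example (editor's sketch; not from the Keras sources) ------------------
# Sketch of `keras.ops.segment_sum` as implemented above: entries of `data`
# sharing a segment id are summed together.
import numpy as np
from keras import ops

data = np.array([1.0, 2.0, 3.0, 4.0], "float32")
ids = np.array([0, 0, 1, 1], "int32")
print(ops.segment_sum(data, ids, num_segments=2))  # -> [3. 7.]
# ----------------------------------------------------------------------------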
Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}') + if not real.dtype.is_floating or not imag.dtype.is_floating: + raise ValueError(f'At least one tensor in input `x` is not of type float.Received: x={x}.') + complex_input = tf.dtypes.complex(real, imag) + return complex_input + +def fft(x): + complex_input = _get_complex_tensor_from_tuple(x) + complex_output = tf.signal.fft(complex_input) + return (tf.math.real(complex_output), tf.math.imag(complex_output)) + +def fft2(x): + complex_input = _get_complex_tensor_from_tuple(x) + complex_output = tf.signal.fft2d(complex_input) + return (tf.math.real(complex_output), tf.math.imag(complex_output)) + +def rfft(x, fft_length=None): + if fft_length is not None: + fft_length = [fft_length] + complex_output = tf.signal.rfft(x, fft_length=fft_length) + return (tf.math.real(complex_output), tf.math.imag(complex_output)) + +def irfft(x, fft_length=None): + complex_input = _get_complex_tensor_from_tuple(x) + if fft_length is not None: + fft_length = [fft_length] + return tf.signal.irfft(complex_input, fft_length) + +def stft(x, sequence_length, sequence_stride, fft_length, window='hann', center=True): + if standardize_dtype(x.dtype) not in {'float32', 'float64'}: + raise TypeError(f'Invalid input type. Expected `float32` or `float64`. Received: input type={x.dtype}') + if fft_length < sequence_length: + raise ValueError(f'`fft_length` must equal or larger than `sequence_length`. Received: sequence_length={sequence_length}, fft_length={fft_length}') + if isinstance(window, str): + if window not in {'hann', 'hamming'}: + raise ValueError(f'If a string is passed to `window`, it must be one of `"hann"`, `"hamming"`. Received: window={window}') + x = convert_to_tensor(x) + if center: + pad_width = [(0, 0) for _ in range(len(x.shape))] + pad_width[-1] = (fft_length // 2, fft_length // 2) + x = tf.pad(x, pad_width, mode='reflect') + l_pad = (fft_length - sequence_length) // 2 + r_pad = fft_length - sequence_length - l_pad + if window is not None: + if isinstance(window, str): + if window == 'hann': + win_array = tf.signal.hann_window(sequence_length, periodic=True, dtype=x.dtype) + else: + win_array = tf.signal.hamming_window(sequence_length, periodic=True, dtype=x.dtype) + else: + win_array = convert_to_tensor(window, dtype=x.dtype) + if len(win_array.shape) != 1 or win_array.shape[-1] != sequence_length: + raise ValueError(f'The shape of `window` must be equal to [sequence_length].Received: window shape={win_array.shape}') + win_array = tf.pad(win_array, [[l_pad, r_pad]]) + + def win(frame_step, dtype): + return win_array + else: + win = None + result = tf.signal.stft(x, frame_length=sequence_length + l_pad + r_pad, frame_step=sequence_stride, fft_length=fft_length, window_fn=win) + return (tf.math.real(result), tf.math.imag(result)) + +def istft(x, sequence_length, sequence_stride, fft_length, length=None, window='hann', center=True): + complex_input = _get_complex_tensor_from_tuple(x) + dtype = tf.math.real(complex_input).dtype + expected_output_len = fft_length + sequence_stride * (tf.shape(complex_input)[-2] - 1) + l_pad = (fft_length - sequence_length) // 2 + r_pad = fft_length - sequence_length - l_pad + if window is not None: + if isinstance(window, str): + if window == 'hann': + win_array = tf.signal.hann_window(sequence_length, periodic=True, dtype=dtype) + else: + win_array = tf.signal.hamming_window(sequence_length, periodic=True, dtype=dtype) + else: + win_array = convert_to_tensor(window, dtype=dtype) + if len(win_array.shape) != 
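# --- Example (editor's sketch; not from the Keras sources) ------------------
# The FFT helpers above pass complex values around as (real, imag) tuples.
# A small round trip through `keras.ops.rfft` and `keras.ops.irfft`.
import numpy as np
from keras import ops

x = np.array([0.0, 1.0, 0.0, -1.0], "float32")
real, imag = ops.rfft(x)        # one-sided spectrum of the length-4 signal
print(ops.irfft((real, imag)))  # -> [ 0.  1.  0. -1.]
# ----------------------------------------------------------------------------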
1 or win_array.shape[-1] != sequence_length: + raise ValueError(f'The shape of `window` must be equal to [sequence_length].Received: window shape={win_array.shape}') + win_array = tf.pad(win_array, [[l_pad, r_pad]]) + win = tf.signal.inverse_stft_window_fn(sequence_stride, lambda frame_step, dtype: win_array) + else: + win = None + x = tf.signal.inverse_stft(complex_input, frame_length=sequence_length + l_pad + r_pad, frame_step=sequence_stride, fft_length=fft_length, window_fn=win) + start = 0 if center is False else fft_length // 2 + if length is not None: + end = start + length + elif center is True: + end = -(fft_length // 2) + else: + end = expected_output_len + return x[..., start:end] + +def rsqrt(x): + return tf.math.rsqrt(x) + +def erf(x): + return tf.math.erf(x) + +def erfinv(x): + return tf.math.erfinv(x) + +def solve(a, b): + a = convert_to_tensor(a) + b = convert_to_tensor(b) + return tf.linalg.solve(a, b) + +def norm(x, ord=None, axis=None, keepdims=False): + from keras.src.backend.tensorflow.numpy import moveaxis + x = convert_to_tensor(x) + x_shape = x.shape + ndim = x_shape.rank + if axis is None: + axis = tuple(range(ndim)) + elif isinstance(axis, int): + axis = (axis,) + axis = axis[0] if len(axis) == 1 else axis + num_axes = 1 if isinstance(axis, int) else len(axis) + if num_axes == 1 and ord is None: + ord = 'euclidean' + elif num_axes == 2 and ord is None: + ord = 'fro' + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + if num_axes == 1 and ord in ('euclidean', 1, 2, float('inf')) or (num_axes == 2 and ord in ('euclidean', 'fro', 1, 2, float('inf'))): + return tf.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims) + if num_axes == 1 and ord not in ('fro', 'nuc'): + if ord == float('-inf'): + return tf.math.reduce_min(tf.math.abs(x), axis=axis, keepdims=keepdims) + elif ord == 0: + return tf.math.reduce_sum(tf.cast(tf.not_equal(x, 0), dtype=x.dtype), axis=axis, keepdims=keepdims) + else: + ord = convert_to_tensor(ord, dtype=x.dtype) + out = tf.math.reduce_sum(tf.pow(tf.math.abs(x), ord), axis=axis, keepdims=keepdims) + return tf.pow(out, 1.0 / ord) + elif num_axes == 2 and ord in ('nuc', float('-inf'), -2, -1): + (row_axis, col_axis) = (axis[0], axis[1]) + row_axis = row_axis + ndim if row_axis < 0 else row_axis + col_axis = col_axis + ndim if col_axis < 0 else col_axis + if ord == float('-inf'): + if not keepdims and row_axis > col_axis: + row_axis -= 1 + x = tf.math.reduce_min(tf.reduce_sum(tf.math.abs(x), axis=col_axis, keepdims=keepdims), axis=row_axis, keepdims=keepdims) + elif ord == -1: + if not keepdims and col_axis > row_axis: + col_axis -= 1 + x = tf.math.reduce_min(tf.reduce_sum(tf.math.abs(x), axis=row_axis, keepdims=keepdims), axis=col_axis, keepdims=keepdims) + else: + x = moveaxis(x, axis, (-2, -1)) + if ord == -2: + x = tf.math.reduce_min(tf.linalg.svd(x, compute_uv=False), axis=-1) + else: + x = tf.math.reduce_sum(tf.linalg.svd(x, compute_uv=False), axis=-1) + if keepdims: + x = tf.expand_dims(x, axis[0]) + x = tf.expand_dims(x, axis[1]) + return x + if num_axes == 1: + raise ValueError(f'Invalid `ord` argument for vector norm. Received: ord={ord}') + elif num_axes == 2: + raise ValueError(f'Invalid `ord` argument for matrix norm. Received: ord={ord}') + else: + raise ValueError(f'Invalid axis values. 
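# --- Example (editor's sketch; not from the Keras sources) ------------------
# Quick sketch of the elementwise helpers above via their `keras.ops` names.
import numpy as np
from keras import ops

x = np.array([1.0, 4.0, 16.0], "float32")
print(ops.rsqrt(x))  # -> [1.   0.5  0.25]
print(ops.erfinv(ops.erf(np.array([0.5], "float32"))))  # -> [0.5] (inverse pair)
# ----------------------------------------------------------------------------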
Received: axis={axis}') + +def logdet(x): + x = convert_to_tensor(x) + return tf.linalg.logdet(x) + +# File: keras-master/keras/src/backend/tensorflow/nn.py +import math +import warnings +import tensorflow as tf +from keras.src import backend +from keras.src.backend.common.backend_utils import compute_conv_transpose_output_shape +from keras.src.backend.tensorflow.core import cast +from keras.src.backend.tensorflow.core import convert_to_tensor + +def relu(x): + return tf.nn.relu(x) + +def relu6(x): + return tf.nn.relu6(x) + +def sigmoid(x): + logits = x + output = tf.nn.sigmoid(x) + output._keras_logits = logits + return output + +def tanh(x): + return tf.nn.tanh(x) + +def softplus(x): + return tf.math.softplus(x) + +def softsign(x): + return tf.nn.softsign(x) + +def silu(x): + return tf.nn.silu(x) + +def log_sigmoid(x): + return tf.math.log_sigmoid(x) + +def leaky_relu(x, negative_slope=0.2): + return tf.nn.leaky_relu(x, alpha=negative_slope) + +def hard_sigmoid(x): + x = convert_to_tensor(x) + return relu6(x + tf.constant(3.0, x.dtype)) / tf.constant(6.0, x.dtype) + +def hard_silu(x): + return x * hard_sigmoid(x) + +def elu(x, alpha=1.0): + res = tf.nn.elu(x) + if alpha == 1: + return res + else: + return tf.where(x > 0, res, alpha * res) + +def selu(x): + return tf.nn.selu(x) + +def gelu(x, approximate=True): + x = convert_to_tensor(x) + return tf.nn.gelu(x, approximate=approximate) + +def softmax(x, axis=-1): + logits = x + if axis is None: + output = tf.reshape(x, [-1]) + output = tf.nn.softmax(output, axis=-1) + output = tf.reshape(output, tf.shape(x)) + else: + output = tf.nn.softmax(x, axis=axis) + output._keras_logits = logits + return output + +def log_softmax(x, axis=-1): + if axis is None: + output = tf.reshape(x, [-1]) + output = tf.nn.log_softmax(output, axis=-1) + return tf.reshape(output, tf.shape(x)) + return tf.nn.log_softmax(x, axis=axis) + +def _transpose_spatial_inputs(inputs): + num_spatial_dims = len(inputs.shape) - 2 + if num_spatial_dims == 1: + inputs = tf.transpose(inputs, (0, 2, 1)) + elif num_spatial_dims == 2: + inputs = tf.transpose(inputs, (0, 2, 3, 1)) + elif num_spatial_dims == 3: + inputs = tf.transpose(inputs, (0, 2, 3, 4, 1)) + else: + raise ValueError(f"Pooling inputs's shape must be 3, 4 or 5, corresponding to 1D, 2D and 3D inputs. 
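# --- Example (editor's sketch; not from the Keras sources) ------------------
# The activations above attach `_keras_logits` so the cross-entropy losses in
# this file can recover logits; from user code they are plain `keras.ops` calls.
import numpy as np
from keras import ops

x = np.array([-1.0, 0.0, 1.0], "float32")
print(ops.relu(x))     # -> [0. 0. 1.]
print(ops.softmax(x))  # sums to 1 along the last axis
# ----------------------------------------------------------------------------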
But received shape: {inputs.shape}.") + return inputs + +def _transpose_spatial_outputs(outputs): + num_spatial_dims = len(outputs.shape) - 2 + if num_spatial_dims == 1: + outputs = tf.transpose(outputs, (0, 2, 1)) + elif num_spatial_dims == 2: + outputs = tf.transpose(outputs, (0, 3, 1, 2)) + elif num_spatial_dims == 3: + outputs = tf.transpose(outputs, (0, 4, 1, 2, 3)) + return outputs + +def max_pool(inputs, pool_size, strides=None, padding='valid', data_format=None): + data_format = backend.standardize_data_format(data_format) + strides = pool_size if strides is None else strides + padding = padding.upper() + tf_data_format = _convert_data_format('channels_last', len(inputs.shape)) + if data_format == 'channels_first': + inputs = _transpose_spatial_inputs(inputs) + outputs = tf.nn.max_pool(inputs, pool_size, strides, padding, tf_data_format) + if data_format == 'channels_first': + outputs = _transpose_spatial_outputs(outputs) + return outputs + +def average_pool(inputs, pool_size, strides=None, padding='valid', data_format=None): + data_format = backend.standardize_data_format(data_format) + strides = pool_size if strides is None else strides + padding = padding.upper() + tf_data_format = _convert_data_format('channels_last', len(inputs.shape)) + if data_format == 'channels_first': + inputs = _transpose_spatial_inputs(inputs) + outputs = tf.nn.avg_pool(inputs, pool_size, strides, padding, tf_data_format) + if data_format == 'channels_first': + outputs = _transpose_spatial_outputs(outputs) + return outputs + +def _convert_data_format(data_format, ndim): + if data_format == 'channels_last': + if ndim == 3: + return 'NWC' + elif ndim == 4: + return 'NHWC' + elif ndim == 5: + return 'NDHWC' + else: + raise ValueError(f'Input rank not supported: {ndim}. Expected values are [3, 4, 5]') + elif data_format == 'channels_first': + if ndim == 3: + return 'NCW' + elif ndim == 4: + return 'NCHW' + elif ndim == 5: + return 'NCDHW' + else: + raise ValueError(f'Input rank not supported: {ndim}. Expected values are [3, 4, 5]') + else: + raise ValueError(f'Invalid data_format: {data_format}. Expected values are ["channels_first", "channels_last"]') + +def conv(inputs, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + + def _conv(): + tf_data_format = _convert_data_format(data_format, len(inputs.shape)) + return tf.nn.convolution(inputs, kernel, strides, padding.upper(), data_format=tf_data_format, dilations=dilation_rate) + + @tf.function(jit_compile=True) + def _conv_xla(): + return _conv() + needs_xla = data_format == 'channels_first' and len(inputs.shape) == 5 + data_format = backend.standardize_data_format(data_format) + if data_format == 'channels_last': + channels = inputs.shape[-1] + else: + channels = inputs.shape[1] + needs_xla = needs_xla or channels != kernel.shape[-2] + if needs_xla: + return _conv_xla() + else: + return _conv() + +def depthwise_conv(inputs, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = len(inputs.shape) - 2 + if num_spatial_dims > 2: + raise ValueError('`inputs` rank must be 3 (1D conv) or 4 (2D conv). 
Received: {inputs.ndim}.') + tf_data_format = _convert_data_format(data_format, 4) + padding = padding.upper() + if isinstance(strides, int): + strides = (strides,) * num_spatial_dims + if isinstance(dilation_rate, int): + dilation_rate = (dilation_rate,) * num_spatial_dims + if num_spatial_dims == 1: + if data_format == 'channels_last': + strides = (1,) + strides * 2 + (1,) + spatial_start_dim = 1 + else: + strides = (1, 1) + strides * 2 + spatial_start_dim = 2 + inputs = tf.expand_dims(inputs, spatial_start_dim) + kernel = tf.expand_dims(kernel, axis=0) + dilation_rate = None if dilation_rate is None else (1,) + dilation_rate + outputs = tf.nn.depthwise_conv2d(inputs, kernel, strides, padding, data_format=tf_data_format, dilations=dilation_rate) + return tf.squeeze(outputs, [spatial_start_dim]) + if data_format == 'channels_last': + strides = (1,) + strides + (1,) + spatial_start_dim = 1 + else: + strides = (1, 1) + strides + spatial_start_dim = 2 + return tf.nn.depthwise_conv2d(inputs, kernel, strides, padding, data_format=tf_data_format, dilations=dilation_rate) + +def separable_conv(inputs, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + data_format = backend.standardize_data_format(data_format) + num_spatial_dims = len(inputs.shape) - 2 + if num_spatial_dims > 2: + raise ValueError(f'`num_spatial_dims` must be 1 or 2. Received: num_spatial_dims={num_spatial_dims}.') + tf_data_format = _convert_data_format(data_format, 4) + padding = padding.upper() + if isinstance(strides, int): + strides = (strides,) * num_spatial_dims + if isinstance(dilation_rate, int): + dilation_rate = (dilation_rate,) * num_spatial_dims + if num_spatial_dims == 1: + if data_format == 'channels_last': + strides = (1,) + strides * 2 + (1,) + spatial_start_dim = 1 + else: + strides = (1, 1) + strides * 2 + spatial_start_dim = 2 + inputs = tf.expand_dims(inputs, spatial_start_dim) + depthwise_kernel = tf.expand_dims(depthwise_kernel, axis=0) + pointwise_kernel = tf.expand_dims(pointwise_kernel, axis=0) + dilation_rate = None if dilation_rate is None else (1,) + dilation_rate + outputs = tf.nn.separable_conv2d(inputs, depthwise_kernel, pointwise_kernel, strides, padding, data_format=tf_data_format, dilations=dilation_rate) + return tf.squeeze(outputs, [spatial_start_dim]) + if data_format == 'channels_last': + strides = (1,) + strides + (1,) + else: + strides = (1, 1) + strides + return tf.nn.separable_conv2d(inputs, depthwise_kernel, pointwise_kernel, strides, padding, data_format=tf_data_format, dilations=dilation_rate) + +def conv_transpose(inputs, kernel, strides=1, padding='valid', output_padding=None, data_format=None, dilation_rate=1): + data_format = backend.standardize_data_format(data_format) + tf_data_format = _convert_data_format(data_format, len(inputs.shape)) + kernel_size = kernel.shape[:-2] + filters = kernel.shape[-2] + input_shape = list(inputs.shape) + symbolic_shape = tf.shape(inputs) + for (i, e) in enumerate(input_shape): + if e is None: + input_shape[i] = symbolic_shape[i] + output_shape = compute_conv_transpose_output_shape(input_shape, kernel_size, filters, strides, padding, output_padding, data_format, dilation_rate) + return tf.nn.conv_transpose(inputs, kernel, output_shape, strides, padding=padding.upper(), data_format=tf_data_format, dilations=dilation_rate) + +def one_hot(x, num_classes, axis=-1, dtype='float32', sparse=False): + x = convert_to_tensor(x, dtype='int64') + if dtype is None: + dtype = 'float32' + else: + dtype = 
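# --- Example (editor's sketch; not from the Keras sources) ------------------
# Sketch of `keras.ops.conv` with an explicit kernel; the kernel layout is
# (kernel_h, kernel_w, in_channels, out_channels) and shapes are illustrative.
import numpy as np
from keras import ops

x = np.random.uniform(size=(1, 8, 8, 3)).astype("float32")
kernel = np.random.uniform(size=(3, 3, 3, 16)).astype("float32")
y = ops.conv(x, kernel, strides=1, padding="same")
print(ops.shape(y))  # -> (1, 8, 8, 16)
# ----------------------------------------------------------------------------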
backend.standardize_dtype(dtype) + if sparse: + if axis < 0: + axis = axis + len(x.shape) + 1 + values_count = math.prod(x.shape) + values = tf.reshape(x, (values_count,)) + values = tf.cast(tf.greater_equal(values, 0), dtype=dtype) + indices = [tf.range(dim) for dim in x.shape] + indices = tf.meshgrid(*indices, indexing='ij') + indices.insert(axis, tf.maximum(x, 0)) + indices = [tf.reshape(a, (values_count, 1)) for a in indices] + indices = [tf.cast(a, tf.int64) for a in indices] + indices = tf.concat(indices, axis=1) + shape = list(x.shape) + shape.insert(axis, num_classes) + return tf.SparseTensor(indices, values, shape) + (on_value, off_value) = (True, False) if dtype == 'bool' else (None, None) + return tf.one_hot(x, num_classes, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype) + +def multi_hot(x, num_classes, axis=-1, dtype='float32', sparse=False): + reduction_axis = 1 if len(x.shape) > 1 else 0 + if backend.standardize_dtype(dtype) == 'bool': + if sparse: + outputs = one_hot(x, num_classes, axis=axis, dtype='int8', sparse=True) + outputs = tf.sparse.reduce_max(outputs, axis=reduction_axis, output_is_sparse=True) + outputs_shape = outputs.shape + outputs = tf.cast(outputs, dtype) + outputs.set_shape(outputs_shape) + return outputs + else: + outputs = one_hot(x, num_classes, axis=axis, dtype=dtype) + return tf.reduce_any(outputs, axis=reduction_axis) + elif sparse: + outputs = one_hot(x, num_classes, axis=axis, dtype=dtype, sparse=True) + return tf.sparse.reduce_max(outputs, axis=reduction_axis, output_is_sparse=True) + else: + outputs = one_hot(x, num_classes, axis=axis, dtype=dtype) + return tf.reduce_max(outputs, axis=reduction_axis) + +def _get_logits(output, from_logits, op_type, fn_name): + output_ = output + from_logits_ = from_logits + has_keras_logits = hasattr(output, '_keras_logits') + if has_keras_logits: + output_ = output._keras_logits + from_logits_ = True + from_expected_op_type = (hasattr(output, 'op') and (not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable))) and (output.op.type == op_type)) and (not has_keras_logits) + if from_expected_op_type: + assert len(output.op.inputs) == 1 + output_ = output.op.inputs[0] + from_logits_ = True + if from_logits and (has_keras_logits or from_expected_op_type): + warnings.warn(f'"`{fn_name}` received `from_logits=True`, but the `output` argument was produced by a {op_type} activation and thus does not represent logits. Was this intended?', stacklevel=2) + return (output_, from_logits_) + +def categorical_crossentropy(target, output, from_logits=False, axis=-1): + target = tf.convert_to_tensor(target) + output = tf.convert_to_tensor(output) + if len(target.shape) < 1: + raise ValueError(f'Arguments `target` and `output` must be at least rank 1. Received: target.shape={target.shape}, output.shape={output.shape}') + if len(target.shape) != len(output.shape): + raise ValueError(f'Arguments `target` and `output` must have the same rank (ndim). Received: target.shape={target.shape}, output.shape={output.shape}') + for (e1, e2) in zip(target.shape, output.shape): + if e1 is not None and e2 is not None and (e1 != e2): + raise ValueError(f'Arguments `target` and `output` must have the same shape. 
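# --- Example (editor's sketch; not from the Keras sources) ------------------
# Sketch of `keras.ops.one_hot` and `keras.ops.multi_hot` as defined above.
import numpy as np
from keras import ops

print(ops.one_hot(np.array([0, 2]), num_classes=3))
# -> [[1. 0. 0.]
#     [0. 0. 1.]]
print(ops.multi_hot(np.array([0, 2]), num_classes=3))  # -> [1. 0. 1.]
# ----------------------------------------------------------------------------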
Received: target.shape={target.shape}, output.shape={output.shape}') + (output, from_logits) = _get_logits(output, from_logits, 'Softmax', 'categorical_crossentropy') + if from_logits: + return tf.nn.softmax_cross_entropy_with_logits(labels=target, logits=output, axis=axis) + output = output / tf.reduce_sum(output, axis, keepdims=True) + output = tf.clip_by_value(output, backend.epsilon(), 1.0 - backend.epsilon()) + return -tf.reduce_sum(target * tf.math.log(output), axis) + +def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): + if axis != -1 and axis != len(output.shape) - 1: + raise ValueError(f'Only axis=-1 is currently supported. Received: axis={axis}') + (output, from_logits) = _get_logits(output, from_logits, 'Softmax', 'sparse_categorical_crossentropy') + target = tf.convert_to_tensor(target) + target = tf.cast(target, dtype='int64') + output = tf.convert_to_tensor(output) + if len(target.shape) == len(output.shape) and target.shape[-1] == 1: + target = tf.squeeze(target, axis=-1) + if len(output.shape) < 1: + raise ValueError(f'Argument `output` must be at least rank 1. Received: output.shape={output.shape}') + if len(target.shape) != len(output.shape[:-1]): + raise ValueError(f'Argument `output` must have rank (ndim) `target.ndim - 1`. Received: target.shape={target.shape}, output.shape={output.shape}') + for (e1, e2) in zip(target.shape, output.shape[:-1]): + if e1 is not None and e2 is not None and (e1 != e2): + raise ValueError(f'Arguments `target` and `output` must have the same shape up until the last dimension: target.shape={target.shape}, output.shape={output.shape}') + if not from_logits: + output = tf.clip_by_value(output, backend.epsilon(), 1 - backend.epsilon()) + output = tf.math.log(output) + result = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output) + return result + +def binary_crossentropy(target, output, from_logits=False): + target = tf.convert_to_tensor(target) + output = tf.convert_to_tensor(output) + if len(target.shape) != len(output.shape): + raise ValueError(f'Arguments `target` and `output` must have the same rank (ndim). Received: target.shape={target.shape}, output.shape={output.shape}') + for (e1, e2) in zip(target.shape, output.shape): + if e1 is not None and e2 is not None and (e1 != e2): + raise ValueError(f'Arguments `target` and `output` must have the same shape. 
Received: target.shape={target.shape}, output.shape={output.shape}') + (output, from_logits) = _get_logits(output, from_logits, 'Sigmoid', 'binary_crossentropy') + if from_logits: + return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output) + output = tf.clip_by_value(output, backend.epsilon(), 1.0 - backend.epsilon()) + bce = target * tf.math.log(output) + bce += (1 - target) * tf.math.log(1 - output) + return -bce + +def moments(x, axes, keepdims=False, synchronized=False): + need_cast = False + ori_dtype = backend.standardize_dtype(x.dtype) + if ori_dtype in ('float16', 'bfloat16'): + need_cast = True + x = cast(x, 'float32') + if synchronized: + (mean, variance) = _compute_moments_sync(x, axes, keepdims) + else: + (mean, variance) = _compute_moments(x, axes, keepdims) + if need_cast: + mean = tf.clip_by_value(mean, tf.float16.min, tf.float16.max) + variance = tf.clip_by_value(variance, tf.float16.min, tf.float16.max) + mean = cast(mean, ori_dtype) + variance = cast(variance, ori_dtype) + return (mean, variance) + +def _compute_moments_sync(x, axes, keepdims): + replica_ctx = tf.distribute.get_replica_context() + if not replica_ctx: + return _compute_moments(x, axes, keepdims) + local_count = tf.ones_like(x, name='count') + local_sum = tf.reduce_sum(x, axis=axes, keepdims=True) + local_squared_sum = tf.reduce_sum(tf.square(x), axis=axes, keepdims=True) + local_count = tf.reduce_sum(local_count, axis=axes, keepdims=True) + y_sum = replica_ctx.all_reduce(tf.distribute.ReduceOp.SUM, local_sum) + y_squared_sum = replica_ctx.all_reduce(tf.distribute.ReduceOp.SUM, local_squared_sum) + count_sum = replica_ctx.all_reduce(tf.distribute.ReduceOp.SUM, local_count) + mean = tf.math.divide_no_nan(y_sum, count_sum) + y_squared_mean = tf.math.divide_no_nan(y_squared_sum, count_sum) + variance = tf.maximum(y_squared_mean - tf.square(mean), 0.0) + if not keepdims: + mean = tf.squeeze(mean, axes) + variance = tf.squeeze(variance, axes) + return (mean, variance) + +def _compute_moments(x, axes, keepdims): + return tf.nn.moments(x, axes, keepdims=keepdims) + +def batch_normalization(x, mean, variance, axis, offset=None, scale=None, epsilon=0.001): + if axis != -1: + shape = [1] * len(x.shape) + shape[axis] = mean.shape[0] + mean = tf.reshape(mean, shape) + variance = tf.reshape(variance, shape) + if offset is not None: + offset = tf.reshape(offset, shape) + if scale is not None: + scale = tf.reshape(scale, shape) + return tf.nn.batch_normalization(x=x, mean=mean, variance=variance, offset=offset, scale=scale, variance_epsilon=epsilon) + +def ctc_loss(target, output, target_length, output_length, mask_index=0): + target = convert_to_tensor(target) + output = convert_to_tensor(output) + target = tf.cast(target, dtype='int32') + result_dtype = backend.result_type(output.dtype, 'float32') + compute_dtype = 'float32' if result_dtype == 'float64' else result_dtype + output = tf.cast(output, compute_dtype) + loss = tf.nn.ctc_loss(labels=target, logits=output, label_length=target_length, logit_length=output_length, blank_index=mask_index, logits_time_major=False) + return tf.cast(loss, result_dtype) + +def ctc_decode(inputs, sequence_lengths, strategy='greedy', beam_width=100, top_paths=1, merge_repeated=True, mask_index=0): + inputs = convert_to_tensor(inputs) + input_shape = tf.shape(inputs) + (num_samples, num_steps) = (input_shape[0], input_shape[1]) + inputs = tf.transpose(inputs, (1, 0, 2)) + dtype = backend.result_type(inputs.dtype, 'float32') + inputs = tf.cast(inputs, dtype) + 
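+ # Editor's note: a minimal usage sketch for this decoder (hypothetical shapes and values,
+ # not part of the Keras source; assumes numpy imported as np). With a batch of 2 sequences,
+ # 10 frames and 5 classes, where class 0 is the blank (the default `mask_index`):
+ #   logits = np.random.randn(2, 10, 5).astype('float32')
+ #   lengths = np.array([10, 8], dtype='int32')
+ #   decoded, scores = ctc_decode(logits, lengths, strategy='greedy')
+ #   decoded.shape  # (top_paths=1, batch=2, num_steps=10), int32, padded with -1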
sequence_lengths = convert_to_tensor(sequence_lengths, dtype='int32') + if strategy == 'greedy': + (decoded, scores) = tf.nn.ctc_greedy_decoder(inputs=inputs, sequence_length=sequence_lengths, merge_repeated=merge_repeated, blank_index=mask_index) + elif strategy == 'beam_search': + if mask_index is not None: + inputs_before = inputs[..., :mask_index] + inputs_mask = inputs[..., mask_index:mask_index + 1] + inputs_after = inputs[..., mask_index + 1:] + inputs = tf.concat([inputs_before, inputs_after, inputs_mask], axis=-1) + (decoded, scores) = tf.nn.ctc_beam_search_decoder(inputs=inputs, sequence_length=sequence_lengths, beam_width=beam_width, top_paths=top_paths) + else: + raise ValueError(f"Invalid strategy {strategy}. Supported values are 'greedy' and 'beam_search'.") + decoded_dense = [] + for st in decoded: + st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps)) + decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1)) + decoded_dense = tf.stack(decoded_dense, axis=0) + decoded_dense = tf.cast(decoded_dense, 'int32') + if strategy == 'beam_search' and mask_index is not None: + if mask_index < 0: + mask_index = mask_index + input_shape[-1] + decoded_dense = tf.where(decoded_dense >= mask_index, decoded_dense + 1, decoded_dense) + return (decoded_dense, scores) + +def psnr(x1, x2, max_val): + from keras.src.backend.tensorflow.numpy import log10 + if x1.shape != x2.shape: + raise ValueError(f'Input shapes {x1.shape} and {x2.shape} must match for PSNR calculation. ') + max_val = convert_to_tensor(max_val, dtype=x2.dtype) + mse = tf.reduce_mean(tf.square(x1 - x2)) + psnr = 20 * log10(max_val) - 10 * log10(mse) + return psnr + +# File: keras-master/keras/src/backend/tensorflow/numpy.py +import builtins +import collections +import functools +import math +import string +import warnings +import numpy as np +import tensorflow as tf +from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops +from tensorflow.python.ops.math_ops import is_nan +from keras.src import tree +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.common.backend_utils import canonicalize_axis +from keras.src.backend.common.backend_utils import to_tuple_or_list +from keras.src.backend.common.backend_utils import vectorize_impl +from keras.src.backend.tensorflow import sparse +from keras.src.backend.tensorflow.core import cast +from keras.src.backend.tensorflow.core import convert_to_tensor +from keras.src.backend.tensorflow.core import shape as shape_op + +@sparse.elementwise_binary_union(tf.sparse.add) +def add(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return tf.add(x1, x2) + +def bincount(x, weights=None, minlength=0, sparse=False): + x = convert_to_tensor(x) + dtypes_to_resolve = [x.dtype] + if standardize_dtype(x.dtype) not in ['int32', 'int64']: + x = tf.cast(x, tf.int32) + if weights is not None: + weights = convert_to_tensor(weights) + dtypes_to_resolve.append(weights.dtype) + dtype = dtypes.result_type(*dtypes_to_resolve) + if standardize_dtype(weights.dtype) not in ['int32', 'int64', 'float32', 'float64']: + if 'int' in standardize_dtype(weights.dtype): + weights = tf.cast(weights, tf.int32) + else: + 
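+ # Editor's note (hypothetical example, not part of the Keras source): weights whose dtype
+ # falls outside int32/int64/float32/float64 are widened at this step before tf.math.bincount,
+ # and the result is cast back to the resolved dtype below. E.g., assuming numpy as np:
+ #   bincount(np.array([0, 1, 1]), weights=np.array([0.5, 1.0, 2.0], dtype='float16'))
+ #   # -> [0.5, 3.0], cast back to float16 under Keras's JAX-style dtype promotion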
weights = tf.cast(weights, tf.float32) + else: + dtype = 'int32' + if sparse or isinstance(x, tf.SparseTensor): + output = tf.sparse.bincount(x, weights=weights, minlength=minlength, axis=-1) + actual_length = output.shape[-1] + if actual_length is None: + actual_length = tf.shape(output)[-1] + output = cast(output, dtype) + if x.shape.rank == 1: + output_shape = (actual_length,) + else: + batch_size = output.shape[0] + if batch_size is None: + batch_size = tf.shape(output)[0] + output_shape = (batch_size, actual_length) + return tf.SparseTensor(indices=output.indices, values=output.values, dense_shape=output_shape) + return tf.cast(tf.math.bincount(x, weights=weights, minlength=minlength, axis=-1), dtype) + +@functools.lru_cache(512) +def _normalize_einsum_subscripts(subscripts): + mapping = {} + normalized_subscripts = '' + for c in subscripts: + if c in string.ascii_letters: + if c not in mapping: + mapping[c] = string.ascii_letters[len(mapping)] + normalized_subscripts += mapping[c] + else: + normalized_subscripts += c + return normalized_subscripts + +def einsum(subscripts, *operands, **kwargs): + operands = tree.map_structure(convert_to_tensor, operands) + subscripts = _normalize_einsum_subscripts(subscripts) + + def is_valid_for_custom_ops(subscripts, *operands): + if subscripts in ['a,b->ab', 'ab,b->a', 'ab,bc->ac', 'ab,cb->ac', 'abc,cd->abd', 'abc,dc->abd', 'abcd,abde->abce', 'abcd,abed->abce', 'abcd,acbe->adbe', 'abcd,adbe->acbe', 'abcd,aecd->acbe', 'abcd,aecd->aceb']: + return True + elif subscripts == 'abc,cde->abde': + (_, b1, c1) = operands[0].shape + (c2, d2, e2) = operands[1].shape + (b, c, d, e) = (b1, c1 or c2, d2, e2) + if None in (b, c, d, e): + return False + return True + elif subscripts == 'abc,dce->abde': + (_, b1, c1) = operands[0].shape + (d2, c2, e2) = operands[1].shape + (b, c, d, e) = (b1, c1 or c2, d2, e2) + if None in (b, c, d, e): + return False + return True + elif subscripts == 'abc,dec->abde': + (_, b1, c1) = operands[0].shape + (d2, e2, c2) = operands[1].shape + (b, c, d, e) = (b1, c1 or c2, d2, e2) + if None in (b, c, d, e): + return False + return True + elif subscripts == 'abcd,cde->abe': + (_, b1, c1, d1) = operands[0].shape + (c2, d2, e2) = operands[1].shape + (b, c, d, e) = (b1, c1 or c2, d1 or d2, e2) + if None in (b, c, d, e): + return False + return True + elif subscripts == 'abcd,ced->abe': + (_, b1, c1, d1) = operands[0].shape + (c2, e2, d2) = operands[1].shape + (b, c, d, e) = (b1, c1 or c2, d1 or d2, e2) + if None in (b, c, d, e): + return False + return True + elif subscripts == 'abcd,ecd->abe': + (_, b1, c1, d1) = operands[0].shape + (e2, c2, d2) = operands[1].shape + (b, c, d, e) = (b1, c1 or c2, d1 or d2, e2) + if None in (b, c, d, e): + return False + return True + elif subscripts == 'abcde,aebf->adbcf': + (_, b1, c1, d1, e1) = operands[0].shape + (_, e2, b2, f2) = operands[1].shape + (b, c, d, e, f) = (b1 or b2, c1, d1, e1 or e2, f2) + if None in (b, c, d, e, f): + return False + return True + elif subscripts == 'abcde,afce->acdbf': + (_, b1, c1, d1, e1) = operands[0].shape + (_, f2, c2, e2) = operands[1].shape + (b, c, d, e, f) = (b1, c1 or c2, d1, e1 or e2, f2) + if None in (b, c, d, e, f): + return False + return True + else: + return False + + def use_custom_ops(subscripts, *operands, output_type): + (x, y) = (operands[0], operands[1]) + if subscripts == 'a,b->ab': + x = tf.expand_dims(x, axis=-1) + y = tf.expand_dims(y, axis=0) + return tf.matmul(x, y, output_type=output_type) + elif subscripts == 'ab,b->a': + y = 
tf.expand_dims(y, axis=-1) + result = tf.matmul(x, y, output_type=output_type) + return tf.squeeze(result, axis=-1) + elif subscripts == 'ab,bc->ac': + return tf.matmul(x, y, output_type=output_type) + elif subscripts == 'ab,cb->ac': + y = tf.transpose(y, [1, 0]) + return tf.matmul(x, y, output_type=output_type) + elif subscripts == 'abc,cd->abd': + return tf.matmul(x, y, output_type=output_type) + elif subscripts == 'abc,cde->abde': + (_, b1, c1) = x.shape + (c2, d2, e2) = y.shape + (b, c, d, e) = (b1, c1 or c2, d2, e2) + y = tf.reshape(y, [c, -1]) + result = tf.matmul(x, y, output_type=output_type) + return tf.reshape(result, [-1, b, d, e]) + elif subscripts == 'abc,dc->abd': + y = tf.transpose(y, [1, 0]) + return tf.matmul(x, y, output_type=output_type) + elif subscripts == 'abc,dce->abde': + (_, b1, c1) = x.shape + (d2, c2, e2) = y.shape + (b, c, d, e) = (b1, c1 or c2, d2, e2) + y = tf.transpose(y, [1, 0, 2]) + y = tf.reshape(y, [c, -1]) + result = tf.matmul(x, y, output_type=output_type) + return tf.reshape(result, [-1, b, d, e]) + elif subscripts == 'abc,dec->abde': + (_, b1, c1) = x.shape + (d2, e2, c2) = y.shape + (b, c, d, e) = (b1, c1 or c2, d2, e2) + y = tf.transpose(y, [2, 0, 1]) + y = tf.reshape(y, [c, -1]) + result = tf.matmul(x, y, output_type=output_type) + return tf.reshape(result, [-1, b, d, e]) + elif subscripts == 'abcd,abde->abce': + return tf.matmul(x, y, output_type=output_type) + elif subscripts == 'abcd,abed->abce': + y = tf.transpose(y, [0, 1, 3, 2]) + return tf.matmul(x, y, output_type=output_type) + elif subscripts == 'abcd,acbe->adbe': + x = tf.transpose(x, [0, 1, 3, 2]) + y = tf.transpose(y, [0, 2, 1, 3]) + result = tf.matmul(x, y, output_type=output_type) + return tf.transpose(result, [0, 2, 1, 3]) + elif subscripts == 'abcd,adbe->acbe': + y = tf.transpose(y, [0, 2, 1, 3]) + result = tf.matmul(x, y, output_type=output_type) + return tf.transpose(result, [0, 2, 1, 3]) + elif subscripts == 'abcd,aecd->acbe': + x = tf.transpose(x, [0, 2, 1, 3]) + y = tf.transpose(y, [0, 2, 3, 1]) + return tf.matmul(x, y, output_type=output_type) + elif subscripts == 'abcd,aecd->aceb': + x = tf.transpose(x, [0, 2, 1, 3]) + y = tf.transpose(y, [0, 2, 3, 1]) + result = tf.matmul(x, y, output_type=output_type) + return tf.transpose(result, [0, 1, 3, 2]) + elif subscripts == 'abcd,cde->abe': + (_, b1, c1, d1) = x.shape + (c2, d2, e2) = y.shape + (b, c, d, e) = (b1, c1 or c2, d1 or d2, e2) + x = tf.reshape(x, [-1, b, c * d]) + y = tf.reshape(y, [-1, e]) + return tf.matmul(x, y, output_type=output_type) + elif subscripts == 'abcd,ced->abe': + (_, b1, c1, d1) = x.shape + (c2, e2, d2) = y.shape + (b, c, d, e) = (b1, c1 or c2, d1 or d2, e2) + x = tf.reshape(x, [-1, b, c * d]) + y = tf.transpose(y, [0, 2, 1]) + y = tf.reshape(y, [-1, e]) + return tf.matmul(x, y, output_type=output_type) + elif subscripts == 'abcd,ecd->abe': + (_, b1, c1, d1) = x.shape + (e2, c2, d2) = y.shape + (b, c, d, e) = (b1, c1 or c2, d1 or d2, e2) + x = tf.reshape(x, [-1, b, c * d]) + y = tf.transpose(y, [1, 2, 0]) + y = tf.reshape(y, [-1, e]) + return tf.matmul(x, y, output_type=output_type) + elif subscripts == 'abcde,aebf->adbcf': + (_, b1, c1, d1, e1) = x.shape + (_, e2, b2, f2) = y.shape + (b, c, d, e, f) = (b1 or b2, c1, d1, e1 or e2, f2) + x = tf.reshape(x, [-1, b, c * d, e]) + y = tf.transpose(y, [0, 2, 1, 3]) + result = tf.matmul(x, y, output_type=output_type) + result = tf.reshape(result, [-1, b, c, d, f]) + return tf.transpose(result, [0, 3, 1, 2, 4]) + elif subscripts == 'abcde,afce->acdbf': + (_, b1, 
c1, d1, e1) = x.shape + (_, f2, c2, e2) = y.shape + (b, c, d, e, f) = (b1, c1 or c2, d1, e1 or e2, f2) + x = tf.transpose(x, [0, 2, 3, 1, 4]) + x = tf.reshape(x, [-1, c, d * b, e]) + y = tf.transpose(y, [0, 2, 3, 1]) + result = tf.matmul(x, y, output_type=output_type) + return tf.reshape(result, [-1, c, d, b, f]) + else: + raise NotImplementedError + dtypes_to_resolve = list(set((standardize_dtype(x.dtype) for x in operands))) + if len(dtypes_to_resolve) == 1 and dtypes_to_resolve[0] == 'int8': + compute_dtype = 'int8' + result_dtype = 'int32' + output_type = 'int32' + else: + result_dtype = dtypes.result_type(*dtypes_to_resolve) + compute_dtype = result_dtype + output_type = None + if is_valid_for_custom_ops(subscripts, *operands) and (not kwargs): + if 'int' in compute_dtype and output_type is None: + compute_dtype = config.floatx() + operands = tree.map_structure(lambda x: tf.cast(x, compute_dtype), operands) + result = use_custom_ops(subscripts, *operands, output_type=output_type) + else: + if 'int' in compute_dtype: + compute_dtype = config.floatx() + operands = tree.map_structure(lambda x: tf.cast(x, compute_dtype), operands) + result = tf.einsum(subscripts, *operands, **kwargs) + return tf.cast(result, result_dtype) + +@sparse.elementwise_binary_union(sparse.sparse_subtract) +def subtract(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return tf.subtract(x1, x2) + +def matmul(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + x1_shape = x1.shape + x2_shape = x2.shape + x1_sparse = isinstance(x1, tf.SparseTensor) + x2_sparse = isinstance(x2, tf.SparseTensor) + x1_dtype = standardize_dtype(x1.dtype) + x2_dtype = standardize_dtype(x2.dtype) + if x1_dtype == 'int8' and x2_dtype == 'int8' and (not x1_sparse) and (not x2_sparse) and (x1_shape.rank != 1) and (x2_shape.rank != 1): + compute_dtype = 'int8' + result_dtype = 'int32' + output_type = result_dtype + else: + compute_dtype = dtypes.result_type(x1.dtype, x2.dtype, float) + result_dtype = dtypes.result_type(x1.dtype, x2.dtype) + output_type = None + x1 = tf.cast(x1, compute_dtype) + x2 = tf.cast(x2, compute_dtype) + + def with_combined_batch_dimensions(a, b, output_shape, fn_3d): + a_sparse = isinstance(a, tf.SparseTensor) + b_sparse = isinstance(b, tf.SparseTensor) + batch_shape = b.shape[:-2] if b_sparse else a.shape[:-2] + batch_size = math.prod(batch_shape) + a3d_shape = [batch_size] + a.shape[-2:] + a_3d = tf.sparse.reshape(a, a3d_shape) if a_sparse else tf.reshape(a, a3d_shape) + b3d_shape = [batch_size] + b.shape[-2:] + b_3d = tf.sparse.reshape(b, b3d_shape) if b_sparse else tf.reshape(b, b3d_shape) + result_3d = fn_3d(a_3d, b_3d) + return tf.sparse.reshape(result_3d, output_shape) if isinstance(result_3d, tf.SparseTensor) else tf.reshape(result_3d, output_shape) + + def sparse_sparse_matmul(a, b): + dtype = a.values.dtype + a_csr = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(a.indices, a.values, a.dense_shape) + b_csr = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(b.indices, b.values, b.dense_shape) + result_csr = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(a_csr, b_csr, dtype) + res = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(result_csr, dtype) + return tf.SparseTensor(res.indices, res.values, 
res.dense_shape) + + def embedding_lookup_sparse_dense_matmul(a, b): + (a, _) = tf.sparse.fill_empty_rows(a, 0) + ids = tf.SparseTensor(indices=a.indices, values=a.indices[:, 1], dense_shape=a.dense_shape) + return tf.nn.embedding_lookup_sparse(b, ids, a, combiner='sum') + + def sparse_dense_matmul_3d(a, b): + return tf.map_fn(lambda x: tf.sparse.sparse_dense_matmul(x[0], x[1]), elems=(a, b), fn_output_signature=a.dtype) + if x1_sparse or x2_sparse: + from keras.src.ops.operation_utils import compute_matmul_output_shape + output_shape = compute_matmul_output_shape(x1_shape, x2_shape) + if x1_sparse and x2_sparse: + if x1_shape.rank <= 3: + output = sparse_sparse_matmul(x1, x2) + else: + output = with_combined_batch_dimensions(x1, x2, output_shape, sparse_sparse_matmul) + else: + sparse_rank = x1_shape.rank if x1_sparse else x2_shape.rank + if x1_sparse and sparse_rank == 2: + output = embedding_lookup_sparse_dense_matmul(x1, x2) + elif sparse_rank == 2: + output = tf.sparse.sparse_dense_matmul(x1, x2) + elif sparse_rank == 3: + output = sparse_dense_matmul_3d(x1, x2) + else: + output = with_combined_batch_dimensions(x1, x2, output_shape, sparse_dense_matmul_3d) + output = tf.cast(output, result_dtype) + output.set_shape(output_shape) + return output + else: + if x1_shape.rank == 2 and x2_shape.rank == 2: + output = tf.matmul(x1, x2, output_type=output_type) + elif x2_shape.rank == 1: + output = tf.tensordot(x1, x2, axes=1) + elif x1_shape.rank == 1: + output = tf.tensordot(x1, x2, axes=[[0], [-2]]) + else: + output = tf.matmul(x1, x2, output_type=output_type) + return tf.cast(output, result_dtype) + +@sparse.elementwise_binary_intersection +def multiply(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return tf.multiply(x1, x2) + +def mean(x, axis=None, keepdims=False): + if isinstance(x, tf.IndexedSlices): + if axis is None: + sum = tf.reduce_sum(x.values, keepdims=keepdims) + return sum / tf.cast(tf.reduce_prod(x.dense_shape), dtype=sum.dtype) + axis = to_tuple_or_list(axis) + if not axis: + return x + dense_shape = tf.convert_to_tensor(x.dense_shape) + rank = tf.shape(dense_shape)[0] + axis = [canonicalize_axis(a, rank) for a in axis] + axis.sort() + if axis == [0]: + sum = tf.reduce_sum(x.values, axis=0, keepdims=keepdims) + return sum / tf.cast(dense_shape[0], dtype=sum.dtype) + elif axis[0] == 0: + sum = tf.reduce_sum(x.values, axis=0, keepdims=True) + axis_0_mean = sum / tf.cast(dense_shape[0], dtype=sum.dtype) + return tf.reduce_mean(axis_0_mean, axis=axis, keepdims=keepdims) + elif keepdims: + new_values = tf.reduce_mean(x.values, axis=axis, keepdims=True) + new_dense_shape = tf.concat([dense_shape[0:1], new_values.shape[1:]], axis=0) + return tf.IndexedSlices(new_values, x.indices, new_dense_shape) + elif rank == len(axis) + 1: + return tf.scatter_nd(tf.expand_dims(x.indices, axis=1), tf.reduce_mean(x.values, axis=axis), [dense_shape[0]]) + else: + gather_indices = [i for i in range(rank) if i not in axis] + return tf.IndexedSlices(tf.reduce_mean(x.values, axis=axis), x.indices, tf.gather(x.dense_shape, gather_indices, axis=0)) + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + compute_dtype = dtypes.result_type(x.dtype, 'float32') + if 'int' in ori_dtype or ori_dtype == 'bool': + result_dtype = 
compute_dtype + else: + result_dtype = ori_dtype + output = tf.reduce_mean(tf.cast(x, compute_dtype), axis=axis, keepdims=keepdims) + return tf.cast(output, result_dtype) + +def max(x, axis=None, keepdims=False, initial=None): + x = convert_to_tensor(x) + if initial is not None: + if standardize_dtype(x.dtype) == 'bool': + x = tf.reduce_any(x, axis=axis, keepdims=keepdims) + x = tf.math.maximum(tf.cast(x, 'int32'), tf.cast(initial, 'int32')) + return tf.cast(x, 'bool') + else: + x = tf.reduce_max(x, axis=axis, keepdims=keepdims) + return tf.math.maximum(x, initial) + if tf.executing_eagerly(): + size_x = size(x) + tf.assert_greater(size_x, tf.constant(0, dtype=size_x.dtype), message='Cannot compute the max of an empty tensor.') + if standardize_dtype(x.dtype) == 'bool': + return tf.reduce_any(x, axis=axis, keepdims=keepdims) + else: + return tf.reduce_max(x, axis=axis, keepdims=keepdims) + +def ones(shape, dtype=None): + dtype = dtype or config.floatx() + return tf.ones(shape, dtype=dtype) + +def zeros(shape, dtype=None): + dtype = dtype or config.floatx() + return tf.zeros(shape, dtype=dtype) + +@sparse.elementwise_unary +def absolute(x): + x = convert_to_tensor(x) + dtype = standardize_dtype(x.dtype) + if 'uint' in dtype or dtype == 'bool': + return x + return tf.abs(x) + +def abs(x): + return absolute(x) + +def all(x, axis=None, keepdims=False): + x = tf.cast(x, 'bool') + return tf.reduce_all(x, axis=axis, keepdims=keepdims) + +def any(x, axis=None, keepdims=False): + x = tf.cast(x, 'bool') + return tf.reduce_any(x, axis=axis, keepdims=keepdims) + +def amax(x, axis=None, keepdims=False): + return max(x, axis=axis, keepdims=keepdims) + +def amin(x, axis=None, keepdims=False): + return min(x, axis=axis, keepdims=keepdims) + +def append(x1, x2, axis=None): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + if axis is None: + return tf.concat([tf.reshape(x1, [-1]), tf.reshape(x2, [-1])], axis=0) + else: + return tf.concat([x1, x2], axis=axis) + +def arange(start, stop=None, step=1, dtype=None): + if dtype is None: + dtypes_to_resolve = [getattr(start, 'dtype', type(start)), getattr(step, 'dtype', type(step))] + if stop is not None: + dtypes_to_resolve.append(getattr(stop, 'dtype', type(stop))) + dtype = dtypes.result_type(*dtypes_to_resolve) + dtype = standardize_dtype(dtype) + try: + out = tf.range(start, stop, delta=step, dtype=dtype) + except tf.errors.NotFoundError: + out = tf.range(start, stop, delta=step, dtype='float32') + out = tf.cast(out, dtype) + return out + +@sparse.densifying_unary(0.5 * np.pi) +def arccos(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.acos(x) + +@sparse.densifying_unary(np.nan) +def arccosh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.acosh(x) + +@sparse.elementwise_unary +def arcsin(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.asin(x) + +@sparse.elementwise_unary +def arcsinh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = 
dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.asinh(x) + +@sparse.elementwise_unary +def arctan(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.atan(x) + +def arctan2(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype, float) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + return tf.math.atan2(x1, x2) + +@sparse.elementwise_unary +def arctanh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.atanh(x) + +def _keepdims(x, y, axis): + if axis is None: + shape = [1 for _ in range(len(x.shape))] + else: + shape = list(shape_op(x)) + for axis in tree.flatten(axis): + shape[axis] = 1 + y = tf.reshape(y, shape) + return y + +def argmax(x, axis=None, keepdims=False): + _x = x + if axis is None: + x = tf.reshape(x, [-1]) + y = tf.argmax(x, axis=axis, output_type='int32') + if keepdims: + y = _keepdims(_x, y, axis) + return y + +def argmin(x, axis=None, keepdims=False): + _x = x + if axis is None: + x = tf.reshape(x, [-1]) + y = tf.argmin(x, axis=axis, output_type='int32') + if keepdims: + y = _keepdims(_x, y, axis) + return y + +def argsort(x, axis=-1): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'bool': + x = tf.cast(x, 'uint8') + x_shape = x.shape + if x_shape.rank == 0: + return tf.cast([0], 'int32') + if axis is None: + x = tf.reshape(x, [-1]) + axis = 0 + return tf.argsort(x, axis=axis) + +def array(x, dtype=None): + return convert_to_tensor(x, dtype=dtype) + +def average(x, axis=None, weights=None): + x = convert_to_tensor(x) + if weights is None: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + avg = tf.reduce_mean(x, axis=axis) + else: + weights = convert_to_tensor(weights) + dtype = dtypes.result_type(x.dtype, weights.dtype, float) + x = tf.cast(x, dtype) + weights = tf.cast(weights, dtype) + + def _rank_equal_case(): + weights_sum = tf.reduce_sum(weights, axis=axis) + return tf.reduce_sum(x * weights, axis=axis) / weights_sum + + def _rank_not_equal_case(): + weights_sum = tf.reduce_sum(weights) + axes = tf.convert_to_tensor([[axis], [0]]) + return tf.tensordot(x, weights, axes) / weights_sum + if axis is None: + avg = _rank_equal_case() + elif len(x.shape) == len(weights.shape): + avg = _rank_equal_case() + else: + avg = _rank_not_equal_case() + return avg + +def bitwise_and(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + dtype = dtypes.result_type(x.dtype, y.dtype) + x = tf.cast(x, dtype) + y = tf.cast(y, dtype) + return tf.bitwise.bitwise_and(x, y) + +def bitwise_invert(x): + x = convert_to_tensor(x) + return tf.bitwise.invert(x) + +def bitwise_not(x): + return bitwise_invert(x) + +def bitwise_or(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + dtype = dtypes.result_type(x.dtype, y.dtype) + x = tf.cast(x, dtype) + y = tf.cast(y, dtype) + return tf.bitwise.bitwise_or(x, y) + +def bitwise_xor(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + dtype = dtypes.result_type(x.dtype, y.dtype) + x = tf.cast(x, dtype) + y = tf.cast(y, dtype) + return tf.bitwise.bitwise_xor(x, y) + +def bitwise_left_shift(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + dtype = dtypes.result_type(x.dtype, y.dtype) + x = tf.cast(x, 
dtype) + y = tf.cast(y, dtype) + return tf.bitwise.left_shift(x, y) + +def left_shift(x, y): + return bitwise_left_shift(x, y) + +def bitwise_right_shift(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + dtype = dtypes.result_type(x.dtype, y.dtype) + x = tf.cast(x, dtype) + y = tf.cast(y, dtype) + return tf.bitwise.right_shift(x, y) + +def right_shift(x, y): + return bitwise_right_shift(x, y) + +def broadcast_to(x, shape): + return tf.broadcast_to(x, shape) + +@sparse.elementwise_unary +def ceil(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.ceil(x) + +def clip(x, x_min, x_max): + dtype = standardize_dtype(x.dtype) + if dtype == 'bool': + x = tf.cast(x, 'int32') + return tf.clip_by_value(x, x_min, x_max) + +def concatenate(xs, axis=0): + sparse_count = builtins.sum((isinstance(x, tf.SparseTensor) for x in xs)) + if sparse_count: + if sparse_count == len(xs): + return tf.sparse.concat(axis=axis, sp_inputs=xs) + else: + xs = [convert_to_tensor(x, sparse=False) if isinstance(x, tf.SparseTensor) else x for x in xs] + xs = tree.map_structure(convert_to_tensor, xs) + dtype_set = set([x.dtype for x in xs]) + if len(dtype_set) > 1: + dtype = dtypes.result_type(*dtype_set) + xs = tree.map_structure(lambda x: tf.cast(x, dtype), xs) + return tf.concat(xs, axis=axis) + +@sparse.elementwise_unary +def conjugate(x): + return tf.math.conj(x) + +@sparse.elementwise_unary +def conj(x): + return tf.math.conj(x) + +@sparse.elementwise_unary +def copy(x): + x = convert_to_tensor(x) + return tf.identity(x) + +@sparse.densifying_unary(1) +def cos(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.cos(x) + +@sparse.densifying_unary(1) +def cosh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.cosh(x) + +def count_nonzero(x, axis=None): + return tf.math.count_nonzero(x, axis=axis, dtype='int32') + +def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + if axis is not None: + axisa = axis + axisb = axis + axisc = axis + x1 = moveaxis(x1, axisa, -1) + x2 = moveaxis(x2, axisb, -1) + + def maybe_pad_zeros(x, size_of_last_dim): + + def pad_zeros(x): + return tf.pad(x, tf.concat([tf.zeros([tf.rank(x) - 1, 2], 'int32'), tf.constant([[0, 1]], 'int32')], axis=0)) + if isinstance(size_of_last_dim, int): + if size_of_last_dim == 2: + return pad_zeros(x) + return x + return tf.cond(tf.equal(size_of_last_dim, 2), lambda : pad_zeros(x), lambda : x) + x1_dim = shape_op(x1)[-1] + x2_dim = shape_op(x2)[-1] + x1 = maybe_pad_zeros(x1, x1_dim) + x2 = maybe_pad_zeros(x2, x2_dim) + shape = shape_op(x1) + shape = tf.broadcast_dynamic_shape(shape, shape_op(x2)) + x1 = tf.broadcast_to(x1, shape) + x2 = tf.broadcast_to(x2, shape) + c = tf.linalg.cross(x1, x2) + if isinstance(x1_dim, int) and isinstance(x2_dim, int): + if (x1_dim == 2) & (x2_dim == 2): + return c[..., 2] + return moveaxis(c, -1, axisc) + return tf.cond((x1_dim == 2) & (x2_dim == 2), lambda : c[..., 2], lambda : moveaxis(c, -1, axisc)) + +def cumprod(x, axis=None, 
dtype=None): + x = convert_to_tensor(x, dtype=dtype) + if standardize_dtype(x.dtype) == 'bool': + x = tf.cast(x, 'int32') + if axis is None: + x = tf.reshape(x, [-1]) + axis = 0 + return tf.math.cumprod(x, axis=axis) + +def cumsum(x, axis=None, dtype=None): + x = convert_to_tensor(x, dtype=dtype) + if standardize_dtype(x.dtype) == 'bool': + x = tf.cast(x, 'int32') + if axis is None: + x = tf.reshape(x, [-1]) + axis = 0 + return tf.math.cumsum(x, axis=axis) + +def diag(x, k=0): + x = convert_to_tensor(x) + if len(x.shape) == 1: + return tf.cond(tf.equal(tf.size(x), 0), lambda : tf.zeros([builtins.abs(k), builtins.abs(k)], dtype=x.dtype), lambda : tf.linalg.diag(x, k=k)) + elif len(x.shape) == 2: + return diagonal(x, offset=k) + else: + raise ValueError(f'`x` must be 1d or 2d. Received: x.shape={x.shape}') + +def diagonal(x, offset=0, axis1=0, axis2=1): + x = convert_to_tensor(x) + x_rank = x.ndim + if offset == 0 and (axis1 == x_rank - 2 or axis1 == -2) and (axis2 == x_rank - 1 or axis2 == -1): + return tf.linalg.diag_part(x) + x = moveaxis(x, (axis1, axis2), (-2, -1)) + x_shape = shape_op(x) + + def _zeros(): + return tf.zeros(tf.concat([x_shape[:-1], [0]], 0), dtype=x.dtype) + if isinstance(x_shape[-1], int) and isinstance(x_shape[-2], int): + if offset <= -1 * x_shape[-2] or offset >= x_shape[-1]: + x = _zeros() + else: + x = tf.cond(tf.logical_or(tf.less_equal(offset, -1 * x_shape[-2]), tf.greater_equal(offset, x_shape[-1])), lambda : _zeros(), lambda : x) + return tf.linalg.diag_part(x, k=offset) + +def diff(a, n=1, axis=-1): + a = convert_to_tensor(a) + if n == 0: + return a + elif n < 0: + raise ValueError(f'Order `n` must be non-negative. Received n={n}') + elif a.ndim == 0: + raise ValueError(f'`diff` requires input that is at least one dimensional. 
Received: a={a}') + axis = canonicalize_axis(axis, a.ndim) + slice1 = [slice(None)] * a.ndim + slice2 = [slice(None)] * a.ndim + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1_tuple = tuple(slice1) + slice2_tuple = tuple(slice2) + for _ in range(n): + if standardize_dtype(a.dtype) == 'bool': + a = tf.not_equal(a[slice1_tuple], a[slice2_tuple]) + else: + a = tf.subtract(a[slice1_tuple], a[slice2_tuple]) + return a + +def digitize(x, bins): + x = convert_to_tensor(x) + bins = list(bins) + bins = tree.map_structure(lambda x: float(x), bins) + ori_dtype = standardize_dtype(x.dtype) + if ori_dtype in ('bool', 'int8', 'int16', 'uint8', 'uint16'): + x = cast(x, 'int32') + elif ori_dtype == 'uint32': + x = cast(x, 'int64') + elif ori_dtype in ('bfloat16', 'float16'): + x = cast(x, 'float32') + if isinstance(x, tf.RaggedTensor): + return tf.ragged.map_flat_values(lambda y: tf.raw_ops.Bucketize(input=y, boundaries=bins), x) + elif isinstance(x, tf.SparseTensor): + output = tf.SparseTensor(indices=tf.identity(x.indices), values=tf.raw_ops.Bucketize(input=x.values, boundaries=bins), dense_shape=tf.identity(x.dense_shape)) + output.set_shape(x.shape) + return output + return tf.raw_ops.Bucketize(input=x, boundaries=bins) + +def dot(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + result_dtype = dtypes.result_type(x.dtype, y.dtype) + compute_dtype = dtypes.result_type(result_dtype, float) + x = tf.cast(x, compute_dtype) + y = tf.cast(y, compute_dtype) + x_shape = x.shape + y_shape = y.shape + if x_shape.rank == 0 or y_shape.rank == 0: + output = x * y + elif y_shape.rank == 1: + output = tf.tensordot(x, y, axes=[[-1], [-1]]) + else: + output = tf.tensordot(x, y, axes=[[-1], [-2]]) + return tf.cast(output, result_dtype) + +def empty(shape, dtype=None): + dtype = dtype or config.floatx() + return tf.zeros(shape, dtype=dtype) + +def equal(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + return tf.equal(x1, x2) + +@sparse.densifying_unary(1) +def exp(x): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + x = tf.cast(x, config.floatx()) + return tf.exp(x) + +def expand_dims(x, axis): + x = convert_to_tensor(x) + axis = to_tuple_or_list(axis) + out_ndim = len(x.shape) + len(axis) + axis = sorted([canonicalize_axis(a, out_ndim) for a in axis]) + if isinstance(x, tf.SparseTensor): + from keras.src.ops.operation_utils import compute_expand_dims_output_shape + output_shape = compute_expand_dims_output_shape(x.shape, axis) + for a in axis: + x = tf.sparse.expand_dims(x, a) + x.set_shape(output_shape) + return x + for a in axis: + x = tf.expand_dims(x, a) + return x + +@sparse.elementwise_unary +def expm1(x): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + x = tf.cast(x, config.floatx()) + return tf.math.expm1(x) + +def flip(x, axis=None): + x = convert_to_tensor(x) + if axis is None: + return tf.reverse(x, tf.range(tf.rank(x))) + return tf.reverse(x, [axis]) + +@sparse.elementwise_unary +def floor(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.floor(x) + +def full(shape, fill_value, dtype=None): + dtype = dtype or config.floatx() + fill_value = convert_to_tensor(fill_value, dtype) + return 
tf.broadcast_to(fill_value, shape) + +def full_like(x, fill_value, dtype=None): + x = convert_to_tensor(x) + dtype = dtypes.result_type(dtype or x.dtype) + fill_value = convert_to_tensor(fill_value, dtype) + return tf.broadcast_to(fill_value, tf.shape(x)) + +def greater(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + return tf.greater(x1, x2) + +def greater_equal(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + return tf.greater_equal(x1, x2) + +def hstack(xs): + dtype_set = set([getattr(x, 'dtype', type(x)) for x in xs]) + if len(dtype_set) > 1: + dtype = dtypes.result_type(*dtype_set) + xs = tree.map_structure(lambda x: convert_to_tensor(x, dtype), xs) + if len(xs[0].shape) == 1: + return tf.concat(xs, axis=0) + return tf.concat(xs, axis=1) + +def identity(n, dtype=None): + return eye(N=n, M=n, dtype=dtype) + +@sparse.elementwise_unary +def imag(x): + return tf.math.imag(x) + +def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + if 'float' in dtype: + result = tf.abs(x1 - x2) <= atol + rtol * tf.abs(x2) + if equal_nan: + result = result | is_nan(x1) & is_nan(x2) + return result + else: + return tf.equal(x1, x2) + +@sparse.densifying_unary(True) +def isfinite(x): + x = convert_to_tensor(x) + dtype_as_dtype = tf.as_dtype(x.dtype) + if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric: + return tf.ones(x.shape, tf.bool) + return tf.math.is_finite(x) + +def isinf(x): + x = convert_to_tensor(x) + dtype_as_dtype = tf.as_dtype(x.dtype) + if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric: + return tf.zeros(x.shape, tf.bool) + return tf.math.is_inf(x) + +def isnan(x): + x = convert_to_tensor(x) + dtype_as_dtype = tf.as_dtype(x.dtype) + if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric: + return tf.zeros(x.shape, tf.bool) + return tf.math.is_nan(x) + +def less(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + return tf.less(x1, x2) + +def less_equal(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + return tf.less_equal(x1, x2) + +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): + if num < 0: + raise ValueError(f'`num` must be a non-negative integer. 
Received: num={num}') + if dtype is None: + dtypes_to_resolve = [getattr(start, 'dtype', type(start)), getattr(stop, 'dtype', type(stop)), float] + dtype = dtypes.result_type(*dtypes_to_resolve) + else: + dtype = standardize_dtype(dtype) + start = convert_to_tensor(start, dtype=dtype) + stop = convert_to_tensor(stop, dtype=dtype) + step = convert_to_tensor(np.nan) + if endpoint: + result = tf.linspace(start, stop, num, axis=axis) + if num > 1: + step = (stop - start) / (tf.cast(num, dtype) - 1) + else: + if num > 0: + step = (stop - start) / tf.cast(num, dtype) + if num > 1: + new_stop = tf.cast(stop, step.dtype) - step + start = tf.cast(start, new_stop.dtype) + result = tf.linspace(start, new_stop, num, axis=axis) + else: + result = tf.linspace(start, stop, num, axis=axis) + if dtype is not None: + if 'int' in dtype: + result = tf.floor(result) + result = tf.cast(result, dtype) + if retstep: + return (result, step) + else: + return result + +@sparse.densifying_unary(-np.inf) +def log(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.log(x) + +@sparse.densifying_unary(-np.inf) +def log10(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.log(x) / tf.math.log(tf.constant(10, x.dtype)) + +@sparse.elementwise_unary +def log1p(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.log1p(x) + +@sparse.densifying_unary(-np.inf) +def log2(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.log(x) / tf.math.log(tf.constant(2, x.dtype)) + +def logaddexp(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype, float) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + delta = x1 - x2 + return tf.where(tf.math.is_nan(delta), x1 + x2, tf.maximum(x1, x2) + tf.math.log1p(tf.math.exp(-tf.abs(delta)))) + +def logical_and(x1, x2): + x1 = tf.cast(x1, 'bool') + x2 = tf.cast(x2, 'bool') + return tf.logical_and(x1, x2) + +def logical_not(x): + x = tf.cast(x, 'bool') + return tf.logical_not(x) + +def logical_or(x1, x2): + x1 = tf.cast(x1, 'bool') + x2 = tf.cast(x2, 'bool') + return tf.logical_or(x1, x2) + +def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0): + result = linspace(start=start, stop=stop, num=num, endpoint=endpoint, dtype=dtype, axis=axis) + return tf.pow(tf.cast(base, result.dtype), result) + +@sparse.elementwise_binary_union(tf.sparse.maximum, densify_mixed=True) +def maximum(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return tf.maximum(x1, x2) + +def median(x, axis=None, keepdims=False): + return quantile(x, 0.5, axis=axis, keepdims=keepdims) + +def meshgrid(*x, indexing='xy'): + return tf.meshgrid(*x, indexing=indexing) + +def min(x, axis=None, keepdims=False, initial=None): + x = convert_to_tensor(x) + if initial is not None: + if 
standardize_dtype(x.dtype) == 'bool': + x = tf.reduce_all(x, axis=axis, keepdims=keepdims) + x = tf.math.minimum(tf.cast(x, 'int32'), tf.cast(initial, 'int32')) + return tf.cast(x, 'bool') + else: + x = tf.reduce_min(x, axis=axis, keepdims=keepdims) + return tf.math.minimum(x, initial) + if tf.executing_eagerly(): + size_x = size(x) + tf.assert_greater(size_x, tf.constant(0, dtype=size_x.dtype), message='Cannot compute the min of an empty tensor.') + if standardize_dtype(x.dtype) == 'bool': + return tf.reduce_all(x, axis=axis, keepdims=keepdims) + else: + return tf.reduce_min(x, axis=axis, keepdims=keepdims) + +@sparse.elementwise_binary_union(tf.sparse.minimum, densify_mixed=True) +def minimum(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return tf.minimum(x1, x2) + +def mod(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + if dtype == 'bool': + dtype = 'int32' + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + return tf.math.mod(x1, x2) + +def moveaxis(x, source, destination): + x = convert_to_tensor(x) + _source = to_tuple_or_list(source) + _destination = to_tuple_or_list(destination) + _source = tuple((canonicalize_axis(i, x.ndim) for i in _source)) + _destination = tuple((canonicalize_axis(i, x.ndim) for i in _destination)) + if len(_source) != len(_destination): + raise ValueError(f'Inconsistent number of `source` and `destination`. Received: source={source}, destination={destination}') + if _source == _destination: + return x + perm = [i for i in range(x.ndim) if i not in _source] + for (dest, src) in sorted(zip(_destination, _source)): + perm.insert(dest, src) + return tf.transpose(x, perm) + +def nan_to_num(x, nan=0.0, posinf=None, neginf=None): + x = convert_to_tensor(x) + dtype = x.dtype + dtype_as_dtype = tf.as_dtype(dtype) + if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric: + return x + x = tf.where(tf.math.is_nan(x), tf.constant(nan, dtype), x) + if posinf is None: + posinf = dtype.max + x = tf.where(tf.math.is_inf(x) & (x > 0), tf.constant(posinf, dtype), x) + if neginf is None: + neginf = dtype.min + x = tf.where(tf.math.is_inf(x) & (x < 0), tf.constant(neginf, dtype), x) + return x + +def ndim(x): + x = convert_to_tensor(x) + return x.ndim + +def nonzero(x): + x = convert_to_tensor(x) + result = tf.unstack(tf.where(tf.cast(x, 'bool')), x.shape.rank, axis=1) + return tree.map_structure(lambda indices: tf.cast(indices, 'int32'), result) + +def not_equal(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + return tf.not_equal(x1, x2) + +def ones_like(x, dtype=None): + return tf.ones_like(x, dtype=dtype) + +def zeros_like(x, dtype=None): + return tf.zeros_like(x, dtype=dtype) + +def outer(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + return tf.reshape(x1, [-1, 1]) * tf.reshape(x2, [-1]) + +def pad(x, pad_width, mode='constant', constant_values=None): + x = convert_to_tensor(x) + kwargs = {} + if constant_values is not None: + if mode != 'constant': + raise ValueError(f"Argument `constant_values` 
can only be provided when `mode == 'constant'`. Received: mode={mode}") + kwargs['constant_values'] = constant_values + pad_width = convert_to_tensor(pad_width, 'int32') + return tf.pad(x, pad_width, mode.upper(), **kwargs) + +def prod(x, axis=None, keepdims=False, dtype=None): + x = convert_to_tensor(x) + if dtype is None: + dtype = dtypes.result_type(x.dtype) + if dtype == 'bool': + dtype = 'int32' + elif dtype in ('int8', 'int16'): + dtype = 'int32' + elif dtype in ('uint8', 'uint16'): + dtype = 'uint32' + x = tf.cast(x, dtype) + return tf.reduce_prod(x, axis=axis, keepdims=keepdims) + +def _quantile(x, q, axis=None, method='linear', keepdims=False): + q = tf.cast(q, 'float64') + if axis is None: + y = tf.reshape(x, [-1]) + else: + x_ndims = len(x.shape) + axis = [canonicalize_axis(a, x_ndims) for a in axis] + other_dims = sorted(set(range(x_ndims)).difference(axis)) + perm = other_dims + list(axis) + x_permed = tf.transpose(a=x, perm=perm) + if None not in x.shape: + x_shape = list(x.shape) + other_shape = [x_shape[i] for i in other_dims] + end_shape = [math.prod([x_shape[i] for i in axis])] + full_shape = other_shape + end_shape + else: + other_shape = tf.gather(tf.shape(x), tf.cast(other_dims, tf.int64)) + full_shape = tf.concat([other_shape, [-1]], axis=0) + y = tf.reshape(x_permed, shape=full_shape) + sorted_y = tf.sort(y, axis=-1, direction='ASCENDING') + d = tf.cast(tf.shape(y)[-1], 'float64') + + def _get_indices(method): + if method == 'lower': + indices = tf.math.floor((d - 1) * q) + elif method == 'higher': + indices = tf.math.ceil((d - 1) * q) + elif method == 'nearest': + indices = tf.round((d - 1) * q) + return tf.clip_by_value(tf.cast(indices, 'int32'), 0, tf.shape(y)[-1] - 1) + if method in ['nearest', 'lower', 'higher']: + gathered_y = tf.gather(sorted_y, _get_indices(method), axis=-1) + elif method == 'midpoint': + gathered_y = 0.5 * (tf.gather(sorted_y, _get_indices('lower'), axis=-1) + tf.gather(sorted_y, _get_indices('higher'), axis=-1)) + elif method == 'linear': + larger_y_idx = _get_indices('higher') + exact_idx = (d - 1) * q + smaller_y_idx = tf.maximum(larger_y_idx - 1, 0) + larger_y_idx = tf.minimum(smaller_y_idx + 1, tf.shape(y)[-1] - 1) + fraction = tf.cast(larger_y_idx, tf.float64) - exact_idx + fraction = tf.cast(fraction, y.dtype) + gathered_y = tf.gather(sorted_y, larger_y_idx, axis=-1) * (1 - fraction) + tf.gather(sorted_y, smaller_y_idx, axis=-1) * fraction + if x.dtype in (tf.bfloat16, tf.float16, tf.float32, tf.float64): + nan_batch_members = tf.reduce_any(tf.math.is_nan(x), axis=axis) + right_rank_matched_shape = tf.pad(tf.shape(nan_batch_members), paddings=[[0, tf.rank(q)]], constant_values=1) + nan_batch_members = tf.reshape(nan_batch_members, shape=right_rank_matched_shape) + gathered_y = tf.where(nan_batch_members, float('NaN'), gathered_y) + if keepdims: + if axis is None: + ones_vec = tf.ones(shape=[tf.rank(x) + tf.rank(q)], dtype='int32') + gathered_y *= tf.ones(ones_vec, dtype=gathered_y.dtype) + else: + for i in sorted(axis): + gathered_y = tf.expand_dims(gathered_y, axis=i) + shift_value_static = tf.get_static_value(tf.rank(q)) + ndims = tf.TensorShape(gathered_y.shape).rank + if ndims < 2: + return gathered_y + shift_value_static = int(math.copysign(1, shift_value_static) * (builtins.abs(shift_value_static) % ndims)) + if shift_value_static == 0: + return gathered_y + perm = collections.deque(range(ndims)) + perm.rotate(shift_value_static) + return tf.transpose(a=gathered_y, perm=perm) + +def quantile(x, q, axis=None, method='linear', 
keepdims=False): + x = convert_to_tensor(x) + q = convert_to_tensor(q) + axis = to_tuple_or_list(axis) + compute_dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, compute_dtype) + return _quantile(x, q, axis=axis, method=method, keepdims=keepdims) + +def ravel(x): + x = convert_to_tensor(x) + return tf.reshape(x, [-1]) + +@sparse.elementwise_unary +def real(x): + x = convert_to_tensor(x) + return tf.math.real(x) + +@sparse.densifying_unary(np.inf) +def reciprocal(x): + x = convert_to_tensor(x) + return tf.math.reciprocal(x) + +def repeat(x, repeats, axis=None): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'uint16': + x = tf.cast(x, 'uint32') + return tf.cast(tf.repeat(x, repeats, axis=axis), 'uint16') + return tf.repeat(x, repeats, axis=axis) + +def reshape(x, newshape): + x = convert_to_tensor(x) + if isinstance(x, tf.SparseTensor): + from keras.src.ops.operation_utils import compute_reshape_output_shape + output_shape = compute_reshape_output_shape(x.shape, newshape, 'newshape') + output = tf.sparse.reshape(x, newshape) + output.set_shape(output_shape) + return output + return tf.reshape(x, newshape) + +def roll(x, shift, axis=None): + x = convert_to_tensor(x) + if axis is not None: + return tf.roll(x, shift=shift, axis=axis) + original_shape = tf.shape(x) + x = tf.roll(tf.reshape(x, [-1]), shift, 0) + return tf.reshape(x, original_shape) + +def searchsorted(sorted_sequence, values, side='left'): + if ndim(sorted_sequence) != 1: + raise ValueError(f'`searchsorted` only supports 1-D sorted sequences. You can use `keras.ops.vectorized_map` to extend it to N-D sequences. Received: sorted_sequence.shape={sorted_sequence.shape}') + out_type = 'int32' if len(sorted_sequence) <= np.iinfo(np.int32).max else 'int64' + return tf.searchsorted(sorted_sequence, values, side=side, out_type=out_type) + +@sparse.elementwise_unary +def sign(x): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if ori_dtype in ('uint8', 'uint16', 'uint32'): + x = tf.cast(x, 'int32') + return tf.cast(tf.sign(x), ori_dtype) + return tf.sign(x) + +@sparse.elementwise_unary +def sin(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.sin(x) + +@sparse.elementwise_unary +def sinh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.sinh(x) + +def size(x): + x = convert_to_tensor(x) + return tf.size(x) + +def sort(x, axis=-1): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if ori_dtype == 'bool': + x = tf.cast(x, 'int8') + return tf.cast(tf.sort(x, axis=axis), ori_dtype) + return tf.sort(x, axis=axis) + +def split(x, indices_or_sections, axis=0): + if not isinstance(indices_or_sections, int): + total_size = x.shape[axis] + indices_or_sections = convert_to_tensor(indices_or_sections) + start_size = indices_or_sections[0:1] + end_size = total_size - indices_or_sections[-1:] + num_or_size_splits = tf.concat([start_size, diff(indices_or_sections), end_size], axis=0) + else: + num_or_size_splits = indices_or_sections + return tf.split(x, num_or_size_splits, axis=axis) + +def stack(x, axis=0): + dtype_set = set([getattr(a, 'dtype', type(a)) for a in x]) + if len(dtype_set) > 1: + dtype = dtypes.result_type(*dtype_set) + x = tree.map_structure(lambda a: convert_to_tensor(a, dtype), x) + 
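+ # Editor's note (hypothetical example, not part of the Keras source): mixed input dtypes
+ # are promoted to a common type before stacking. E.g., assuming numpy as np:
+ #   stack([np.array([1, 2], dtype='int32'), np.array([0.5, 1.5], dtype='float32')])
+ #   # -> float32 tensor of shape (2, 2)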
return tf.stack(x, axis=axis) + +def std(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + x = tf.cast(x, config.floatx()) + return tf.math.reduce_std(x, axis=axis, keepdims=keepdims) + +def swapaxes(x, axis1, axis2): + x = convert_to_tensor(x) + if x.shape.rank is not None and isinstance(axis1, int) and isinstance(axis2, int): + axis1 = canonicalize_axis(axis1, x.ndim) + axis2 = canonicalize_axis(axis2, x.ndim) + if axis1 == axis2: + return x + perm = list(range(x.ndim)) + perm[axis1] = axis2 + perm[axis2] = axis1 + else: + x_rank = tf.rank(x) + axis1 = tf.where(axis1 < 0, tf.add(axis1, x_rank), axis1) + axis2 = tf.where(axis2 < 0, tf.add(axis2, x_rank), axis2) + perm = tf.range(x_rank) + perm = tf.tensor_scatter_nd_update(perm, [[axis1], [axis2]], [axis2, axis1]) + return tf.transpose(x, perm) + +def take(x, indices, axis=None): + if isinstance(indices, tf.SparseTensor): + if x.dtype not in (tf.float16, tf.float32, tf.float64, tf.bfloat16): + warnings.warn(f'`take` with the TensorFlow backend does not support `x.dtype={x.dtype}` when `indices` is a sparse tensor; densifying `indices`.') + return take(x, convert_to_tensor(indices, sparse=False), axis=axis) + if axis is None: + x = tf.reshape(x, (-1,)) + elif axis != 0: + warnings.warn(f'`take` with the TensorFlow backend does not support `axis={axis}` when `indices` is a sparse tensor; densifying `indices`.') + return take(x, convert_to_tensor(indices, sparse=False), axis=axis) + output = tf.nn.safe_embedding_lookup_sparse(embedding_weights=tf.convert_to_tensor(x), sparse_ids=tf.sparse.expand_dims(indices, axis=-1), default_id=0) + output.set_shape(indices.shape + output.shape[len(indices.shape):]) + return output + x = convert_to_tensor(x) + indices = convert_to_tensor(indices) + if axis is None: + x = tf.reshape(x, [-1]) + axis = 0 + indices = tf.where(indices < 0, indices + tf.cast(tf.shape(x)[axis], indices.dtype), indices) + return tf.gather(x, indices, axis=axis) + +def take_along_axis(x, indices, axis=None): + from keras.src.ops.operation_utils import compute_take_along_axis_output_shape + x = convert_to_tensor(x) + indices = convert_to_tensor(indices, 'int64') + if axis is None: + if indices.ndim != 1: + raise ValueError(f'`indices` must be 1D if axis=None. 
Received: indices.shape={indices.shape}') + return take_along_axis(tf.reshape(x, [-1]), indices, 0) + static_output_shape = compute_take_along_axis_output_shape(x.shape, indices.shape, axis) + rank = x.ndim + static_axis = axis + axis = axis + rank if axis < 0 else axis + x_shape_original = tf.shape(x, out_type=indices.dtype) + indices_shape_original = tf.shape(indices, out_type=indices.dtype) + x_shape = tf.tensor_scatter_nd_update(x_shape_original, [[axis]], [1]) + indices_shape = tf.tensor_scatter_nd_update(indices_shape_original, [[axis]], [1]) + broadcasted_shape = tf.broadcast_dynamic_shape(x_shape, indices_shape) + x_shape = tf.tensor_scatter_nd_update(broadcasted_shape, [[axis]], [x_shape_original[axis]]) + indices_shape = tf.tensor_scatter_nd_update(broadcasted_shape, [[axis]], [indices_shape_original[axis]]) + x = tf.broadcast_to(x, x_shape) + indices = tf.broadcast_to(indices, indices_shape) + indices = tf.where(indices < 0, indices + x_shape[static_axis], indices) + x = swapaxes(x, static_axis, -1) + indices = swapaxes(indices, static_axis, -1) + x_shape = tf.shape(x) + x = tf.reshape(x, [-1, x_shape[-1]]) + indices_shape = tf.shape(indices) + indices = tf.reshape(indices, [-1, indices_shape[-1]]) + result = tf.gather(x, indices, batch_dims=1) + result = tf.reshape(result, indices_shape) + result = swapaxes(result, static_axis, -1) + result.set_shape(static_output_shape) + return result + +@sparse.elementwise_unary +def tan(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.tan(x) + +@sparse.elementwise_unary +def tanh(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.tanh(x) + +def tensordot(x1, x2, axes=2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + result_dtype = dtypes.result_type(x1.dtype, x2.dtype) + compute_dtype = dtypes.result_type(result_dtype, float) + x1 = tf.cast(x1, compute_dtype) + x2 = tf.cast(x2, compute_dtype) + return tf.cast(tf.tensordot(x1, x2, axes=axes), dtype=result_dtype) + +@sparse.elementwise_unary +def round(x, decimals=0): + if decimals == 0: + return tf.round(x) + x_dtype = x.dtype + if tf.as_dtype(x_dtype).is_integer: + if decimals > 0: + return x + factor = tf.cast(math.pow(10, decimals), config.floatx()) + x = tf.cast(x, config.floatx()) + else: + factor = tf.cast(math.pow(10, decimals), x.dtype) + x = tf.multiply(x, factor) + x = tf.round(x) + x = tf.divide(x, factor) + return tf.cast(x, x_dtype) + +def tile(x, repeats): + x = convert_to_tensor(x) + repeats = tf.reshape(convert_to_tensor(repeats, dtype='int32'), [-1]) + repeats_size = tf.size(repeats) + repeats = tf.pad(repeats, [[tf.maximum(x.shape.rank - repeats_size, 0), 0]], constant_values=1) + x_shape = tf.pad(tf.shape(x), [[tf.maximum(repeats_size - x.shape.rank, 0), 0]], constant_values=1) + x = tf.reshape(x, x_shape) + return tf.tile(x, repeats) + +def trace(x, offset=0, axis1=0, axis2=1): + x = convert_to_tensor(x) + dtype = standardize_dtype(x.dtype) + if dtype not in ('int64', 'uint32', 'uint64'): + dtype = dtypes.result_type(dtype, 'int32') + x_shape = tf.shape(x) + x = moveaxis(x, (axis1, axis2), (-2, -1)) + x = tf.where(eye(x_shape[axis1], x_shape[axis2], k=offset, dtype='bool'), x, tf.zeros_like(x)) + if standardize_dtype(x.dtype) == 'bool': + x = tf.cast(x, 'int32') + return 
tf.cast(tf.reduce_sum(x, axis=(-2, -1)), dtype) + +def tri(N, M=None, k=0, dtype=None): + M = M if M is not None else N + dtype = standardize_dtype(dtype or config.floatx()) + if k < 0: + lower = -k - 1 + if lower > N: + r = tf.zeros([N, M], dtype=dtype) + else: + o = tf.ones([N, M], dtype='bool') + r = tf.cast(tf.logical_not(tf.linalg.band_part(o, lower, -1)), dtype=dtype) + else: + o = tf.ones([N, M], dtype=dtype) + if k > M: + r = o + else: + r = tf.linalg.band_part(o, -1, k) + return r + +def tril(x, k=0): + x = convert_to_tensor(x) + + def _negative_k_branch(): + shape = tf.shape(x) + (rows, cols) = (shape[-2], shape[-1]) + (i, j) = tf.meshgrid(tf.range(rows), tf.range(cols), indexing='ij') + mask = i >= j - k + return tf.where(tf.broadcast_to(mask, shape), x, tf.zeros_like(x)) + return tf.cond(k >= 0, lambda : tf.linalg.band_part(x, -1, k), _negative_k_branch) + +def triu(x, k=0): + x = convert_to_tensor(x) + + def _positive_k_branch(): + shape = tf.shape(x) + (rows, cols) = (shape[-2], shape[-1]) + (i, j) = tf.meshgrid(tf.range(rows), tf.range(cols), indexing='ij') + mask = i <= j - k + return tf.where(tf.broadcast_to(mask, shape), x, tf.zeros_like(x)) + return tf.cond(k <= 0, lambda : tf.linalg.band_part(x, -k, -1), _positive_k_branch) + +def trunc(x): + x = convert_to_tensor(x) + dtype = standardize_dtype(x.dtype) + if dtype == 'bool' or 'int' in dtype: + return x + return tf.where(x < 0, tf.math.ceil(x), tf.math.floor(x)) + +def vdot(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + result_dtype = dtypes.result_type(x1.dtype, x2.dtype) + compute_dtype = dtypes.result_type(result_dtype, float) + x1 = tf.cast(x1, compute_dtype) + x2 = tf.cast(x2, compute_dtype) + x1 = tf.reshape(x1, [-1]) + x2 = tf.reshape(x2, [-1]) + return tf.cast(dot(x1, x2), result_dtype) + +def vstack(xs): + dtype_set = set([getattr(x, 'dtype', type(x)) for x in xs]) + if len(dtype_set) > 1: + dtype = dtypes.result_type(*dtype_set) + xs = tree.map_structure(lambda x: convert_to_tensor(x, dtype), xs) + return tf.concat(xs, axis=0) + +def _vmap_fn(fn, in_axes=0): + if in_axes != 0: + raise ValueError('Not supported with `vectorize()` with the TensorFlow backend.') + + @functools.wraps(fn) + def wrapped(x): + return tf.vectorized_map(fn, x) + return wrapped + +def vectorize(pyfunc, *, excluded=None, signature=None): + return vectorize_impl(pyfunc, _vmap_fn, excluded=excluded, signature=signature) + +def where(condition, x1, x2): + condition = tf.cast(condition, 'bool') + if x1 is not None and x2 is not None: + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return tf.where(condition, x1, x2) + if x1 is None and x2 is None: + return nonzero(condition) + raise ValueError('`x1` and `x2` either both should be `None` or both should have non-None value.') + +@sparse.elementwise_division +def divide(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2)), float) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return tf.divide(x1, x2) + +def divide_no_nan(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not 
isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2)), float) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return tf.math.divide_no_nan(x1, x2) + +def true_divide(x1, x2): + return divide(x1, x2) + +def power(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + if 'uint' in dtype: + x1 = convert_to_tensor(x1, 'int32') + x2 = convert_to_tensor(x2, 'int32') + return tf.cast(tf.pow(x1, x2), dtype) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return tf.pow(x1, x2) + +@sparse.elementwise_unary +def negative(x): + return tf.negative(x) + +@sparse.elementwise_unary +def square(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'bool': + x = tf.cast(x, 'int32') + return tf.square(x) + +@sparse.elementwise_unary +def sqrt(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + return tf.math.sqrt(x) + +def squeeze(x, axis=None): + x = convert_to_tensor(x) + axis = to_tuple_or_list(axis) + static_shape = x.shape.as_list() + if axis is not None: + for a in axis: + if static_shape[a] != 1: + raise ValueError(f'Cannot squeeze axis={a}, because the dimension is not 1.') + axis = sorted([canonicalize_axis(a, len(static_shape)) for a in axis]) + if isinstance(x, tf.SparseTensor): + dynamic_shape = tf.shape(x) + new_shape = [] + gather_indices = [] + for (i, dim) in enumerate(static_shape): + if not (dim == 1 if axis is None else i in axis): + new_shape.append(dim if dim is not None else dynamic_shape[i]) + gather_indices.append(i) + new_indices = tf.gather(x.indices, gather_indices, axis=1) + return tf.SparseTensor(new_indices, x.values, tuple(new_shape)) + return tf.squeeze(x, axis=axis) + +def transpose(x, axes=None): + if isinstance(x, tf.SparseTensor): + from keras.src.ops.operation_utils import compute_transpose_output_shape + output = tf.sparse.transpose(x, perm=axes) + output.set_shape(compute_transpose_output_shape(x.shape, axes)) + return output + return tf.transpose(x, perm=axes) + +def var(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + compute_dtype = dtypes.result_type(x.dtype, 'float32') + result_dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, compute_dtype) + return tf.cast(tf.math.reduce_variance(x, axis=axis, keepdims=keepdims), result_dtype) + +def sum(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + dtype = standardize_dtype(x.dtype) + if dtype in ('bool', 'int8', 'int16'): + dtype = 'int32' + elif dtype in ('uint8', 'uint16'): + dtype = 'uint32' + x = cast(x, dtype) + if isinstance(x, tf.SparseTensor): + return tf.sparse.reduce_sum(x, axis=axis, keepdims=keepdims, output_is_sparse=True) + return tf.reduce_sum(x, axis=axis, keepdims=keepdims) + +def eye(N, M=None, k=0, dtype=None): + dtype = dtype or config.floatx() + if not M: + M = N + (N, M, k) = (int(N), int(M), int(k)) + if k >= M or -k >= N: + return zeros([N, M], dtype=dtype) + if k == 0: + return tf.eye(N, M, dtype=dtype) + diag_len = builtins.min(N, M) + if k > 0: + if N >= M: + diag_len -= k + elif N + k > M: + diag_len = M - k + elif k <= 0: + if M >= N: + diag_len += k + elif M - k > N: + diag_len = N + k + diagonal_ = 
tf.ones([diag_len], dtype=dtype) + return tf.linalg.diag(diagonal=diagonal_, num_rows=N, num_cols=M, k=k) + +def floor_divide(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return tf.math.floordiv(x1, x2) + +def logical_xor(x1, x2): + x1 = tf.cast(x1, 'bool') + x2 = tf.cast(x2, 'bool') + return tf.math.logical_xor(x1, x2) + +def correlate(x1, x2, mode='valid'): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + if dtype == tf.int64: + dtype = tf.float64 + elif dtype not in [tf.bfloat16, tf.float16, tf.float64]: + dtype = tf.float32 + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + (x1_len, x2_len) = (int(x1.shape[0]), int(x2.shape[0])) + if mode == 'full': + full_len = x1_len + x2_len - 1 + x1_pad = (full_len - x1_len) / 2 + x2_pad = (full_len - x2_len) / 2 + x1 = tf.pad(x1, paddings=[[tf.math.floor(x1_pad), tf.math.ceil(x1_pad)]]) + x2 = tf.pad(x2, paddings=[[tf.math.floor(x2_pad), tf.math.ceil(x2_pad)]]) + x1 = tf.reshape(x1, (1, full_len, 1)) + x2 = tf.reshape(x2, (full_len, 1, 1)) + return tf.squeeze(tf.nn.conv1d(x1, x2, stride=1, padding='SAME')) + x1 = tf.reshape(x1, (1, x1_len, 1)) + x2 = tf.reshape(x2, (x2_len, 1, 1)) + return tf.squeeze(tf.nn.conv1d(x1, x2, stride=1, padding=mode.upper())) + +def select(condlist, choicelist, default=0): + return tf.experimental.numpy.select(condlist, choicelist, default=default) + +def slogdet(x): + x = convert_to_tensor(x) + return tuple(tf.linalg.slogdet(x)) + +def argpartition(x, kth, axis=-1): + x = convert_to_tensor(x, tf.int32) + x = swapaxes(x, axis, -1) + bottom_ind = tf.math.top_k(-x, kth + 1).indices + n = tf.shape(x)[-1] + mask = tf.reduce_sum(tf.one_hot(bottom_ind, n, dtype=tf.int32), axis=0) + indices = tf.where(mask) + updates = tf.squeeze(tf.zeros(tf.shape(indices)[0], dtype=tf.int32)) + final_mask = tf.tensor_scatter_nd_update(x, indices, updates) + top_ind = tf.math.top_k(final_mask, tf.shape(x)[-1] - kth - 1).indices + out = tf.concat([bottom_ind, top_ind], axis=x.ndim - 1) + return swapaxes(out, -1, axis) + +# File: keras-master/keras/src/backend/tensorflow/optimizer.py +import warnings +import tensorflow as tf +from keras.src import backend +from keras.src.backend.common import KerasVariable +from keras.src.backend.tensorflow.trackable import KerasAutoTrackable +from keras.src.optimizers import base_optimizer + +class TFOptimizer(KerasAutoTrackable, base_optimizer.BaseOptimizer): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._distribution_strategy = tf.distribute.get_strategy() + + def add_variable_from_reference(self, reference_variable, name=None, initializer='zeros'): + if isinstance(reference_variable, backend.Variable): + colocate_var = reference_variable.value + else: + colocate_var = reference_variable + with self._distribution_strategy.extended.colocate_vars_with(colocate_var): + return super().add_variable_from_reference(reference_variable, name=name, initializer=initializer) + + def stateless_apply(self, optimizer_variables, grads, trainable_variables): + raise ValueError('stateless_apply is not supported with the TensorFlow backend (as it is incompatible with tf.distribute).') + + def assign(self, variable, value): 
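+ # NOTE (added for exposition; not part of the upstream Keras file): assign and
+ # its assign_add/assign_sub siblings below special-case `tf.IndexedSlices`,
+ # the sparse gradient type TensorFlow emits for `tf.gather`/embedding lookups;
+ # the scatter_* calls update only the affected rows instead of densifying the
+ # whole variable. A standalone sketch, assuming eager execution:
+ #
+ #   import tensorflow as tf
+ #   v = tf.Variable(tf.zeros((4, 2)))
+ #   delta = tf.IndexedSlices(values=tf.ones((1, 2)), indices=tf.constant([2]))
+ #   v.scatter_add(delta)  # only row 2 is incremented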
+ if isinstance(variable, KerasVariable): + variable = variable.value + value = tf.cast(value, variable.dtype) + if isinstance(value, tf.IndexedSlices): + variable.scatter_update(value) + else: + variable.assign(value) + + def assign_add(self, variable, value): + if isinstance(variable, KerasVariable): + variable = variable.value + value = tf.cast(value, variable.dtype) + if isinstance(value, tf.IndexedSlices): + variable.scatter_add(value) + else: + variable.assign_add(value) + + def assign_sub(self, variable, value): + if isinstance(variable, KerasVariable): + variable = variable.value + value = tf.cast(value, variable.dtype) + if isinstance(value, tf.IndexedSlices): + variable.scatter_sub(value) + else: + variable.assign_sub(value) + + def _var_key(self, variable): + if isinstance(variable, backend.Variable): + variable = variable.value + if hasattr(variable, '_distributed_container'): + variable = variable._distributed_container() + elif isinstance(variable, tf.__internal__.CompositeTensor) and hasattr(variable, 'handle') and hasattr(variable.handle, '_distributed_container'): + variable = variable.handle._distributed_container() + return variable._unique_id + + def _apply_weight_decay(self, variables): + if self.weight_decay is None: + return + + def distributed_apply_weight_decay(distribution, variables, **kwargs): + + def weight_decay_fn(variable): + if self._use_weight_decay(variable): + lr = tf.cast(self.learning_rate, variable.dtype) + wd = tf.cast(self.weight_decay, variable.dtype) + variable.assign_sub(variable * wd * lr) + for variable in variables: + if isinstance(variable, backend.Variable): + variable = variable.value + distribution.extended.update(variable, weight_decay_fn, group=False) + tf.__internal__.distribute.interim.maybe_merge_call(distributed_apply_weight_decay, self._distribution_strategy, variables) + + def _backend_update_step(self, grads, trainable_variables, learning_rate): + trainable_variables = [v.value if isinstance(v, backend.Variable) else v for v in trainable_variables] + grads_and_vars = list(zip(grads, trainable_variables)) + grads_and_vars = self._all_reduce_sum_gradients(grads_and_vars) + tf.__internal__.distribute.interim.maybe_merge_call(self._distributed_tf_update_step, self._distribution_strategy, grads_and_vars, learning_rate) + + def _distributed_tf_update_step(self, distribution, grads_and_vars, learning_rate): + + def apply_grad_to_update_var(var, grad, learning_rate): + return self.update_step(grad, var, learning_rate) + for (grad, var) in grads_and_vars: + distribution.extended.update(var, apply_grad_to_update_var, args=(grad, learning_rate), group=False) + + def _all_reduce_sum_gradients(self, grads_and_vars): + replica_context = tf.distribute.get_replica_context() + if not replica_context: + return grads_and_vars + grads_and_vars = list(grads_and_vars) + filtered_grads_and_vars = filter_empty_gradients(grads_and_vars) + if filtered_grads_and_vars: + grads = [pair[0] for pair in filtered_grads_and_vars] + reduced = tf.distribute.get_replica_context().all_reduce(tf.distribute.ReduceOp.SUM, grads) + else: + reduced = [] + reduced_with_nones = [] + reduced_pos = 0 + for (g, v) in grads_and_vars: + if g is None: + reduced_with_nones.append((None, v)) + else: + reduced_with_nones.append((reduced[reduced_pos], v)) + reduced_pos += 1 + assert reduced_pos == len(reduced), 'Failed to add all gradients' + return reduced_with_nones + + def _overwrite_model_variables_with_average_value(self, trainable_variables): + trainable_variables = [v.value if 
isinstance(v, backend.Variable) else v for v in trainable_variables] + for (var, average_var) in zip(trainable_variables, self._model_variables_moving_average): + self._distribution_strategy.extended.update(var, lambda a, b: a.assign(b), args=(average_var,)) + + def _backend_increment_gradient_accumulators(self, grads, acc_grads): + + def update_accumulator(var, grad): + var.assign(var + grad) + accumulators = [v.value for v in acc_grads] + + def _distributed_tf_increment_grad_acc(distribution, grads, accumulators): + for (grad, var) in zip(grads, accumulators): + distribution.extended.update(var, update_accumulator, args=(grad,), group=False) + tf.__internal__.distribute.interim.maybe_merge_call(_distributed_tf_increment_grad_acc, self._distribution_strategy, grads, accumulators) + + def _clip_by_norm(self, values, axes=None): + return tf.clip_by_norm(values, self.clipnorm, axes) + +def filter_empty_gradients(grads_and_vars): + grads_and_vars = tuple(grads_and_vars) + if not grads_and_vars: + return grads_and_vars + filtered = [] + vars_with_empty_grads = [] + for (grad, var) in grads_and_vars: + if grad is None: + vars_with_empty_grads.append(var) + else: + filtered.append((grad, var)) + filtered = tuple(filtered) + if not filtered: + variable = ([v.name for (_, v) in grads_and_vars],) + raise ValueError(f'No gradients provided for any variable: {variable}. Provided `grads_and_vars` is {grads_and_vars}.') + if vars_with_empty_grads: + warnings.warn(f"Gradients do not exist for variables {[v.name for v in vars_with_empty_grads]} when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument?") + return filtered + +# File: keras-master/keras/src/backend/tensorflow/random.py +import tensorflow as tf +from keras.src.backend.common import standardize_dtype +from keras.src.backend.config import floatx +from keras.src.random.seed_generator import SeedGenerator +from keras.src.random.seed_generator import draw_seed +from keras.src.random.seed_generator import make_default_seed + +def _cast_seed(seed): + if standardize_dtype(seed.dtype) == 'int32': + return seed + else: + seed = tf.cast(tf.math.floormod(seed, tf.int32.max - 1), dtype='int32') + return seed + +def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + dtype = dtype or floatx() + seed = _cast_seed(draw_seed(seed)) + return tf.random.stateless_normal(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed) + +def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): + dtype = dtype or floatx() + seed = _cast_seed(draw_seed(seed)) + return tf.random.stateless_uniform(shape=shape, minval=tf.cast(minval, dtype), maxval=tf.cast(maxval, dtype), dtype=dtype, seed=seed) + +def categorical(logits, num_samples, dtype='int64', seed=None): + seed = _cast_seed(draw_seed(seed)) + output = tf.random.stateless_categorical(logits, num_samples, seed=seed) + return tf.cast(output, dtype) + +def randint(shape, minval, maxval, dtype='int32', seed=None): + intermediate_dtype = dtype + if standardize_dtype(dtype) not in ['int32', 'int64']: + intermediate_dtype = 'int64' + seed = _cast_seed(draw_seed(seed)) + output = tf.random.stateless_uniform(shape=shape, minval=minval, maxval=maxval, dtype=intermediate_dtype, seed=seed) + return tf.cast(output, dtype) + +def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + dtype = dtype or floatx() + seed = _cast_seed(draw_seed(seed)) + return tf.random.stateless_truncated_normal(shape=shape, mean=mean, stddev=stddev, dtype=dtype, 
seed=seed) + +def _get_concrete_noise_shape(inputs, noise_shape): + if noise_shape is None: + return tf.shape(inputs) + concrete_inputs_shape = tf.shape(inputs) + concrete_noise_shape = [] + for (i, value) in enumerate(noise_shape): + concrete_noise_shape.append(concrete_inputs_shape[i] if value is None else value) + return concrete_noise_shape + +def dropout(inputs, rate, noise_shape=None, seed=None): + seed = _cast_seed(draw_seed(seed)) + noise_shape = _get_concrete_noise_shape(inputs, noise_shape) + return tf.nn.experimental.stateless_dropout(inputs, rate=rate, noise_shape=noise_shape, seed=seed) + +def shuffle(x, axis=0, seed=None): + from keras.src.backend.tensorflow.numpy import swapaxes + seed = _cast_seed(draw_seed(seed)) + if axis == 0: + return tf.random.experimental.stateless_shuffle(x, seed=seed) + x = swapaxes(x, axis1=0, axis2=axis) + x = tf.random.experimental.stateless_shuffle(x, seed=seed) + x = swapaxes(x, axis1=0, axis2=axis) + return x + +def gamma(shape, alpha, dtype=None, seed=None): + dtype = dtype or floatx() + seed = _cast_seed(draw_seed(seed)) + intermediate_dtype = dtype + if standardize_dtype(dtype) == 'bfloat16': + intermediate_dtype = 'float32' + return tf.cast(tf.random.stateless_gamma(shape, alpha=alpha, dtype=intermediate_dtype, seed=seed), dtype) + +def binomial(shape, counts, probabilities, dtype=None, seed=None): + dtype = dtype or floatx() + seed = _cast_seed(draw_seed(seed)) + intermediate_dtype = dtype + if standardize_dtype(dtype) == 'bfloat16': + intermediate_dtype = 'float32' + return tf.cast(tf.random.stateless_binomial(shape=shape, seed=seed, counts=counts, probs=probabilities, output_dtype=intermediate_dtype), dtype) + +def beta(shape, alpha, beta, dtype=None, seed=None): + dtype = dtype or floatx() + seed_1 = _cast_seed(draw_seed(seed)) + seed_2 = seed_1 + 12 + intermediate_dtype = dtype + if standardize_dtype(dtype) == 'bfloat16': + intermediate_dtype = 'float32' + alpha = tf.convert_to_tensor(alpha, dtype=intermediate_dtype) + beta = tf.convert_to_tensor(beta, dtype=intermediate_dtype) + alpha = tf.broadcast_to(alpha, shape) + beta = tf.broadcast_to(beta, shape) + gamma_a = tf.cast(tf.random.stateless_gamma(shape=shape, seed=seed_1, alpha=alpha, dtype=intermediate_dtype), dtype) + gamma_b = tf.cast(tf.random.stateless_gamma(shape=shape, seed=seed_2, alpha=beta, dtype=intermediate_dtype), dtype) + sample = gamma_a / (gamma_a + gamma_b) + return sample + +# File: keras-master/keras/src/backend/tensorflow/rnn.py +import tensorflow as tf +from keras.src import tree + +def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None, time_major=False, zero_output_for_mask=False, return_all_outputs=True): + input_length = input_length or inputs.shape[1] + + def swap_batch_timestep(input_t): + axes = list(range(len(input_t.shape))) + (axes[0], axes[1]) = (1, 0) + return tf.transpose(input_t, axes) + if not time_major: + inputs = tree.map_structure(swap_batch_timestep, inputs) + flattened_inputs = tree.flatten(inputs) + time_steps = flattened_inputs[0].shape[0] + time_steps_t = tf.shape(flattened_inputs[0])[0] if time_steps is None else time_steps + for input_ in flattened_inputs: + input_.shape.with_rank_at_least(3) + if mask is not None: + if mask.dtype != tf.bool: + mask = tf.cast(mask, tf.bool) + if len(mask.shape) == 2: + mask = tf.expand_dims(mask, axis=-1) + if not time_major: + mask = swap_batch_timestep(mask) + if constants is None: + constants = [] + + def _expand_mask(mask_t, 
input_t, fixed_dim=1): + if tree.is_nested(mask_t): + raise ValueError(f'mask_t is expected to be tensor, but got {mask_t}') + if tree.is_nested(input_t): + raise ValueError(f'input_t is expected to be tensor, but got {input_t}') + rank_diff = len(input_t.shape) - len(mask_t.shape) + for _ in range(rank_diff): + mask_t = tf.expand_dims(mask_t, -1) + multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:] + return tf.tile(mask_t, multiples) + if unroll: + if not time_steps: + raise ValueError('Unrolling requires a fixed number of timesteps.') + states = tuple(initial_states) + successive_states = [] + successive_outputs = [] + + def _process_single_input_t(input_t): + input_t = tf.unstack(input_t) + if go_backwards: + input_t.reverse() + return input_t + if tree.is_nested(inputs): + processed_input = tree.map_structure(_process_single_input_t, inputs) + else: + processed_input = (_process_single_input_t(inputs),) + + def _get_input_tensor(time): + inp = [t_[time] for t_ in processed_input] + return tree.pack_sequence_as(inputs, inp) + if mask is not None: + mask_list = tf.unstack(mask) + if go_backwards: + mask_list.reverse() + for i in range(time_steps): + inp = _get_input_tensor(i) + mask_t = mask_list[i] + (output, new_states) = step_function(inp, tuple(states) + tuple(constants)) + tiled_mask_t = _expand_mask(mask_t, output) + if not successive_outputs: + prev_output = tf.zeros_like(output) + else: + prev_output = successive_outputs[-1] + output = tf.where(tiled_mask_t, output, prev_output) + flat_states = tree.flatten(states) + flat_new_states = tree.flatten(new_states) + tiled_mask_t = tuple((_expand_mask(mask_t, s) for s in flat_states)) + flat_final_states = tuple((tf.where(m, s, ps) for (m, s, ps) in zip(tiled_mask_t, flat_new_states, flat_states))) + states = tree.pack_sequence_as(states, flat_final_states) + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: + successive_outputs = [output] + successive_states = [states] + last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = tf.stack(successive_outputs) + if zero_output_for_mask: + last_output = tf.where(_expand_mask(mask_list[-1], last_output), last_output, tf.zeros_like(last_output)) + outputs = tf.where(_expand_mask(mask, outputs, fixed_dim=2), outputs, tf.zeros_like(outputs)) + else: + for i in range(time_steps): + inp = _get_input_tensor(i) + (output, states) = step_function(inp, tuple(states) + tuple(constants)) + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: + successive_outputs = [output] + successive_states = [states] + last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = tf.stack(successive_outputs) + else: + states = tuple(initial_states) + input_ta = tuple((tf.TensorArray(dtype=inp.dtype, size=time_steps_t, tensor_array_name=f'input_ta_{i}') for (i, inp) in enumerate(flattened_inputs))) + input_ta = tuple((ta.unstack(input_) if not go_backwards else ta.unstack(tf.reverse(input_, [0])) for (ta, input_) in zip(input_ta, flattened_inputs))) + input_time_zero = tree.pack_sequence_as(inputs, [inp[0] for inp in flattened_inputs]) + (output_time_zero, _) = step_function(input_time_zero, tuple(initial_states) + tuple(constants)) + output_ta_size = time_steps_t if return_all_outputs else 1 + output_ta = tuple((tf.TensorArray(dtype=out.dtype, size=output_ta_size, element_shape=out.shape, tensor_array_name=f'output_ta_{i}') for (i, out) in 
enumerate(tree.flatten(output_time_zero)))) + time = tf.constant(0, dtype='int32', name='time') + if input_length is None: + max_iterations = time_steps_t + else: + max_iterations = tf.reduce_max(input_length) + while_loop_kwargs = {'cond': lambda time, *_: time < time_steps_t, 'maximum_iterations': max_iterations, 'parallel_iterations': 32, 'swap_memory': True} + if mask is not None: + if go_backwards: + mask = tf.reverse(mask, [0]) + mask_ta = tf.TensorArray(dtype=tf.bool, size=time_steps_t, tensor_array_name='mask_ta') + mask_ta = mask_ta.unstack(mask) + + def masking_fn(time): + return mask_ta.read(time) + + def compute_masked_output(mask_t, flat_out, flat_mask): + tiled_mask_t = tuple((_expand_mask(mask_t, o, fixed_dim=len(mask_t.shape)) for o in flat_out)) + return tuple((tf.where(m, o, fm) for (m, o, fm) in zip(tiled_mask_t, flat_out, flat_mask))) + elif isinstance(input_length, tf.Tensor): + if go_backwards: + max_len = tf.reduce_max(input_length, axis=0) + rev_input_length = tf.subtract(max_len - 1, input_length) + + def masking_fn(time): + return tf.less(rev_input_length, time) + else: + + def masking_fn(time): + return tf.greater(input_length, time) + + def compute_masked_output(mask_t, flat_out, flat_mask): + return tuple((tf.where(mask_t, o, zo) for (o, zo) in zip(flat_out, flat_mask))) + else: + masking_fn = None + if masking_fn is not None: + flat_zero_output = tuple((tf.zeros_like(o) for o in tree.flatten(output_time_zero))) + + def _step(time, output_ta_t, prev_output, *states): + current_input = tuple((ta.read(time) for ta in input_ta)) + current_input = tree.pack_sequence_as(inputs, current_input) + mask_t = masking_fn(time) + (output, new_states) = step_function(current_input, tuple(states) + tuple(constants)) + flat_output = tree.flatten(output) + flat_mask_output = flat_zero_output if zero_output_for_mask else tree.flatten(prev_output) + flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output) + flat_state = tree.flatten(states) + flat_new_state = tree.flatten(new_states) + flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state) + new_states = tree.pack_sequence_as(new_states, flat_final_state) + ta_index_to_write = time if return_all_outputs else 0 + output_ta_t = tuple((ta.write(ta_index_to_write, out) for (ta, out) in zip(output_ta_t, flat_new_output))) + return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states) + final_outputs = tf.while_loop(body=_step, loop_vars=(time, output_ta, flat_zero_output) + states, **while_loop_kwargs) + new_states = final_outputs[3:] + else: + + def _step(time, output_ta_t, *states): + current_input = tuple((ta.read(time) for ta in input_ta)) + current_input = tree.pack_sequence_as(inputs, current_input) + (output, new_states) = step_function(current_input, tuple(states) + tuple(constants)) + flat_new_state = tree.flatten(new_states) + flat_output = tree.flatten(output) + ta_index_to_write = time if return_all_outputs else 0 + output_ta_t = tuple((ta.write(ta_index_to_write, out) for (ta, out) in zip(output_ta_t, flat_output))) + new_states = tree.pack_sequence_as(initial_states, flat_new_state) + return (time + 1, output_ta_t) + tuple(new_states) + final_outputs = tf.while_loop(body=_step, loop_vars=(time, output_ta) + states, **while_loop_kwargs) + new_states = final_outputs[2:] + output_ta = final_outputs[1] + outputs = tuple((o.stack() for o in output_ta)) + last_output = tuple((o[-1] for o in outputs)) + outputs = tree.pack_sequence_as(output_time_zero, outputs) + 
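+ # NOTE (added for exposition; not part of the upstream Keras file): the
+ # non-unrolled branch above is the classic TensorArray + tf.while_loop scan.
+ # The same pattern in miniature, for a single tensor state and a hypothetical
+ # `step(x_t, state) -> (y_t, state)`:
+ #
+ #   import tensorflow as tf
+ #   def scan(step, xs, state):  # xs has shape (time, ...)
+ #       n = tf.shape(xs)[0]
+ #       ta = tf.TensorArray(xs.dtype, size=n)
+ #       def body(t, ta, state):
+ #           y, state = step(xs[t], state)
+ #           return t + 1, ta.write(t, y), state
+ #       _, ta, state = tf.while_loop(lambda t, *_: t < n, body, (0, ta, state))
+ #       return ta.stack(), state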
last_output = tree.pack_sequence_as(output_time_zero, last_output) + if not time_major: + outputs = tree.map_structure(swap_batch_timestep, outputs) + return (last_output, outputs, new_states) + +def gru(inputs, initial_state, mask, kernel, recurrent_kernel, bias, activation, recurrent_activation, return_sequences=False, go_backwards=False, unroll=False, time_major=False, reset_after=True): + cudnn_supported = cudnn_ok(activation, recurrent_activation, unroll, use_bias=bias is not None, reset_after=reset_after) + if not cudnn_supported: + raise NotImplementedError + from keras.src.backend.tensorflow import Variable + if isinstance(kernel, Variable): + kernel = kernel.value + if isinstance(recurrent_kernel, Variable): + recurrent_kernel = recurrent_kernel.value + if isinstance(bias, Variable): + bias = bias.value + try: + return _cudnn_gru(inputs, initial_state, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, return_sequences) + except tf.errors.InvalidArgumentError: + raise NotImplementedError + except tf.errors.NotFoundError: + raise NotImplementedError + +def _do_gru_arguments_support_cudnn(activation, recurrent_activation, unroll, use_bias, reset_after): + from keras.src import activations + from keras.src import ops + return activation in (activations.tanh, tf.tanh, ops.tanh) and recurrent_activation in (activations.sigmoid, tf.sigmoid, ops.sigmoid) and (not unroll) and use_bias and reset_after + +def _do_lstm_arguments_support_cudnn(activation, recurrent_activation, unroll, use_bias): + from keras.src import activations + from keras.src import ops + return activation in (activations.tanh, tf.tanh, ops.tanh) and recurrent_activation in (activations.sigmoid, tf.sigmoid, ops.sigmoid) and (not unroll) and use_bias + +def _has_fully_masked_sequence(mask): + return tf.reduce_any(tf.reduce_all(tf.logical_not(tf.cast(mask, dtype='bool')), axis=1)) + +def _assert_valid_mask(mask): + valid = tf.logical_and(tf.logical_not(_has_fully_masked_sequence(mask)), _is_sequence_right_padded(mask)) + tf.Assert(valid, ["You are passing a RNN mask that does not correspond to right-padded sequences, while using cuDNN, which is not supported. With cuDNN, RNN masks can only be used for right-padding, e.g. `[[True, True, False, False]]` would be a valid mask, but any mask that isn't just contiguous `True`'s on the left and contiguous `False`'s on the right would be invalid. 
You can pass `use_cudnn=False` to your RNN layer to stop using cuDNN (this may be slower)."]) + +def _standardize_cudnn_weights(weights, biases, shape, transpose_weights=False): + + def convert(w): + return tf.transpose(w) if transpose_weights else w + weights = [tf.reshape(convert(x), shape) for x in weights] + biases = [tf.reshape(x, shape) for x in biases] + return tf.concat(weights + biases, axis=0) + +def _is_sequence_right_padded(mask): + max_seq_length = tf.shape(mask)[1] + count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1) + right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length) + return tf.reduce_all(tf.equal(tf.cast(mask, dtype='bool'), tf.cast(right_padded_mask, dtype='bool'))) + +def _compute_sequence_length_from_mask(mask, time_major): + timestep_index = 0 if time_major else 1 + return tf.reduce_sum(tf.cast(mask, tf.int32), axis=timestep_index) + +def _is_gpu_available(): + return bool(tf.config.list_logical_devices('GPU')) + +def _cudnn_gru(inputs, initial_state, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, return_sequences): + if mask is not None: + _assert_valid_mask(mask) + sequence_lengths = _compute_sequence_length_from_mask(mask, time_major) + else: + if time_major: + batch_dim = tf.shape(inputs)[1] + max_sequence_length = tf.shape(inputs)[0] + else: + batch_dim = tf.shape(inputs)[0] + max_sequence_length = tf.shape(inputs)[1] + sequence_lengths = tf.fill([batch_dim], max_sequence_length) + if not time_major and sequence_lengths is None: + inputs = tf.transpose(inputs, perm=(1, 0, 2)) + (seq_axis, batch_axis) = (0, 1) + else: + (seq_axis, batch_axis) = (0, 1) if time_major else (1, 0) + init_h = tf.expand_dims(initial_state, axis=seq_axis) + weights = tf.split(kernel, 3, axis=1) + weights += tf.split(recurrent_kernel, 3, axis=1) + bias = tf.split(tf.reshape(bias, [-1]), 6) + if tf.sysconfig.get_build_info()['is_cuda_build']: + (weights[0], weights[1]) = (weights[1], weights[0]) + (weights[3], weights[4]) = (weights[4], weights[3]) + (bias[0], bias[1]) = (bias[1], bias[0]) + (bias[3], bias[4]) = (bias[4], bias[3]) + params = _standardize_cudnn_weights(weights=weights, biases=bias, shape=tf.constant([-1]), transpose_weights=True) + if go_backwards: + inputs = tf.reverse_sequence(inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis) + (outputs, h, _, _, _) = tf.raw_ops.CudnnRNNV3(input=inputs, input_h=init_h, input_c=0, params=params, is_training=True, rnn_mode='gru', sequence_lengths=sequence_lengths, time_major=time_major) + if go_backwards: + outputs = tf.reverse_sequence(outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis) + outputs = tf.reverse(outputs, axis=[seq_axis]) + last_output = outputs[-1] + if not time_major and sequence_lengths is None and return_sequences: + outputs = tf.transpose(outputs, perm=[1, 0, 2]) + state = tf.squeeze(h, axis=seq_axis) + if sequence_lengths is not None: + last_output = state + if not return_sequences: + outputs = tf.expand_dims(last_output, axis=0 if time_major else 1) + return (last_output, outputs, state) + +def cudnn_ok(activation, recurrent_activation, unroll, use_bias, reset_after=None): + if reset_after is None: + args_supported = _do_lstm_arguments_support_cudnn(activation=activation, recurrent_activation=recurrent_activation, unroll=unroll, use_bias=use_bias) + else: + args_supported = _do_gru_arguments_support_cudnn(activation=activation, recurrent_activation=recurrent_activation, unroll=unroll, use_bias=use_bias, reset_after=reset_after) + 
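+ # NOTE (added for exposition; not part of the upstream Keras file): the
+ # predicates above encode the exact envelope of the fused cuDNN kernel: tanh
+ # activation, sigmoid recurrent activation, no unrolling, a bias, and (for
+ # GRU) reset_after=True; `_assert_valid_mask` additionally requires any mask
+ # to describe right-padded sequences. For example:
+ #
+ #   from keras.src import activations
+ #   cudnn_ok(activations.tanh, activations.sigmoid, unroll=False,
+ #            use_bias=True, reset_after=True)  # True only if a GPU is visible
+ #   cudnn_ok(activations.relu, activations.sigmoid, unroll=False,
+ #            use_bias=True, reset_after=True)  # False: no cuDNN kernel for relu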
return args_supported and _is_gpu_available() + +def lstm(inputs, initial_state_h, initial_state_c, mask, kernel, recurrent_kernel, bias, activation, recurrent_activation, return_sequences=False, go_backwards=False, unroll=False, time_major=False): + cudnn_supported = cudnn_ok(activation, recurrent_activation, unroll, use_bias=bias is not None) + if not cudnn_supported: + raise NotImplementedError + from keras.src.backend.tensorflow import Variable + if isinstance(kernel, Variable): + kernel = kernel.value + if isinstance(recurrent_kernel, Variable): + recurrent_kernel = recurrent_kernel.value + if isinstance(bias, Variable): + bias = bias.value + try: + return _cudnn_lstm(inputs, initial_state_h, initial_state_c, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, return_sequences) + except tf.errors.InvalidArgumentError: + raise NotImplementedError + except tf.errors.NotFoundError: + raise NotImplementedError + +def _cudnn_lstm(inputs, initial_state_h, initial_state_c, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, return_sequences): + if mask is not None: + _assert_valid_mask(mask) + sequence_lengths = _compute_sequence_length_from_mask(mask, time_major) + else: + if time_major: + batch_dim = tf.shape(inputs)[1] + max_sequence_length = tf.shape(inputs)[0] + else: + batch_dim = tf.shape(inputs)[0] + max_sequence_length = tf.shape(inputs)[1] + sequence_lengths = tf.fill([batch_dim], max_sequence_length) + if not time_major and sequence_lengths is None: + inputs = tf.transpose(inputs, perm=(1, 0, 2)) + (seq_axis, batch_axis) = (0, 1) + else: + (seq_axis, batch_axis) = (0, 1) if time_major else (1, 0) + init_h = tf.expand_dims(initial_state_h, axis=seq_axis) + init_c = tf.expand_dims(initial_state_c, axis=seq_axis) + weights = tf.split(kernel, 4, axis=1) + weights += tf.split(recurrent_kernel, 4, axis=1) + full_bias = tf.concat((tf.zeros_like(bias), bias), 0) + if tf.sysconfig.get_build_info()['is_rocm_build']: + weights = [weights[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)] + full_bias = tf.split(full_bias, 8, axis=0) + full_bias = [full_bias[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)] + params = _standardize_cudnn_weights(weights=weights, biases=tf.split(full_bias, 8), shape=tf.constant([-1]), transpose_weights=True) + if go_backwards: + inputs = tf.reverse_sequence(inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis) + (outputs, h, c, _, _) = tf.raw_ops.CudnnRNNV3(input=inputs, input_h=init_h, input_c=init_c, params=params, is_training=True, rnn_mode='lstm', sequence_lengths=sequence_lengths, time_major=time_major) + if go_backwards: + outputs = tf.reverse_sequence(outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis) + outputs = tf.reverse(outputs, axis=[seq_axis]) + last_output = outputs[-1] + if not time_major and sequence_lengths is None and return_sequences: + outputs = tf.transpose(outputs, perm=[1, 0, 2]) + h = tf.squeeze(h, axis=seq_axis) + c = tf.squeeze(c, axis=seq_axis) + if sequence_lengths is not None: + last_output = h + if not return_sequences: + outputs = tf.expand_dims(last_output, axis=0 if time_major else 1) + return (last_output, outputs, [h, c]) + +# File: keras-master/keras/src/backend/tensorflow/sparse.py +import functools +import tensorflow as tf +ones_bool = functools.partial(tf.ones, dtype=tf.bool) +ones_int8 = functools.partial(tf.ones, dtype=tf.int8) +zeros_int8 = functools.partial(tf.zeros, dtype=tf.int8) +ones_like_int8 = functools.partial(tf.ones_like, dtype=tf.int8) +zeros_like_int8 = 
functools.partial(tf.zeros_like, dtype=tf.int8) + +def sparse_to_dense(x, default_value=None): + x_shape = x.shape + if x_shape.rank == 0: + if x.values.shape[0] == 0: + return tf.constant(default_value, dtype=x.dtype) + else: + return tf.reshape(x.values, ()) + x = tf.sparse.to_dense(x, default_value=default_value) + x.set_shape(x_shape) + return x + +def sparse_with_values(x, values): + x_shape = x.shape + x = tf.SparseTensor(x.indices, values, x.dense_shape) + x.set_shape(x_shape) + return x + +def broadcast_scalar_to_sparse_shape(scalar, sparse): + output = tf.broadcast_to(scalar, sparse.dense_shape) + output.set_shape(sparse.shape) + return output + +def sparse_subtract(x1, x2): + if isinstance(x2, tf.SparseTensor): + return tf.sparse.add(x1, tf.sparse.map_values(tf.negative, x2)) + else: + return tf.sparse.add(x1, tf.negative(x2)) + +def sparse_union_indices_and_values(x1, x2_indices, x2_values=None): + zeros2 = tf.SparseTensor(x2_indices, tf.zeros((tf.shape(x2_indices)[0],), x1.values.dtype), x1.dense_shape) + x1_for_union = tf.sparse.add(x1, zeros2) + if x2_values is not None: + x2 = tf.SparseTensor(x2_indices, x2_values, x1.dense_shape) + zeros1 = tf.sparse.map_values(tf.zeros_like, x1) + x2_for_union = tf.sparse.add(x2, zeros1) + return (x1_for_union.indices, x1_for_union.values, x2_for_union.values) + else: + return (x1_for_union.indices, x1_for_union.values, None) + +def indexed_slices_union_indices_and_values(x1, x2_indices, x2_values=None): + dim_0 = x1.dense_shape[0] + x1_indices_expanded = tf.expand_dims(x1.indices, axis=1) + x2_indices_expanded = tf.expand_dims(x2_indices, axis=1) + x1_indices_count = tf.shape(x1_indices_expanded)[0] + x2_indices_count = tf.shape(x2_indices_expanded)[0] + x1_indices_one_hot = tf.scatter_nd(x1_indices_expanded, ones_bool((x1_indices_count,)), (dim_0,)) + x2_indices_one_hot = tf.scatter_nd(x2_indices_expanded, ones_bool((x2_indices_count,)), (dim_0,)) + union_indices = tf.squeeze(tf.where(tf.math.logical_or(x1_indices_one_hot, x2_indices_one_hot)), axis=-1) + union_indices_count = tf.shape(union_indices)[0] + + def values_for_union(indices_expanded, indices_count, values): + indices_indices = tf.scatter_nd(indices_expanded, tf.range(1, indices_count + 1), (dim_0,)) + to_union_indices = tf.gather(indices_indices, union_indices) + values_with_leading_zeros = tf.concat([tf.zeros((1,) + values.shape[1:], values.dtype), values], axis=0) + return tf.gather(values_with_leading_zeros, to_union_indices) + x1_values_for_union_indices = tf.cond(tf.equal(x1_indices_count, union_indices_count), lambda : x1.values, lambda : values_for_union(x1_indices_expanded, x1_indices_count, x1.values)) + if x2_values is not None: + x2_values_for_union_indices = tf.cond(tf.equal(x2_indices_count, union_indices_count), lambda : x2_values, lambda : values_for_union(x2_indices_expanded, x2_indices_count, x2_values)) + else: + x2_values_for_union_indices = None + return (union_indices, x1_values_for_union_indices, x2_values_for_union_indices) + +def sparse_intersection_indices_and_values(x1, x2): + ones1 = tf.sparse.map_values(ones_like_int8, x1) + ones2 = tf.sparse.map_values(ones_like_int8, x2) + intersection_extra_dim = tf.sets.intersection(tf.sparse.expand_dims(ones1, axis=-1), tf.sparse.expand_dims(ones2, axis=-1)) + + def empty_intersection(): + return (tf.zeros((0, x1.shape.rank), dtype=tf.int64), tf.zeros((0,), dtype=x1.values.dtype), tf.zeros((0,), dtype=x2.values.dtype)) + + def non_empty_intersection(): + intersection = tf.sparse.reshape(intersection_extra_dim, 
x1.dense_shape) + zeros1 = tf.sparse.map_values(zeros_like_int8, x1) + zeros2 = tf.sparse.map_values(zeros_like_int8, x2) + mask1 = tf.sparse.add(zeros1, intersection) + mask2 = tf.sparse.add(zeros2, intersection) + return (intersection.indices, tf.sparse.retain(x1, tf.cast(mask1.values, tf.bool)).values, tf.sparse.retain(x2, tf.cast(mask2.values, tf.bool)).values) + return tf.cond(tf.equal(tf.size(intersection_extra_dim), 0), empty_intersection, non_empty_intersection) + +def indexed_slices_intersection_indices_and_values(x1, x2): + dim_0 = x1.dense_shape[0] + x1_indices_expanded = tf.expand_dims(x1.indices, axis=1) + x2_indices_expanded = tf.expand_dims(x2.indices, axis=1) + x1_indices_count = x1_indices_expanded.shape[0] + x2_indices_count = x2_indices_expanded.shape[0] + x1_indices_one_hot = tf.scatter_nd(x1_indices_expanded, ones_bool((x1_indices_count,)), (dim_0,)) + x2_indices_one_hot = tf.scatter_nd(x2_indices_expanded, ones_bool((x2_indices_count,)), (dim_0,)) + intersection_indices = tf.squeeze(tf.where(tf.math.logical_and(x1_indices_one_hot, x2_indices_one_hot)), axis=-1) + intersection_indices_count = tf.shape(intersection_indices)[0] + + def empty_intersection(): + return (intersection_indices, tf.zeros((0,) + x1.values.shape[1:], x1.dtype), tf.zeros((0,) + x2.values.shape[1:], x2.dtype)) + + def non_empty_intersection(): + + def values_for_intersection(indices_expanded, indices_count, values): + indices_indices = tf.scatter_nd(indices_expanded, tf.range(indices_count), (dim_0,)) + to_intersection_indices = tf.gather(indices_indices, intersection_indices) + return tf.gather(values, to_intersection_indices) + x1_values_for_intersection = tf.cond(tf.equal(x1_indices_count, intersection_indices_count), lambda : x1.values, lambda : values_for_intersection(x1_indices_expanded, x1_indices_count, x1.values)) + x2_values_for_intersection = tf.cond(tf.equal(x2_indices_count, intersection_indices_count), lambda : x2.values, lambda : values_for_intersection(x2_indices_expanded, x2_indices_count, x2.values)) + return (intersection_indices, x1_values_for_intersection, x2_values_for_intersection) + return tf.cond(tf.equal(intersection_indices_count, 0), empty_intersection, non_empty_intersection) + +def densifying_unary(default_value): + + def wrap_densifying_unary(func): + + @functools.wraps(func) + def sparse_wrapper(x, *args, **kwargs): + if isinstance(x, tf.SparseTensor): + sparse_output = sparse_with_values(x, func(x.values, *args, **kwargs)) + return sparse_to_dense(sparse_output, tf.cast(default_value, sparse_output.values.dtype)) + elif isinstance(x, tf.IndexedSlices): + sparse_output_values = func(x.values, *args, **kwargs) + output = tf.fill(x.dense_shape, tf.cast(default_value, sparse_output_values.dtype)) + return tf.tensor_scatter_nd_update(output, tf.expand_dims(x.indices, 1), sparse_output_values) + return func(x, *args, **kwargs) + return sparse_wrapper + return wrap_densifying_unary + +def elementwise_unary(func): + + @functools.wraps(func) + def sparse_wrapper(x, *args, **kwargs): + if isinstance(x, tf.SparseTensor): + return sparse_with_values(x, func(x.values, *args, **kwargs)) + elif isinstance(x, tf.IndexedSlices): + return tf.IndexedSlices(func(x.values, *args, **kwargs), x.indices, x.dense_shape) + else: + return func(x, *args, **kwargs) + return sparse_wrapper + +def elementwise_binary_union(sparse_op, densify_mixed=False): + + def wrap_elementwise_binary_union(func): + + @functools.wraps(func) + def sparse_wrapper(x1, x2): + if isinstance(x1, tf.SparseTensor): + if 
isinstance(x2, tf.SparseTensor): + if x1.indices is x2.indices: + return sparse_with_values(x1, func(x1.values, x2.values)) + else: + output = sparse_op(x1, x2) + output.set_shape(x1.shape) + return output + elif densify_mixed: + x1 = sparse_to_dense(x1) + else: + if not hasattr(x2, 'shape') or len(x2.shape) == 0: + x2 = broadcast_scalar_to_sparse_shape(x2, x1) + return sparse_op(x1, x2) + elif isinstance(x2, tf.SparseTensor): + if densify_mixed: + x2 = sparse_to_dense(x2) + else: + if not hasattr(x1, 'shape') or len(x1.shape) == 0: + x1 = broadcast_scalar_to_sparse_shape(x1, x2) + return sparse_op(x1, x2) + elif isinstance(x1, tf.IndexedSlices): + if isinstance(x2, tf.IndexedSlices): + if x1.indices is x2.indices: + return tf.IndexedSlices(func(x1.values, x2.values), x1.indices, x1.dense_shape) + else: + (union_indices, x1_values_for_union, x2_values_for_union) = indexed_slices_union_indices_and_values(x1, x2.indices, x2.values) + return tf.IndexedSlices(func(x1_values_for_union, x2_values_for_union), union_indices, x1.dense_shape) + else: + x1 = tf.convert_to_tensor(x1) + elif isinstance(x2, tf.IndexedSlices): + x2 = tf.convert_to_tensor(x2) + return func(x1, x2) + return sparse_wrapper + return wrap_elementwise_binary_union + +def elementwise_binary_intersection(func): + + @functools.wraps(func) + def sparse_wrapper(x1, x2): + if isinstance(x1, tf.SparseTensor): + if isinstance(x2, tf.SparseTensor): + if x1.indices is x2.indices: + return sparse_with_values(x1, func(x1.values, x2.values)) + else: + (intersection_indices, x1_values_for_intersection, x2_values_for_intersection) = sparse_intersection_indices_and_values(x1, x2) + output = tf.SparseTensor(intersection_indices, func(x1_values_for_intersection, x2_values_for_intersection), x1.dense_shape) + output.set_shape(x1.shape) + return output + elif not hasattr(x2, 'shape') or len(x2.shape) == 0: + return sparse_with_values(x1, func(x1.values, x2)) + else: + return sparse_with_values(x1, func(x1.values, tf.gather_nd(x2, x1.indices))) + elif isinstance(x2, tf.SparseTensor): + if not hasattr(x1, 'shape') or len(x1.shape) == 0: + return sparse_with_values(x2, func(x1, x2.values)) + else: + return sparse_with_values(x2, func(tf.gather_nd(x1, x2.indices), x2.values)) + elif isinstance(x1, tf.IndexedSlices): + if isinstance(x2, tf.IndexedSlices): + if x1.indices is x2.indices: + return tf.IndexedSlices(func(x1.values, x2.values), x1.indices, x1.dense_shape) + else: + (intersection_indices, x1_values_for_intersection, x2_values_for_intersection) = indexed_slices_intersection_indices_and_values(x1, x2) + return tf.IndexedSlices(func(x1_values_for_intersection, x2_values_for_intersection), intersection_indices, x1.dense_shape) + elif not hasattr(x2, 'shape') or len(x2.shape) == 0: + return tf.IndexedSlices(func(x1.values, x2), x1.indices, x1.dense_shape) + else: + return tf.IndexedSlices(func(x1.values, tf.gather(x2, x1.indices)), x1.indices, x1.dense_shape) + elif isinstance(x2, tf.IndexedSlices): + if not hasattr(x1, 'shape') or len(x1.shape) == 0: + return tf.IndexedSlices(func(x1, x2.values), x2.indices, x2.dense_shape) + else: + return tf.IndexedSlices(func(tf.gather(x1, x2.indices), x2.values), x2.indices, x2.dense_shape) + return func(x1, x2) + return sparse_wrapper + +def elementwise_division(func): + + @functools.wraps(func) + def sparse_wrapper(x1, x2): + if isinstance(x1, tf.SparseTensor): + if isinstance(x2, tf.SparseTensor): + x1 = sparse_to_dense(x1) + x2 = sparse_to_dense(x2) + elif not hasattr(x2, 'shape') or len(x2.shape) == 0: 
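+ # NOTE (added for exposition; not part of the upstream Keras file): division
+ # is only sparsity-preserving where the divisor is finite and non-zero. E.g.
+ # x1 = sparse([0., 2., 0.]) over x2 = [1., 1., 0.] gives [0., 2., nan], so
+ # index 2 must be materialized even though x1 stores no value there. The
+ # scalar case below keeps x1's sparsity pattern unchanged, while the dense
+ # case widens the result to the union of x1's indices and the divisor's
+ # zero/nan positions.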
+ return sparse_with_values(x1, func(x1.values, x2)) + else: + x2_zeros_and_nans = tf.equal(x2, 0) + if not tf.as_dtype(x2.dtype).is_integer: + x2_zeros_and_nans = tf.math.logical_or(x2_zeros_and_nans, tf.math.is_nan(x2)) + + def func_for_x1_indices(): + return sparse_with_values(x1, func(x1.values, tf.gather_nd(x2, x1.indices))) + + def func_for_union_indices(): + x2_zeros_and_nan_indices = tf.where(x2_zeros_and_nans) + (union_indices, x1_values_for_union, _) = sparse_union_indices_and_values(x1, x2_zeros_and_nan_indices) + output = tf.SparseTensor(union_indices, func(x1_values_for_union, tf.gather_nd(x2, union_indices)), x1.dense_shape) + output.set_shape(x1.shape) + return output + return tf.cond(tf.reduce_any(x2_zeros_and_nans), func_for_union_indices, func_for_x1_indices) + elif isinstance(x2, tf.SparseTensor): + x2 = sparse_to_dense(x2) + elif isinstance(x1, tf.IndexedSlices): + if isinstance(x2, tf.IndexedSlices): + x1 = tf.convert_to_tensor(x1) + x2 = tf.convert_to_tensor(x2) + elif not hasattr(x2, 'shape') or len(x2.shape) == 0: + return tf.IndexedSlices(func(x1.values, x2), x1.indices, x1.dense_shape) + else: + x2_zeros_and_nans = tf.equal(x2, 0) + if not tf.as_dtype(x2.dtype).is_integer: + x2_zeros_and_nans = tf.math.logical_or(x2_zeros_and_nans, tf.math.is_nan(x2)) + x2_zeros_and_nans = tf.reduce_any(x2_zeros_and_nans, axis=tuple(range(1, x2.shape.rank))) + + def func_for_x1_indices(): + return tf.IndexedSlices(func(x1.values, tf.gather(x2, x1.indices)), x1.indices, x1.dense_shape) + + def func_for_union_indices(): + x2_zeros_and_nan_indices = tf.squeeze(tf.where(x2_zeros_and_nans), axis=-1) + (union_indices, x1_values_for_union, _) = indexed_slices_union_indices_and_values(x1, x2_zeros_and_nan_indices) + return tf.IndexedSlices(func(x1_values_for_union, tf.gather(x2, union_indices)), union_indices, x1.dense_shape) + return tf.cond(tf.reduce_any(x2_zeros_and_nans), func_for_union_indices, func_for_x1_indices) + elif isinstance(x2, tf.IndexedSlices): + x2 = tf.convert_to_tensor(x2) + return func(x1, x2) + return sparse_wrapper + +# File: keras-master/keras/src/backend/tensorflow/trackable.py +import tensorflow as tf +from keras.src.utils import tracking + +class KerasAutoTrackable(tf.__internal__.tracking.AutoTrackable): + + def __setattr__(self, name, value): + try: + if getattr(self, name) is value: + return + except AttributeError: + pass + if getattr(self, '_self_setattr_tracking', True): + value = sticky_attribute_assignment(trackable=self, value=value, name=name) + super().__setattr__(name, value) + +def sticky_attribute_assignment(trackable, name, value): + if isinstance(value, (tracking.TrackedList, tracking.TrackedDict, tracking.TrackedSet)) and hasattr(trackable, '_tracked'): + trackable._tracked.append(name) + if not tracking.is_tracking_enabled(): + return value + if isinstance(value, tf.__internal__.tracking.Trackable): + trackable._track_trackable(value, name=name, overwrite=True) + return value + +# File: keras-master/keras/src/backend/tensorflow/trainer.py +import contextlib +import warnings +import numpy as np +import tensorflow as tf +from tensorflow.python.eager import context as tf_context +from keras.src import callbacks as callbacks_module +from keras.src import metrics as metrics_module +from keras.src import optimizers as optimizers_module +from keras.src import tree +from keras.src.trainers import trainer as base_trainer +from keras.src.trainers.data_adapters import array_slicing +from keras.src.trainers.data_adapters import data_adapter_utils +from 
keras.src.trainers.epoch_iterator import EpochIterator +from keras.src.utils import traceback_utils + +class TensorFlowTrainer(base_trainer.Trainer): + + def __init__(self): + super().__init__() + self.train_function = None + self.test_function = None + self.predict_function = None + if tf.distribute.has_strategy(): + self._distribute_strategy = tf.distribute.get_strategy() + else: + self._distribute_strategy = None + + @property + def distribute_strategy(self): + return self._distribute_strategy or tf.distribute.get_strategy() + + @property + def distribute_reduction_method(self): + return self._distribute_reduction_method or 'auto' + + @distribute_reduction_method.setter + def distribute_reduction_method(self, value): + self._distribute_reduction_method = value + + def train_step(self, data): + (x, y, sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(data) + with tf.GradientTape() as tape: + if self._call_has_training_arg: + y_pred = self(x, training=True) + else: + y_pred = self(x) + loss = self._compute_loss(x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=True) + self._loss_tracker.update_state(loss, sample_weight=tf.shape(tree.flatten(x)[0])[0]) + if self.optimizer is not None: + loss = self.optimizer.scale_loss(loss) + if self.trainable_weights: + trainable_weights = self.trainable_weights + gradients = tape.gradient(loss, trainable_weights) + self.optimizer.apply_gradients(zip(gradients, trainable_weights)) + else: + warnings.warn('The model does not have any trainable weights.') + return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight) + + def test_step(self, data): + (x, y, sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(data) + if self._call_has_training_arg: + y_pred = self(x, training=False) + else: + y_pred = self(x) + loss = self._compute_loss(x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=False) + self._loss_tracker.update_state(loss, sample_weight=tf.shape(tree.flatten(x)[0])[0]) + return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight) + + def predict_step(self, data): + (x, _, _) = data_adapter_utils.unpack_x_y_sample_weight(data) + if self._call_has_training_arg: + y_pred = self(x, training=False) + else: + y_pred = self(x) + return y_pred + + def make_train_function(self, force=False): + if self.train_function is not None and (not force): + return self.train_function + + @tf.autograph.experimental.do_not_convert + def one_step_on_data(data): + return self.train_step(data) + if not self.run_eagerly: + one_step_on_data = tf.function(one_step_on_data, reduce_retracing=True, jit_compile=self.jit_compile) + + @tf.autograph.experimental.do_not_convert + def one_step_on_iterator(iterator): + data = next(iterator) + outputs = self.distribute_strategy.run(one_step_on_data, args=(data,)) + outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='auto') + return outputs + + @tf.autograph.experimental.do_not_convert + def multi_step_on_iterator(iterator): + for _ in range(self.steps_per_execution): + outputs = one_step_on_iterator(iterator) + return outputs + if self.steps_per_execution > 1: + train_function = multi_step_on_iterator + else: + train_function = one_step_on_iterator + if not self.run_eagerly: + train_function = tf.function(train_function, reduce_retracing=True) + self.train_function = train_function + + def make_test_function(self, force=False): + if self.test_function is not None and (not force): + return self.test_function + + 
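+ # NOTE (added for exposition; not part of the upstream Keras file): each
+ # make_*_function builder has the same shape: a per-batch step, optionally
+ # jit-compiled, is wrapped in a strategy.run + reduce_per_replica driver, and
+ # when steps_per_execution > 1 a multi-step loop is traced into one
+ # tf.function so several batches run per Python-level call. In miniature,
+ # with hypothetical `step` and `n`:
+ #
+ #   @tf.function(reduce_retracing=True)
+ #   def multi_step(iterator):
+ #       for _ in range(n):  # unrolled at trace time, as in the loops below
+ #           outputs = step(next(iterator))
+ #       return outputs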
@tf.autograph.experimental.do_not_convert + def one_step_on_data(data): + return self.test_step(data) + if not self.run_eagerly and self.jit_compile: + one_step_on_data = tf.function(one_step_on_data, reduce_retracing=True, jit_compile=True) + + @tf.autograph.experimental.do_not_convert + def one_step_on_iterator(iterator): + data = next(iterator) + outputs = self.distribute_strategy.run(one_step_on_data, args=(data,)) + outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='auto') + return outputs + + @tf.autograph.experimental.do_not_convert + def multi_step_on_iterator(iterator): + for _ in range(self.steps_per_execution): + outputs = one_step_on_iterator(iterator) + return outputs + if self.steps_per_execution > 1: + test_function = multi_step_on_iterator + else: + test_function = one_step_on_iterator + if not self.run_eagerly: + test_function = tf.function(test_function, reduce_retracing=True) + self.test_function = test_function + + def make_predict_function(self, force=False): + if self.predict_function is not None and (not force): + return self.predict_function + + @tf.autograph.experimental.do_not_convert + def one_step_on_data(data): + return self.predict_step(data) + if not self.run_eagerly and self.jit_compile: + one_step_on_data = tf.function(one_step_on_data, reduce_retracing=True, jit_compile=True) + + @tf.autograph.experimental.do_not_convert + def one_step_on_data_distributed(data): + data = data[0] + outputs = self.distribute_strategy.run(one_step_on_data, args=(data,)) + outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='concat') + return outputs + + @tf.autograph.experimental.do_not_convert + def multi_step_on_data(data): + outputs = one_step_on_data_distributed(data[:1]) + for single_step_data in data[1:]: + step_outputs = one_step_on_data_distributed([single_step_data]) + outputs = tree.map_structure(lambda t1, t2: concat([t1, t2]), outputs, step_outputs) + return outputs + if self.steps_per_execution > 1: + predict_function = multi_step_on_data + else: + predict_function = one_step_on_data_distributed + if not self.run_eagerly: + predict_function = tf.function(predict_function, reduce_retracing=True) + self.predict_function = predict_function + + @traceback_utils.filter_traceback + def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose='auto', callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_batch_size=None, validation_freq=1): + self._assert_compile_called('fit') + self._eval_epoch_iterator = None + if validation_split and validation_data is None: + ((x, y, sample_weight), validation_data) = array_slicing.train_validation_split((x, y, sample_weight), validation_split=validation_split) + if validation_data is not None: + (val_x, val_y, val_sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(validation_data) + epoch_iterator = TFEpochIterator(x=x, y=y, sample_weight=sample_weight, batch_size=batch_size, steps_per_epoch=steps_per_epoch, shuffle=shuffle, class_weight=class_weight, distribute_strategy=self.distribute_strategy, steps_per_execution=self.steps_per_execution) + self._maybe_symbolic_build(iterator=epoch_iterator) + if not isinstance(callbacks, callbacks_module.CallbackList): + callbacks = callbacks_module.CallbackList(callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=epochs, steps=epoch_iterator.num_batches, model=self) + 
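+        # Epoch loop: reset metrics, step through the (possibly distributed)
+        # iterator via `self.train_function`, convert the returned tensors to
+        # Python scalars for the callbacks, then optionally validate through
+        # `evaluate` with a cached `TFEpochIterator` so the validation dataset
+        # is only built once.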
self.stop_training = False + self.make_train_function() + callbacks.on_train_begin() + training_logs = None + logs = {} + initial_epoch = self._initial_epoch or initial_epoch + for epoch in range(initial_epoch, epochs): + self.reset_metrics() + callbacks.on_epoch_begin(epoch) + with epoch_iterator.catch_stop_iteration(): + for (step, iterator) in epoch_iterator.enumerate_epoch(): + callbacks.on_train_batch_begin(step) + logs = self.train_function(iterator) + logs = self._pythonify_logs(logs) + callbacks.on_train_batch_end(step, logs) + if self.stop_training: + break + epoch_logs = dict(self._get_metrics_result_or_logs(logs)) + if validation_data is not None and self._should_eval(epoch, validation_freq): + if getattr(self, '_eval_epoch_iterator', None) is None: + self._eval_epoch_iterator = TFEpochIterator(x=val_x, y=val_y, sample_weight=val_sample_weight, batch_size=validation_batch_size or batch_size, distribute_strategy=self.distribute_strategy, steps_per_execution=self.steps_per_execution, steps_per_epoch=validation_steps, shuffle=False) + val_logs = self.evaluate(x=val_x, y=val_y, sample_weight=val_sample_weight, batch_size=validation_batch_size or batch_size, steps=validation_steps, callbacks=callbacks, return_dict=True, _use_cached_eval_dataset=True) + val_logs = {'val_' + name: val for (name, val) in val_logs.items()} + epoch_logs.update(val_logs) + callbacks.on_epoch_end(epoch, epoch_logs) + training_logs = epoch_logs + if self.stop_training: + break + if isinstance(self.optimizer, optimizers_module.Optimizer) and epochs > 0: + self.optimizer.finalize_variable_values(self.trainable_weights) + if getattr(self, '_eval_epoch_iterator', None) is not None: + del self._eval_epoch_iterator + callbacks.on_train_end(logs=training_logs) + return self.history + + @traceback_utils.filter_traceback + def evaluate(self, x=None, y=None, batch_size=None, verbose='auto', sample_weight=None, steps=None, callbacks=None, return_dict=False, **kwargs): + self._assert_compile_called('evaluate') + use_cached_eval_dataset = kwargs.pop('_use_cached_eval_dataset', False) + if kwargs: + raise ValueError(f'Arguments not recognized: {kwargs}') + if use_cached_eval_dataset: + epoch_iterator = self._eval_epoch_iterator + else: + epoch_iterator = TFEpochIterator(x=x, y=y, sample_weight=sample_weight, batch_size=batch_size, steps_per_epoch=steps, shuffle=False, distribute_strategy=self.distribute_strategy, steps_per_execution=self.steps_per_execution) + self._maybe_symbolic_build(iterator=epoch_iterator) + if not isinstance(callbacks, callbacks_module.CallbackList): + callbacks = callbacks_module.CallbackList(callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=1, steps=epoch_iterator.num_batches, model=self) + self.make_test_function() + self.stop_evaluating = False + callbacks.on_test_begin() + logs = {} + self.reset_metrics() + with epoch_iterator.catch_stop_iteration(): + for (step, iterator) in epoch_iterator.enumerate_epoch(): + callbacks.on_test_batch_begin(step) + logs = self.test_function(iterator) + logs = self._pythonify_logs(logs) + callbacks.on_test_batch_end(step, logs) + if self.stop_evaluating: + break + logs = self._get_metrics_result_or_logs(logs) + callbacks.on_test_end(logs) + if return_dict: + return logs + return self._flatten_metrics_in_order(logs) + + @traceback_utils.filter_traceback + def predict(self, x, batch_size=None, verbose='auto', steps=None, callbacks=None): + epoch_iterator = TFEpochIterator(x=x, batch_size=batch_size, steps_per_epoch=steps, 
shuffle=False, distribute_strategy=self.distribute_strategy, steps_per_execution=self.steps_per_execution) + if not isinstance(callbacks, callbacks_module.CallbackList): + callbacks = callbacks_module.CallbackList(callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=1, steps=epoch_iterator.num_batches, model=self) + + def append_to_outputs(batch_outputs, outputs): + if outputs is None: + outputs = tree.map_structure(lambda batch_output: [batch_output], batch_outputs) + else: + tree.map_structure_up_to(batch_outputs, lambda output, batch_output: output.append(batch_output), outputs, batch_outputs) + return outputs + + def get_data(iterator): + data = [] + for _ in range(self.steps_per_execution): + try: + single_step_data = next(iterator) + except (StopIteration, tf.errors.OutOfRangeError) as e: + if hasattr(data, '__len__') and len(data) > 0: + return data + else: + raise e + data.append(single_step_data) + return data + self.make_predict_function() + self.stop_predicting = False + callbacks.on_predict_begin() + outputs = None + with epoch_iterator.catch_stop_iteration(): + for (step, iterator) in epoch_iterator.enumerate_epoch(): + callbacks.on_predict_batch_begin(step) + data = get_data(iterator) + batch_outputs = self.predict_function(data) + outputs = append_to_outputs(batch_outputs, outputs) + callbacks.on_predict_batch_end(step, {'outputs': batch_outputs}) + if self.stop_predicting: + break + callbacks.on_predict_end() + outputs = tree.map_structure_up_to(batch_outputs, potentially_ragged_concat, outputs) + return tree.map_structure(convert_to_np_if_not_ragged, outputs) + + def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None, return_dict=False): + self._assert_compile_called('train_on_batch') + if class_weight is not None: + if sample_weight is not None: + raise ValueError(f'Arguments `sample_weight` and `class_weight` cannot be specified at the same time. Received: sample_weight={sample_weight}, class_weight={class_weight}') + sample_weight = data_adapter_utils.class_weight_to_sample_weights(y, class_weight) + self._maybe_symbolic_build(data_batch=(x, y, sample_weight)) + self.make_train_function() + + def data(): + yield (x, y, sample_weight) + logs = self.train_function(data()) + logs = tree.map_structure(lambda x: np.array(x), logs) + if return_dict: + return logs + return self._flatten_metrics_in_order(logs) + + def test_on_batch(self, x, y=None, sample_weight=None, return_dict=False): + self._assert_compile_called('test_on_batch') + + def data(): + yield (x, y, sample_weight) + self._maybe_symbolic_build(data_batch=(x, y, sample_weight)) + self.make_test_function() + logs = self.test_function(data()) + logs = tree.map_structure(lambda x: np.array(x), logs) + if return_dict: + return logs + return self._flatten_metrics_in_order(logs) + + def predict_on_batch(self, x): + self.make_predict_function() + batch_outputs = self.predict_function([(x,)]) + batch_outputs = tree.map_structure(convert_to_np_if_not_ragged, batch_outputs) + return batch_outputs + + @property + def compiled_metrics(self): + + class DeprecatedCompiledMetric: + + def update_state(_, y, y_pred, sample_weight=None): + return self._compiled_metrics_update_state(y, y_pred, sample_weight=sample_weight) + return DeprecatedCompiledMetric() + + def _compiled_metrics_update_state(self, y, y_pred, sample_weight=None): + warnings.warn('`model.compiled_metrics()` is deprecated. 
Instead, use e.g.:\n```\nfor metric in self.metrics:\n metric.update_state(y, y_pred)\n```\n', stacklevel=2) + for metric in self.metrics: + if isinstance(metric, metrics_module.Mean): + metric.update_state(y_pred, sample_weight=sample_weight) + else: + metric.update_state(y, y_pred, sample_weight=sample_weight) + + def compiled_loss(self, y, y_pred, sample_weight=None, regularization_losses=None): + warnings.warn('`model.compiled_loss()` is deprecated. Instead, use `model.compute_loss(x, y, y_pred, sample_weight, training)`.') + return self.compute_loss(x=None, y=y, y_pred=y_pred, sample_weight=sample_weight) + + def loss(self, y, y_pred, sample_weight=None): + warnings.warn('`model.loss()` is deprecated. Instead, use `model.compute_loss(x, y, y_pred, sample_weight, training)`.') + return self.compute_loss(x=None, y=y, y_pred=y_pred, sample_weight=sample_weight) + + def _maybe_symbolic_build(self, iterator=None, data_batch=None): + if self._distribute_strategy is None: + return + if iterator is not None: + for (_, it) in iterator.enumerate_epoch(): + maybe_distributed_data_batch = next(it) + has_distributed_values = tree.map_structure(lambda x: isinstance(x, tf.distribute.DistributedValues), maybe_distributed_data_batch) + if all(tree.flatten(has_distributed_values)): + data_batch = self.distribute_strategy.reduce('MEAN', maybe_distributed_data_batch, axis=None) + else: + data_batch = maybe_distributed_data_batch + break + with self.distribute_strategy.scope(): + self._symbolic_build(data_batch=data_batch) + +class TFEpochIterator(EpochIterator): + + def __init__(self, distribute_strategy=None, *args, **kwargs): + super().__init__(*args, **kwargs) + self._distribute_strategy = distribute_strategy + dataset = self._get_iterator() + if not isinstance(dataset, tf.distribute.DistributedDataset): + dataset = self._distribute_strategy.experimental_distribute_dataset(dataset) + self._distributed_dataset = dataset + self._steps_seen = 0 + + def _get_iterator(self): + return self.data_adapter.get_tf_dataset() + + def enumerate_epoch(self): + self.data_adapter.on_epoch_begin() + if self.steps_per_epoch: + if not self._current_iterator: + self._current_iterator = iter(self._distributed_dataset) + for step in range(0, self.steps_per_epoch, self.steps_per_execution): + yield (step, self._current_iterator) + else: + iterator = iter(self._distributed_dataset) + if self.num_batches: + for step in range(0, self.num_batches, self.steps_per_execution): + yield (step, iterator) + else: + step = -1 + while True: + step += self.steps_per_execution + self._steps_seen = step + 1 + yield (step, iterator) + self.data_adapter.on_epoch_end() + + def tf_sync(self): + tf_context.async_wait() + + @contextlib.contextmanager + def catch_stop_iteration(self): + try: + yield + self.tf_sync() + except (StopIteration, tf.errors.OutOfRangeError): + if self._num_batches is None: + self._num_batches = self._steps_seen + warnings.warn('Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches. 
You may need to use the `.repeat()` function when building your dataset.', stacklevel=2) + self._current_iterator = None + self.data_adapter.on_epoch_end() + +def reduce_per_replica(values, strategy, reduction): + if reduction == 'auto': + if isinstance(strategy, tf.distribute.TPUStrategy): + reduction = 'first' + else: + reduction = 'mean' + + def _reduce(v): + if _collective_all_reduce_multi_worker(strategy): + if reduction == 'concat': + return _multi_worker_concat(v, strategy) + elif reduction == 'sum': + return strategy.reduce('SUM', v) + elif reduction == 'mean': + return strategy.reduce('MEAN', v, axis=0) + if not _is_per_replica_instance(v): + return v + elif reduction == 'first': + return strategy.experimental_local_results(v)[0] + elif reduction == 'concat': + if _is_tpu_multi_host(strategy): + return _tpu_multi_host_concat(v, strategy) + else: + return concat(strategy.experimental_local_results(v)) + elif reduction == 'sum': + return tf.reduce_sum(strategy.experimental_local_results(v)) + elif reduction == 'mean': + return tf.reduce_mean(strategy.experimental_local_results(v), axis=0) + else: + raise ValueError(f'`reduction` must be one of "first", "concat", "mean", "sum", or "auto". Received: reduction={reduction}.') + return tree.map_structure(_reduce, values) + +def _multi_worker_concat(v, strategy): + replicas = strategy.gather(v, axis=0) + if _is_per_replica_instance(v): + shapes = tf.concat([tf.expand_dims(tf.shape(single_value)[0], axis=0) for single_value in v.values], axis=0) + all_shapes = strategy.gather(shapes, axis=0) + else: + all_shapes = strategy.gather(tf.expand_dims(tf.shape(v)[0], axis=0), axis=0) + replicas = tf.split(replicas, num_or_size_splits=all_shapes, num=strategy.num_replicas_in_sync) + ordered_replicas = [] + num_replicas_per_worker = len(strategy.extended.worker_devices) + for replica_id in range(num_replicas_per_worker): + ordered_replicas += replicas[replica_id::num_replicas_per_worker] + return concat(ordered_replicas) + +def concat(tensors, axis=0): + if isinstance(tensors[0], tf.SparseTensor): + return tf.sparse.concat(axis=axis, sp_inputs=tensors) + elif _is_scalar(tensors[0]): + return tf.stack(tensors, axis=axis) + else: + return tf.concat(tensors, axis=axis) + +def _tpu_multi_host_concat(v, strategy): + replicas = strategy.experimental_local_results(v) + num_replicas_per_host = strategy.extended.num_replicas_per_host + ordered_replicas = [] + for replica_id in range(num_replicas_per_host): + ordered_replicas += replicas[replica_id::num_replicas_per_host] + return concat(ordered_replicas) + +def _collective_all_reduce_multi_worker(strategy): + return isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) and strategy.extended._in_multi_worker_mode() + +def _is_per_replica_instance(obj): + return isinstance(obj, tf.distribute.DistributedValues) and isinstance(obj, tf.__internal__.CompositeTensor) + +def _is_scalar(x): + return isinstance(x, (tf.Tensor, tf.Variable)) and x.shape.rank == 0 + +def _is_tpu_multi_host(strategy): + return _is_tpu_strategy(strategy) and strategy.extended.num_hosts > 1 + +def _is_tpu_strategy(strategy): + return _is_tpu_strategy_class(strategy.__class__) + +def _is_tpu_strategy_class(clz): + + def is_tpu_strat(k): + return k.__name__.startswith('TPUStrategy') + if is_tpu_strat(clz): + return True + return any(map(_is_tpu_strategy_class, clz.__bases__)) + +def convert_to_np_if_not_ragged(x): + if isinstance(x, tf.RaggedTensor): + return x + elif isinstance(x, tf.SparseTensor): + return x + return x.numpy() + 
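+
+# Illustrative sketch (the function below is hypothetical, not part of the Keras
+# source): it shows how the helpers above combine per-step outputs. `concat`
+# stacks rank-0 tensors and concatenates higher-rank ones (with a dedicated
+# sparse path), while `reduce_per_replica` with reduction='auto' resolves to
+# 'mean' on non-TPU strategies and passes plain, non-PerReplica values through
+# unchanged.
+def _example_merge_step_outputs():
+    strategy = tf.distribute.get_strategy()  # default strategy: one local replica
+    step_outputs = [tf.zeros((2, 4)), tf.ones((3, 4))]  # e.g. two predict steps
+    merged = concat(step_outputs)  # rank > 0 takes the tf.concat path -> (5, 4)
+    losses = [tf.constant(0.5), tf.constant(0.7)]
+    stacked = concat(losses)  # rank-0 inputs are stacked instead -> shape (2,)
+    # A plain tensor is not a PerReplica instance, so `_reduce` returns it as-is.
+    reduced = reduce_per_replica(merged, strategy, reduction='auto')
+    return merged, stacked, reduced
+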
+def potentially_ragged_concat(tensors): + if len(tensors) == 1: + return tensors[0] + elif isinstance(tensors[0], tf.SparseTensor): + return tf.sparse.concat(axis=0, sp_inputs=tensors) + elif isinstance(tensors[0], tf.RaggedTensor): + return tf.concat(tensors, axis=0) + non_batch_shapes = tf.stack([tf.shape(tensor)[1:] for tensor in tensors]) + constant_dims = tf.math.reduce_all(non_batch_shapes == non_batch_shapes[:1], axis=0) + if tf.math.reduce_all(constant_dims).numpy().item(): + if _is_scalar(tensors[0]): + return tf.stack(tensors, axis=0) + else: + return tf.concat(tensors, axis=0) + constant_inner_dimensions = constant_dims.numpy().tolist()[::-1].index(False) + if constant_inner_dimensions == 0: + constant_inner_shape = None + else: + constant_inner_shape = tensors[0].shape[-constant_inner_dimensions:] + return tf.ragged.constant([tensor.numpy() for tensor in tensors], inner_shape=constant_inner_shape).merge_dims(0, 1) + +# File: keras-master/keras/src/backend/torch/__init__.py +"""""" +from keras.src.backend.torch import core +from keras.src.backend.torch import image +from keras.src.backend.torch import linalg +from keras.src.backend.torch import math +from keras.src.backend.torch import nn +from keras.src.backend.torch import numpy +from keras.src.backend.torch import random +from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS +from keras.src.backend.torch.core import Variable +from keras.src.backend.torch.core import cast +from keras.src.backend.torch.core import compute_output_spec +from keras.src.backend.torch.core import cond +from keras.src.backend.torch.core import convert_to_numpy +from keras.src.backend.torch.core import convert_to_tensor +from keras.src.backend.torch.core import device_scope +from keras.src.backend.torch.core import is_tensor +from keras.src.backend.torch.core import random_seed_dtype +from keras.src.backend.torch.core import scatter +from keras.src.backend.torch.core import shape +from keras.src.backend.torch.core import stop_gradient +from keras.src.backend.torch.core import to_torch_dtype +from keras.src.backend.torch.core import vectorized_map +from keras.src.backend.torch.rnn import cudnn_ok +from keras.src.backend.torch.rnn import gru +from keras.src.backend.torch.rnn import lstm +from keras.src.backend.torch.rnn import rnn + +# File: keras-master/keras/src/backend/torch/core.py +import builtins +import contextlib +import functools +import ml_dtypes +import numpy as np +import torch +from keras.src import tree +from keras.src.backend.common import KerasVariable +from keras.src.backend.common import global_state +from keras.src.backend.common import standardize_dtype +from keras.src.backend.common.backend_utils import slice_along_axis +from keras.src.backend.common.dtypes import result_type +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.backend.common.stateless_scope import get_stateless_scope +from keras.src.backend.common.stateless_scope import in_stateless_scope +from keras.src.backend.common.symbolic_scope import SymbolicScope +from keras.src.backend.config import floatx +SUPPORTS_SPARSE_TENSORS = False +if torch.backends.mps.is_available(): + DEFAULT_DEVICE = 'mps' +elif torch.cuda.is_available(): + DEFAULT_DEVICE = 'cuda' +else: + DEFAULT_DEVICE = 'cpu' +TORCH_DTYPES = {'float16': torch.float16, 'float32': torch.float32, 'float64': torch.float64, 'uint8': torch.uint8, 'uint16': torch.int32, 'uint32': torch.int64, 'int8': 
torch.int8, 'int16': torch.int16, 'int32': torch.int32, 'int64': torch.int64, 'bfloat16': torch.bfloat16, 'bool': torch.bool, 'float8_e4m3fn': torch.float8_e4m3fn, 'float8_e5m2': torch.float8_e5m2} + +@contextlib.contextmanager +def device_scope(device_name): + previous_device = global_state.get_global_attribute('torch_device', None) + current_device = _parse_device_input(device_name) + global_state.set_global_attribute('torch_device', current_device) + try: + yield + finally: + global_state.set_global_attribute('torch_device', previous_device) + +def get_device(): + device = global_state.get_global_attribute('torch_device', None) + if device is None: + return DEFAULT_DEVICE + return device + +def _parse_device_input(device_name): + if isinstance(device_name, str): + device_name = device_name.lower() + if 'gpu' in device_name: + device_name = device_name.replace('gpu', 'cuda') + else: + raise ValueError(f"Invalid value for argument `device_name`. Expected a string like 'gpu:0' or 'cpu'. Received: device_name='{device_name}'") + return device_name + +def to_torch_dtype(dtype): + standardized_dtype = TORCH_DTYPES.get(standardize_dtype(dtype), None) + if standardized_dtype is None: + raise ValueError(f'Unsupported dtype for PyTorch: {dtype}') + return standardized_dtype + +class Variable(KerasVariable): + + def _initialize(self, value): + if isinstance(value, torch.nn.Parameter): + self._value = value + else: + self._value = torch.nn.Parameter(convert_to_tensor(value, dtype=self._dtype), requires_grad=self.trainable).to(get_device()) + + def _direct_assign(self, value): + with torch.no_grad(): + self.value.copy_(value) + + def _convert_to_tensor(self, value, dtype=None): + return convert_to_tensor(value, dtype=dtype) + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + args = [arg.value if isinstance(arg, KerasVariable) else arg for arg in args] + if kwargs is None: + kwargs = {} + kwargs = {key: value.value if isinstance(value, KerasVariable) else value for (key, value) in kwargs.items()} + return func(*args, **kwargs) + + def __array__(self, dtype=None): + value = convert_to_numpy(self.value) + if dtype: + return value.astype(dtype) + return value + + @property + def value(self): + + def maybe_use_symbolic_tensor(value): + if str(get_device()) == 'meta' and str(value.device) != 'meta': + return torch.nn.Parameter(torch.empty(size=self._shape, dtype=to_torch_dtype(self._dtype), device='meta'), requires_grad=self.trainable) + return value + if in_stateless_scope(): + scope = get_stateless_scope() + value = scope.get_current_value(self) + if value is not None: + value = self._maybe_autocast(value) + return maybe_use_symbolic_tensor(value) + if self._value is None: + value = self._maybe_autocast(self._initializer(self._shape, dtype=self._dtype)) + else: + value = self._maybe_autocast(self._value) + return maybe_use_symbolic_tensor(value) + + @property + def trainable(self): + return self._trainable + + @trainable.setter + def trainable(self, value): + self._trainable = value + if self._value is not None: + self._value.requires_grad = value + + def __eq__(self, other): + try: + return super().__eq__(other) + except Exception: + return False + +def convert_to_tensor(x, dtype=None, sparse=None): + if sparse: + raise ValueError('`sparse=True` is not supported with torch backend') + if type(x) is Variable: + return x.value + if is_tensor(x): + device = get_device() + if x.device != device: + x = x.to(device) + if dtype is None: + return x + return 
x.to(to_torch_dtype(dtype)) + if dtype is None: + if isinstance(x, bool): + return torch.as_tensor(x, dtype=torch.bool, device=get_device()) + elif isinstance(x, int): + return torch.as_tensor(x, dtype=torch.int32, device=get_device()) + elif isinstance(x, float): + return torch.as_tensor(x, dtype=to_torch_dtype(floatx()), device=get_device()) + if not isinstance(x, (list, tuple)): + x = np.array(x) + elif len(x) > 0 and any((isinstance(x1, torch.Tensor) for x1 in x)): + return torch.stack([convert_to_tensor(x1) for x1 in x]) + if isinstance(x, np.ndarray): + if x.dtype == np.uint32: + x = x.astype(np.int64) + if standardize_dtype(x.dtype) == 'bfloat16': + x = x.astype(np.float32) + dtype = 'bfloat16' + dtype = dtype or x.dtype + if dtype is None: + dtype = result_type(*[getattr(item, 'dtype', type(item)) for item in tree.flatten(x)]) + dtype = to_torch_dtype(dtype) + return torch.as_tensor(x, dtype=dtype, device=get_device()) + +def convert_to_numpy(x): + + def transform(x): + if is_tensor(x): + if x.requires_grad: + x = x.detach() + if x.device != torch.device('cpu'): + x = x.cpu() + if x.dtype == torch.bfloat16: + return np.array(x.to(torch.float32)).astype(ml_dtypes.bfloat16) + return np.array(x) + if isinstance(x, (list, tuple)): + return np.array([transform(e) for e in x]) + return transform(x) + +def is_tensor(x): + return isinstance(x, torch.Tensor) + +def shape(x): + return tuple(x.shape) + +def cast(x, dtype): + dtype = to_torch_dtype(dtype) + if isinstance(x, KerasVariable): + x = x.value + if is_tensor(x): + if x.dtype == dtype: + return x + else: + return x.to(dtype) + return convert_to_tensor(x, dtype) + +def compute_output_spec(fn, *args, **kwargs): + + def has_none_shape(x): + if isinstance(x, KerasTensor): + return None in x.shape + return False + + def convert_keras_tensor_to_torch(x, fill_value=None): + if isinstance(x, KerasTensor): + shape = list(x.shape) + if fill_value: + for (i, e) in enumerate(shape): + if e is None: + shape[i] = fill_value + return torch.ones(size=shape, dtype=TORCH_DTYPES[x.dtype], device=get_device()) + return x + + def convert_torch_to_keras_tensor(x): + if is_tensor(x): + return KerasTensor(x.shape, standardize_dtype(x.dtype)) + return x + + def symbolic_call(fn, args, kwargs, fill_value): + try: + with device_scope('meta'): + (meta_args, meta_kwargs) = tree.map_structure(lambda x: convert_keras_tensor_to_torch(x, fill_value), (args, kwargs)) + return fn(*meta_args, **meta_kwargs) + except: + with device_scope(DEFAULT_DEVICE): + (eager_args, eager_kwargs) = tree.map_structure(lambda x: convert_keras_tensor_to_torch(x, fill_value), (args, kwargs)) + return fn(*eager_args, **eager_kwargs) + with StatelessScope(), SymbolicScope(), torch.no_grad(): + outputs = symbolic_call(fn, args, kwargs, fill_value=83) + none_in_shape = any(builtins.map(has_none_shape, tree.flatten((args, kwargs)))) + if none_in_shape: + outputs_1 = outputs + outputs_2 = symbolic_call(fn, args, kwargs, fill_value=89) + flat_out_1 = tree.flatten(outputs_1) + flat_out_2 = tree.flatten(outputs_2) + flat_out = [] + for (x1, x2) in zip(flat_out_1, flat_out_2): + shape = list(x1.shape) + for (i, e) in enumerate(x2.shape): + if e != shape[i]: + shape[i] = None + flat_out.append(KerasTensor(shape, standardize_dtype(x1.dtype))) + outputs = tree.pack_sequence_as(outputs_1, flat_out) + output_spec = tree.map_structure(convert_torch_to_keras_tensor, outputs) + return output_spec + +def cond(pred, true_fn, false_fn): + if get_device() == 'meta': + return true_fn() + if pred: + return 
true_fn() + return false_fn() + +def vectorized_map(function, elements): + return torch.vmap(function)(elements) + +def map(f, xs): + + def g(_, x): + return ((), f(x)) + (_, ys) = scan(g, (), xs) + return ys + +def scan(f, init, xs=None, length=None, reverse=False, unroll=1): + if not callable(f): + raise TypeError(f'`f` should be a callable. Received: f={f}') + if not isinstance(unroll, bool): + if not isinstance(unroll, int) or unroll < 1: + raise ValueError(f'`unroll` must be an positive integer or boolean. Received: unroll={unroll}') + if xs is None and length is None: + raise ValueError('Got no `xs` to scan over and `length` not provided.') + input_is_sequence = tree.is_nested(xs) + output_is_sequence = tree.is_nested(init) + + def pack_input(x): + return tree.pack_sequence_as(xs, x) if input_is_sequence else x[0] + + def pack_output(x): + return tree.pack_sequence_as(init, x) if output_is_sequence else x[0] + if xs is None: + xs_flat = [] + n = int(length) + else: + xs_flat = tree.flatten(xs) + xs_flat = [convert_to_tensor(elem) for elem in xs_flat] + n = int(length) if length is not None else shape(xs_flat[0])[0] + init_flat = tree.flatten(init) + init_flat = [convert_to_tensor(init) for init in init_flat] + init = pack_output(init_flat) + dummy_y = [torch.zeros_like(init) for init in init_flat] + carry = init + ys = [] + maybe_reversed = reversed if reverse else lambda x: x + for i in maybe_reversed(range(n)): + xs_slice = [x[i] for x in xs_flat] + packed_xs = pack_input(xs_slice) if len(xs_slice) > 0 else None + (carry, y) = f(carry, packed_xs) + ys.append(y if y is not None else dummy_y) + stacked_y = tree.map_structure(lambda *ys: torch.stack(ys), *maybe_reversed(ys)) + return (carry, stacked_y) + +def associative_scan(f, elems, reverse=False, axis=0): + if not callable(f): + raise TypeError(f'`f` should be a callable. Received: f={f}') + elems_flat = tree.flatten(elems) + elems_flat = [convert_to_tensor(elem) for elem in elems_flat] + if reverse: + elems_flat = [torch.flip(elem, (axis,)) for elem in elems_flat] + + def _combine(a_flat, b_flat): + a_flat = [convert_to_tensor(a) for a in a_flat] + b_flat = [convert_to_tensor(b) for b in b_flat] + a = tree.pack_sequence_as(elems, a_flat) + b = tree.pack_sequence_as(elems, b_flat) + c = f(a, b) + c_flat = tree.flatten(c) + return c_flat + num_elems = int(elems_flat[0].shape[axis]) + if not all((int(elem.shape[axis]) == num_elems for elem in elems_flat[1:])): + raise ValueError('Array inputs to associative_scan must have the same first dimension. 
(saw: {})'.format([elem.shape for elem in elems_flat])) + + def _interleave(a, b, axis): + assert a.shape[axis] == b.shape[axis] or a.shape[axis] == b.shape[axis] + 1 + a_shape = list(a.shape) + a_shape[axis] = a.shape[axis] * 2 - 1 + b_shape = list(b.shape) + b_shape[axis] = b.shape[axis] * 2 - 1 + a_dil = torch.zeros(a_shape) + slice_along_axis(a_dil, 0, None, 2, axis).copy_(a) + b_dil = torch.zeros(b_shape) + slice_along_axis(b_dil, 0, None, 2, axis).copy_(b) + a_pad = [[0, 0] for _ in range(a.dim())] + a_pad[axis][-1] = 1 if a.shape[axis] == b.shape[axis] else 0 + a_pad = a_pad[::-1] + a_pad = tree.flatten(a_pad) + b_pad = [[0, 0] for _ in range(b.dim())] + b_pad[axis] = [1, 0] if a.shape[axis] == b.shape[axis] else [1, 1] + b_pad = b_pad[::-1] + b_pad = tree.flatten(b_pad) + op = torch.bitwise_or if a.dtype == torch.bool else torch.add + return op(torch.nn.functional.pad(a_dil, a_pad), torch.nn.functional.pad(b_dil, b_pad)) + + def _scan(elems): + num_elems = elems[0].shape[axis] + if num_elems < 2: + return elems + reduced_elems = _combine([slice_along_axis(elem, 0, -1, step=2, axis=axis) for elem in elems], [slice_along_axis(elem, 1, None, step=2, axis=axis) for elem in elems]) + odd_elems = _scan(reduced_elems) + if num_elems % 2 == 0: + even_elems = _combine([slice_along_axis(e, 0, -1, axis=axis) for e in odd_elems], [slice_along_axis(e, 2, None, step=2, axis=axis) for e in elems]) + else: + even_elems = _combine(odd_elems, [slice_along_axis(e, 2, None, step=2, axis=axis) for e in elems]) + even_elems = [torch.cat([slice_along_axis(elem, 0, 1, axis=axis), result], dim=axis) for (elem, result) in zip(elems, even_elems)] + return list(builtins.map(functools.partial(_interleave, axis=axis), even_elems, odd_elems)) + scans = _scan(elems_flat) + if reverse: + scans = [torch.flip(scanned, (axis,)) for scanned in scans] + return tree.pack_sequence_as(elems, scans) + +def scatter(indices, values, shape): + indices = convert_to_tensor(indices) + values = convert_to_tensor(values) + zeros = torch.zeros(shape, dtype=values.dtype, device=get_device()) + index_length = indices.shape[-1] + value_shape = shape[index_length:] + indices = torch.reshape(indices, [-1, index_length]) + values = torch.reshape(values, [-1] + list(value_shape)) + for i in range(indices.shape[0]): + index = indices[i] + zeros[tuple(index)] += values[i] + return zeros + +def scatter_update(inputs, indices, updates): + inputs = convert_to_tensor(inputs) + indices = convert_to_tensor(indices, dtype='int64') + updates = convert_to_tensor(updates) + indices = torch.transpose(indices, 0, 1) + inputs[tuple(indices)] = updates + return inputs + +def slice(inputs, start_indices, shape): + shape_dtype = to_torch_dtype('int64') + inputs = convert_to_tensor(inputs) + start_indices = convert_to_tensor(start_indices).to(shape_dtype) + shape = convert_to_tensor(shape).to(shape_dtype) + python_slice = __builtins__['slice'] + slices = [python_slice(start_index, start_index + length) for (start_index, length) in zip(start_indices, shape)] + return inputs[slices] + +def slice_update(inputs, start_indices, updates): + shape_dtype = to_torch_dtype('int64') + inputs = convert_to_tensor(inputs) + start_indices = convert_to_tensor(start_indices).to(shape_dtype) + updates = convert_to_tensor(updates) + python_slice = __builtins__['slice'] + slices = [python_slice(start_index, start_index + update_length) for (start_index, update_length) in zip(start_indices, updates.shape)] + outputs = torch.clone(inputs) + outputs[slices] = updates + return 
outputs + +def switch(index, branches, *operands): + index = convert_to_tensor(index, 'int32') + index = torch.clamp(index, 0, len(branches) - 1) + return branches[index](*operands) + +def while_loop(cond, body, loop_vars, maximum_iterations=None): + current_iter = 0 + iteration_check = lambda iter: maximum_iterations is None or iter < maximum_iterations + is_tuple = isinstance(loop_vars, (tuple, list)) + loop_vars = tuple(loop_vars) if is_tuple else (loop_vars,) + loop_vars = tree.map_structure(convert_to_tensor, loop_vars) + while cond(*loop_vars) and iteration_check(current_iter): + loop_vars = body(*loop_vars) + if not isinstance(loop_vars, (list, tuple)): + loop_vars = (loop_vars,) + loop_vars = tuple(loop_vars) + current_iter += 1 + return loop_vars if is_tuple else loop_vars[0] + +def fori_loop(lower, upper, body_fun, init_val): + val = init_val + for i in range(lower, upper): + val = body_fun(i, val) + return val + +def stop_gradient(variable): + return variable.detach() + +def unstack(x, num=None, axis=0): + return x.unbind(axis) + +def random_seed_dtype(): + return 'int32' + +class custom_gradient: + + def __init__(self, forward_fn): + self.forward_fn = forward_fn + + def __call__(self, *args, **kwargs): + return CustomGradientFunction.apply(self.forward_fn, *args, **kwargs) + +class CustomGradientFunction(torch.autograd.Function): + + @staticmethod + def forward(ctx, forward_fn, *args, **kwargs): + ctx.forward_fn = forward_fn + ctx.save_for_backward(*args) + try: + (output, ctx.grad_fn) = forward_fn(*args, **kwargs) + except: + output = forward_fn(*args, **kwargs) + ctx.grad_fn = lambda *args, **kwargs: torch.full((), float('nan')) + return output + + @staticmethod + def backward(ctx, grad_output): + args = ctx.saved_tensors + grad_fn = ctx.grad_fn + if grad_fn is None: + raise ValueError('grad_fn must be provided for custom gradient') + grads = grad_fn(*args, upstream=grad_output) + if not isinstance(grads, tuple): + grads = (grads,) + return (None,) + grads + +# File: keras-master/keras/src/backend/torch/image.py +import functools +import itertools +import operator +import torch +from keras.src import backend +from keras.src.backend.torch.core import convert_to_tensor +from keras.src.utils.module_utils import torchvision +RESIZE_INTERPOLATIONS = {} +UNSUPPORTED_INTERPOLATIONS = ('lanczos3', 'lanczos5') + +def rgb_to_grayscale(images, data_format=None): + images = convert_to_tensor(images) + data_format = backend.standardize_data_format(data_format) + if data_format == 'channels_last': + if images.ndim == 4: + images = images.permute((0, 3, 1, 2)) + elif images.ndim == 3: + images = images.permute((2, 0, 1)) + else: + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + images = torchvision.transforms.functional.rgb_to_grayscale(img=images) + if data_format == 'channels_last': + if len(images.shape) == 4: + images = images.permute((0, 2, 3, 1)) + elif len(images.shape) == 3: + images = images.permute((1, 2, 0)) + return images + +def rgb_to_hsv(images, data_format=None): + images = convert_to_tensor(images) + dtype = images.dtype + data_format = backend.standardize_data_format(data_format) + channels_axis = -1 if data_format == 'channels_last' else -3 + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). 
Received input with shape: images.shape={images.shape}') + if not backend.is_float_dtype(dtype): + raise ValueError(f'Invalid images dtype: expected float dtype. Received: images.dtype={backend.standardize_dtype(dtype)}') + eps = torch.finfo(dtype).eps + images = torch.where(torch.abs(images) < eps, 0.0, images) + (red, green, blue) = torch.split(images, [1, 1, 1], channels_axis) + red = torch.squeeze(red, channels_axis) + green = torch.squeeze(green, channels_axis) + blue = torch.squeeze(blue, channels_axis) + + def rgb_planes_to_hsv_planes(r, g, b): + value = torch.maximum(torch.maximum(r, g), b) + minimum = torch.minimum(torch.minimum(r, g), b) + range_ = value - minimum + safe_value = torch.where(value > 0, value, 1.0) + safe_range = torch.where(range_ > 0, range_, 1.0) + saturation = torch.where(value > 0, range_ / safe_value, 0.0) + norm = 1.0 / (6.0 * safe_range) + hue = torch.where(value == g, norm * (b - r) + 2.0 / 6.0, norm * (r - g) + 4.0 / 6.0) + hue = torch.where(value == r, norm * (g - b), hue) + hue = torch.where(range_ > 0, hue, 0.0) + (hue < 0.0).to(hue.dtype) + return (hue, saturation, value) + images = torch.stack(rgb_planes_to_hsv_planes(red, green, blue), axis=channels_axis) + return images + +def hsv_to_rgb(images, data_format=None): + images = convert_to_tensor(images) + dtype = images.dtype + data_format = backend.standardize_data_format(data_format) + channels_axis = -1 if data_format == 'channels_last' else -3 + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if not backend.is_float_dtype(dtype): + raise ValueError(f'Invalid images dtype: expected float dtype. Received: images.dtype={backend.standardize_dtype(dtype)}') + (hue, saturation, value) = torch.split(images, [1, 1, 1], channels_axis) + hue = torch.squeeze(hue, channels_axis) + saturation = torch.squeeze(saturation, channels_axis) + value = torch.squeeze(value, channels_axis) + + def hsv_planes_to_rgb_planes(hue, saturation, value): + dh = torch.remainder(hue, 1.0) * 6.0 + dr = torch.clip(torch.abs(dh - 3.0) - 1.0, 0.0, 1.0) + dg = torch.clip(2.0 - torch.abs(dh - 2.0), 0.0, 1.0) + db = torch.clip(2.0 - torch.abs(dh - 4.0), 0.0, 1.0) + one_minus_s = 1.0 - saturation + red = value * (one_minus_s + saturation * dr) + green = value * (one_minus_s + saturation * dg) + blue = value * (one_minus_s + saturation * db) + return (red, green, blue) + images = torch.stack(hsv_planes_to_rgb_planes(hue, saturation, value), axis=channels_axis) + return images + +def resize(images, size, interpolation='bilinear', antialias=False, crop_to_aspect_ratio=False, pad_to_aspect_ratio=False, fill_mode='constant', fill_value=0.0, data_format=None): + data_format = backend.standardize_data_format(data_format) + RESIZE_INTERPOLATIONS.update({'bilinear': torchvision.transforms.InterpolationMode.BILINEAR, 'nearest': torchvision.transforms.InterpolationMode.NEAREST_EXACT, 'bicubic': torchvision.transforms.InterpolationMode.BICUBIC}) + if interpolation in UNSUPPORTED_INTERPOLATIONS: + raise ValueError(f'Resizing with Lanczos interpolation is not supported by the PyTorch backend. Received: interpolation={interpolation}.') + if interpolation not in RESIZE_INTERPOLATIONS: + raise ValueError(f'Invalid value for argument `interpolation`. Expected of one {RESIZE_INTERPOLATIONS}. 
Received: interpolation={interpolation}') + if fill_mode != 'constant': + raise ValueError(f"Invalid value for argument `fill_mode`. Only `'constant'` is supported. Received: fill_mode={fill_mode}") + if pad_to_aspect_ratio and crop_to_aspect_ratio: + raise ValueError('Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` can be `True`.') + if not len(size) == 2: + raise ValueError(f'Argument `size` must be a tuple of two elements (height, width). Received: size={size}') + size = tuple(size) + images = convert_to_tensor(images) + if images.ndim not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if data_format == 'channels_last': + if images.ndim == 4: + images = images.permute((0, 3, 1, 2)) + else: + images = images.permute((2, 0, 1)) + if crop_to_aspect_ratio: + shape = images.shape + (height, width) = (shape[-2], shape[-1]) + (target_height, target_width) = size + crop_height = int(float(width * target_height) / target_width) + crop_height = max(min(height, crop_height), 1) + crop_width = int(float(height * target_width) / target_height) + crop_width = max(min(width, crop_width), 1) + crop_box_hstart = int(float(height - crop_height) / 2) + crop_box_wstart = int(float(width - crop_width) / 2) + if len(images.shape) == 4: + images = images[:, :, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width] + else: + images = images[:, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width] + elif pad_to_aspect_ratio: + shape = images.shape + (height, width) = (shape[-2], shape[-1]) + (target_height, target_width) = size + pad_height = int(float(width * target_height) / target_width) + pad_height = max(height, pad_height) + pad_width = int(float(height * target_width) / target_height) + pad_width = max(width, pad_width) + img_box_hstart = int(float(pad_height - height) / 2) + img_box_wstart = int(float(pad_width - width) / 2) + if len(images.shape) == 4: + batch_size = images.shape[0] + channels = images.shape[1] + padded_img = torch.ones((batch_size, channels, pad_height + height, pad_width + width), dtype=images.dtype) * fill_value + padded_img[:, :, img_box_hstart:img_box_hstart + height, img_box_wstart:img_box_wstart + width] = images + else: + channels = images.shape[0] + padded_img = torch.ones((channels, pad_height + height, pad_width + width), dtype=images.dtype) * fill_value + padded_img[:, img_box_hstart:img_box_hstart + height, img_box_wstart:img_box_wstart + width] = images + images = padded_img + resized = torchvision.transforms.functional.resize(img=images, size=size, interpolation=RESIZE_INTERPOLATIONS[interpolation], antialias=antialias) + if data_format == 'channels_last': + if len(images.shape) == 4: + resized = resized.permute((0, 2, 3, 1)) + elif len(images.shape) == 3: + resized = resized.permute((1, 2, 0)) + return resized +AFFINE_TRANSFORM_INTERPOLATIONS = {'nearest': 0, 'bilinear': 1} +AFFINE_TRANSFORM_FILL_MODES = {'constant', 'nearest', 'wrap', 'mirror', 'reflect'} + +def affine_transform(images, transform, interpolation='bilinear', fill_mode='constant', fill_value=0, data_format=None): + data_format = backend.standardize_data_format(data_format) + if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys(): + raise ValueError(f'Invalid value for argument `interpolation`. Expected of one {set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. 
Received: interpolation={interpolation}') + if fill_mode not in AFFINE_TRANSFORM_FILL_MODES: + raise ValueError(f'Invalid value for argument `fill_mode`. Expected of one {AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}') + images = convert_to_tensor(images) + transform = convert_to_tensor(transform) + if images.ndim not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if transform.ndim not in (1, 2): + raise ValueError(f'Invalid transform rank: expected rank 1 (single transform) or rank 2 (batch of transforms). Received input with shape: transform.shape={transform.shape}') + need_squeeze = False + if images.ndim == 3: + images = images.unsqueeze(dim=0) + need_squeeze = True + if transform.ndim == 1: + transform = transform.unsqueeze(dim=0) + if data_format == 'channels_first': + images = images.permute((0, 2, 3, 1)) + batch_size = images.shape[0] + meshgrid = torch.meshgrid(*[torch.arange(size, dtype=transform.dtype, device=transform.device) for size in images.shape[1:]], indexing='ij') + indices = torch.concatenate([torch.unsqueeze(x, dim=-1) for x in meshgrid], dim=-1) + indices = torch.tile(indices, (batch_size, 1, 1, 1, 1)) + a0 = transform[:, 0].clone() + a2 = transform[:, 2].clone() + b1 = transform[:, 4].clone() + b2 = transform[:, 5].clone() + transform[:, 0] = b1 + transform[:, 2] = b2 + transform[:, 4] = a0 + transform[:, 5] = a2 + transform = torch.nn.functional.pad(transform, pad=[0, 1, 0, 0], mode='constant', value=1) + transform = torch.reshape(transform, (batch_size, 3, 3)) + offset = transform[:, 0:2, 2].clone() + offset = torch.nn.functional.pad(offset, pad=[0, 1, 0, 0]) + transform[:, 0:2, 2] = 0 + coordinates = torch.einsum('Bhwij, Bjk -> Bhwik', indices, transform) + coordinates = torch.moveaxis(coordinates, source=-1, destination=1) + coordinates += torch.reshape(a=offset, shape=(*offset.shape, 1, 1, 1)) + affined = torch.stack([map_coordinates(images[i], coordinates[i], order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation], fill_mode=fill_mode, fill_value=fill_value) for i in range(len(images))]) + if data_format == 'channels_first': + affined = affined.permute((0, 3, 1, 2)) + if need_squeeze: + affined = affined.squeeze(dim=0) + return affined + +def _mirror_index_fixer(index, size): + s = size - 1 + return torch.abs((index + s) % (2 * s) - s) + +def _reflect_index_fixer(index, size): + return torch.floor_divide(_mirror_index_fixer(2 * index + 1, 2 * size + 1) - 1, 2) +_INDEX_FIXERS = {'constant': lambda index, size: torch.clip(index, 0, size - 1), 'nearest': lambda index, size: torch.clip(index, 0, size - 1), 'wrap': lambda index, size: index % size, 'mirror': _mirror_index_fixer, 'reflect': _reflect_index_fixer} + +def _is_integer(a): + if not torch.is_floating_point(a) and (not torch.is_complex(a)): + return True + return False + +def _nearest_indices_and_weights(coordinate): + coordinate = coordinate if _is_integer(coordinate) else torch.round(coordinate) + index = coordinate.to(torch.int32) + return [(index, 1)] + +def _linear_indices_and_weights(coordinate): + lower = torch.floor(coordinate) + upper_weight = coordinate - lower + lower_weight = 1 - upper_weight + index = lower.to(torch.int32) + return [(index, lower_weight), (index + 1, upper_weight)] + +def map_coordinates(inputs, coordinates, order, fill_mode='constant', fill_value=0.0): + input_arr = convert_to_tensor(inputs) + coordinate_arrs = [convert_to_tensor(c) for c in 
coordinates] + if len(coordinate_arrs) != len(input_arr.shape): + raise ValueError(f'First dim of `coordinates` must be the same as the rank of `inputs`. Received inputs with shape: {input_arr.shape} and coordinate leading dim of {len(coordinate_arrs)}') + if len(coordinate_arrs[0].shape) < 1: + dim = len(coordinate_arrs) + shape = (dim,) + coordinate_arrs[0].shape + raise ValueError(f'Invalid coordinates rank: expected at least rank 2. Received input with shape: {shape}') + if isinstance(fill_value, (int, float)) and _is_integer(input_arr): + fill_value = int(fill_value) + if len(coordinates) != len(input_arr.shape): + raise ValueError(f'coordinates must be a sequence of length inputs.shape, but {len(coordinates)} != {len(input_arr.shape)}') + index_fixer = _INDEX_FIXERS.get(fill_mode) + if index_fixer is None: + raise ValueError(f'Invalid value for argument `fill_mode`. Expected one of {set(_INDEX_FIXERS.keys())}. Received: fill_mode={fill_mode}') + if order == 0: + interp_fun = _nearest_indices_and_weights + elif order == 1: + interp_fun = _linear_indices_and_weights + else: + raise NotImplementedError('map_coordinates currently requires order<=1') + if fill_mode == 'constant': + + def is_valid(index, size): + return (0 <= index) & (index < size) + else: + + def is_valid(index, size): + return True + valid_1d_interpolations = [] + for (coordinate, size) in zip(coordinate_arrs, input_arr.shape): + interp_nodes = interp_fun(coordinate) + valid_interp = [] + for (index, weight) in interp_nodes: + fixed_index = index_fixer(index, size) + valid = is_valid(index, size) + valid_interp.append((fixed_index, valid, weight)) + valid_1d_interpolations.append(valid_interp) + outputs = [] + for items in itertools.product(*valid_1d_interpolations): + (indices, validities, weights) = zip(*items) + if all((valid is True for valid in validities)): + contribution = input_arr[indices] + else: + all_valid = functools.reduce(operator.and_, validities) + contribution = torch.where(all_valid, input_arr[indices], fill_value) + outputs.append(functools.reduce(operator.mul, weights) * contribution) + result = functools.reduce(operator.add, outputs) + if _is_integer(input_arr): + result = result if _is_integer(result) else torch.round(result) + return result.to(input_arr.dtype) + +# File: keras-master/keras/src/backend/torch/layer.py +from typing import Iterator +from typing import Tuple +import torch +from keras.src.backend.common.stateless_scope import in_stateless_scope +from keras.src.ops.operation import Operation + +class TorchLayer(torch.nn.Module): + + @property + def torch_params(self): + if not hasattr(self, '_torch_params'): + self._track_variables() + return self._torch_params + + def _post_build(self): + if in_stateless_scope(): + return + self._track_variables() + + def _track_variables(self): + self._torch_params = torch.nn.ParameterDict({variable.path: variable.value for variable in self.variables}) + + def named_parameters(self, prefix: str='', recurse: bool=True, remove_duplicate: bool=True) -> Iterator[Tuple[str, torch.nn.Parameter]]: + if not hasattr(self, '_torch_params'): + self._track_variables() + return torch.nn.Module.named_parameters(self, prefix, recurse, remove_duplicate) + + def forward(self, *args, **kwargs): + return Operation.__call__(self, *args, **kwargs) + + def _setattr_hook(self, name, value): + from keras.src.layers import Layer + if isinstance(value, torch.nn.Module) and (not isinstance(value, Layer)) and (not name == '_torch_params'): + from keras.src.utils.torch_utils 
import TorchModuleWrapper + if not isinstance(self, TorchModuleWrapper): + value = TorchModuleWrapper(value) + return (name, value) + + def _post_track_variable(self, variable): + if hasattr(self, '_torch_params'): + if variable.path not in self.torch_params: + self.torch_params[variable.path] = variable.value + + def _post_untrack_variable(self, variable): + if hasattr(self, '_torch_params'): + if variable.path in self.torch_params: + self.torch_params.pop(variable.path) + +# File: keras-master/keras/src/backend/torch/linalg.py +import torch +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.torch.core import cast +from keras.src.backend.torch.core import convert_to_tensor + +def cholesky(x): + return torch.linalg.cholesky(x) + +def det(x): + return torch.det(x) + +def eig(x): + return torch.linalg.eig(x) + +def eigh(x): + return torch.linalg.eigh(x) + +def inv(x): + return torch.linalg.inv(x) + +def lu_factor(x): + (LU, pivots) = torch.linalg.lu_factor(x) + return (LU, pivots - 1) + +def norm(x, ord=None, axis=None, keepdims=False): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims) + +def qr(x, mode='reduced'): + if mode not in {'reduced', 'complete'}: + raise ValueError(f"`mode` argument value not supported. Expected one of {{'reduced', 'complete'}}. Received: mode={mode}") + return torch.linalg.qr(x, mode=mode) + +def solve(a, b): + return torch.linalg.solve(a, b) + +def solve_triangular(a, b, lower=False): + if b.ndim == a.ndim - 1: + b = torch.unsqueeze(b, axis=-1) + return torch.linalg.solve_triangular(a, b, upper=not lower).squeeze(axis=-1) + return torch.linalg.solve_triangular(a, b, upper=not lower) + +def svd(x, full_matrices=True, compute_uv=True): + if not compute_uv: + return torch.linalg.svdvals(x) + return torch.linalg.svd(x, full_matrices=full_matrices) + +def lstsq(a, b, rcond=None): + a = convert_to_tensor(a) + b = convert_to_tensor(b) + return torch.linalg.lstsq(a, b, rcond=rcond)[0] + +# File: keras-master/keras/src/backend/torch/math.py +import math +import torch +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.torch.core import cast +from keras.src.backend.torch.core import convert_to_tensor +from keras.src.backend.torch.core import get_device +from keras.src.backend.torch.numpy import pad + +def _segment_reduction_fn(data, segment_ids, reduction_method, num_segments): + num_repeats = torch.prod(torch.tensor(data.shape[1:], device=get_device())).long() + segment_ids = segment_ids.repeat_interleave(num_repeats).view(*data.shape).type(torch.int64) + num_segments = num_segments or len(torch.unique(segment_ids)) + segment_ids = torch.where(segment_ids >= 0, segment_ids, num_segments) + segment_ids = torch.where(segment_ids < num_segments, segment_ids, num_segments) + shape = (num_segments + 1,) + tuple(data.shape[1:]) + if reduction_method == 'amax': + result = torch.ones(*shape, device=get_device()) * -float('Inf') + else: + result = torch.zeros(*shape, device=get_device()) + result = result.scatter_reduce(0, segment_ids, data.float(), reduction_method) + result = result[:-1, ...] 
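+    # Negative or out-of-range segment ids were clamped to the extra trailing
+    # segment above, and that row has just been sliced off; the cast below
+    # restores the input dtype, since `scatter_reduce` operated on float data.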
+ return result.type(data.dtype) + +def segment_sum(data, segment_ids, num_segments=None, **kwargs): + data = convert_to_tensor(data) + segment_ids = convert_to_tensor(segment_ids) + return _segment_reduction_fn(data, segment_ids, 'sum', num_segments) + +def segment_max(data, segment_ids, num_segments=None, **kwargs): + data = convert_to_tensor(data) + segment_ids = convert_to_tensor(segment_ids) + return _segment_reduction_fn(data, segment_ids, 'amax', num_segments) + +def top_k(x, k, sorted=True): + x = convert_to_tensor(x) + return torch.topk(x, k, sorted=sorted) + +def in_top_k(targets, predictions, k): + targets = convert_to_tensor(targets).type(torch.int64) + targets = targets[:, None] + predictions = convert_to_tensor(predictions) + topk_values = top_k(predictions, k).values + targets_values = torch.take_along_dim(predictions, targets, dim=-1) + mask = targets_values >= topk_values + return torch.any(mask, axis=-1) + +def logsumexp(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + if axis is None: + max_x = torch.max(x) + return torch.log(torch.sum(torch.exp(x - max_x))) + max_x + max_x = torch.amax(x, dim=axis, keepdim=True) + result = torch.log(torch.sum(torch.exp(x - max_x), dim=axis, keepdim=True)) + max_x + return torch.squeeze(result, dim=axis) if not keepdims else result + +def qr(x, mode='reduced'): + x = convert_to_tensor(x) + if mode not in {'reduced', 'complete'}: + raise ValueError(f"`mode` argument value not supported. Expected one of {{'reduced', 'complete'}}. Received: mode={mode}") + x = convert_to_tensor(x) + return torch.linalg.qr(x, mode=mode) + +def extract_sequences(x, sequence_length, sequence_stride): + x = convert_to_tensor(x) + return torch.unfold_copy(x, dimension=-1, size=sequence_length, step=sequence_stride) + +def _overlap_sequences(x, sequence_stride): + x = convert_to_tensor(x) + (*batch_shape, num_sequences, sequence_length) = x.shape + if sequence_stride > sequence_length: + raise ValueError(f'`sequence_stride` must equal or less than x.shape[-1]. Received: sequence_stride={sequence_stride}, x.shape[-1]={sequence_length}') + if sequence_stride < sequence_length / num_sequences: + raise ValueError(f'`sequence_stride` must equal or greater than x.shape[-1] / x.shape[-2]. 
+ flat_batchsize = math.prod(batch_shape) + x = torch.reshape(x, (flat_batchsize, num_sequences, sequence_length)) + output_size = sequence_stride * (num_sequences - 1) + sequence_length + nstep_per_segment = 1 + (sequence_length - 1) // sequence_stride + padded_segment_len = nstep_per_segment * sequence_stride + x = torch.nn.functional.pad(x, (0, padded_segment_len - sequence_length, 0, 0, 0, 0)) + x = torch.reshape(x, (flat_batchsize, num_sequences, nstep_per_segment, sequence_stride)) + x = torch.permute(x, (0, 2, 1, 3)) + x = torch.nn.functional.pad(x, (0, 0, 0, num_sequences, 0, 0, 0, 0)) + shrinked = x.shape[2] - 1 + x = torch.reshape(x, (flat_batchsize, -1)) + x = x[:, :nstep_per_segment * shrinked * sequence_stride] + x = torch.reshape(x, (flat_batchsize, nstep_per_segment, shrinked * sequence_stride)) + x = torch.sum(x, dim=1)[:, :output_size] + return torch.reshape(x, tuple(batch_shape) + (-1,)) + +def _get_complex_tensor_from_tuple(x): + if not isinstance(x, (tuple, list)) or len(x) != 2: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Received: x={x}') + (real, imag) = x + real = convert_to_tensor(real) + imag = convert_to_tensor(imag) + if real.shape != imag.shape: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Both the real and imaginary parts should have the same shape. Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}') + if not torch.is_floating_point(real) or not torch.is_floating_point(imag): + raise ValueError(f'At least one tensor in input `x` is not of type float. Received: x={x}.') + complex_input = torch.complex(real, imag) + return complex_input + +def fft(x): + complex_input = _get_complex_tensor_from_tuple(x) + complex_output = torch.fft.fft(complex_input) + return (complex_output.real, complex_output.imag) + +def fft2(x): + complex_input = _get_complex_tensor_from_tuple(x) + complex_output = torch.fft.fft2(complex_input) + return (complex_output.real, complex_output.imag) + +def rfft(x, fft_length=None): + x = convert_to_tensor(x) + complex_output = torch.fft.rfft(x, n=fft_length, dim=-1, norm='backward') + return (complex_output.real, complex_output.imag) + +def irfft(x, fft_length=None): + complex_input = _get_complex_tensor_from_tuple(x) + return torch.fft.irfft(complex_input, n=fft_length, dim=-1, norm='backward') + +def stft(x, sequence_length, sequence_stride, fft_length, window='hann', center=True): + if standardize_dtype(x.dtype) not in {'float32', 'float64'}: + raise TypeError(f'Invalid input type. Expected `float32` or `float64`. Received: input type={x.dtype}') + if fft_length < sequence_length: + raise ValueError(f'`fft_length` must be greater than or equal to `sequence_length`. Received: sequence_length={sequence_length}, fft_length={fft_length}') + if isinstance(window, str): + if window not in {'hann', 'hamming'}: + raise ValueError(f'If a string is passed to `window`, it must be one of `"hann"`, `"hamming"`. Received: window={window}')
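+ # Note: at the Keras boundary this backend represents complex data as a
+ # (real, imag) tuple of float tensors instead of a complex dtype.
+ # `_get_complex_tensor_from_tuple` above packs such a tuple via
+ # `torch.complex`, and `fft`/`fft2`/`stft` return results unpacked the same
+ # way, e.g. `real, imag = fft((x_real, x_imag))`.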
+ x = convert_to_tensor(x) + if window is not None: + if isinstance(window, str): + if window == 'hann': + win = torch.hann_window(sequence_length, periodic=True, dtype=x.dtype, device=get_device()) + else: + win = torch.hamming_window(sequence_length, periodic=True, dtype=x.dtype, device=get_device()) + else: + win = convert_to_tensor(window, dtype=x.dtype) + if len(win.shape) != 1 or win.shape[-1] != sequence_length: + raise ValueError(f'The shape of `window` must be equal to [sequence_length]. Received: window shape={win.shape}') + else: + win = torch.ones((sequence_length,), dtype=x.dtype, device=get_device()) + need_unpack = False + (*batch_shape, samples) = x.shape + if len(x.shape) > 2: + need_unpack = True + flat_batchsize = math.prod(batch_shape) + x = torch.reshape(x, (flat_batchsize, samples)) + x = torch.stft(x, n_fft=fft_length, hop_length=sequence_stride, win_length=sequence_length, window=win, center=center, return_complex=True) + if need_unpack: + (fft_unique_bins, num_sequences) = x.shape[-2:] + x = torch.reshape(x, (*batch_shape, fft_unique_bins, num_sequences)) + x = torch.swapaxes(x, -2, -1) + return (x.real, x.imag) + +def istft(x, sequence_length, sequence_stride, fft_length, length=None, window='hann', center=True): + complex_input = _get_complex_tensor_from_tuple(x) + dtype = complex_input.real.dtype + win = None + if window is not None: + if isinstance(window, str): + if window == 'hann': + win = torch.hann_window(sequence_length, periodic=True, dtype=dtype, device=get_device()) + else: + win = torch.hamming_window(sequence_length, periodic=True, dtype=dtype, device=get_device()) + else: + win = convert_to_tensor(window, dtype=dtype) + if len(win.shape) != 1 or win.shape[-1] != sequence_length: + raise ValueError(f'The shape of `window` must be equal to [sequence_length]. Received: window shape={win.shape}') + if sequence_length == fft_length and center is True and (win is not None): + need_unpack = False + (*batch_shape, num_sequences, fft_unique_bins) = complex_input.shape + if len(complex_input.shape) > 3: + need_unpack = True + flat_batchsize = math.prod(batch_shape) + complex_input = torch.reshape(complex_input, (flat_batchsize, num_sequences, fft_unique_bins)) + complex_input = torch.swapaxes(complex_input, -2, -1) + x = torch.istft(complex_input, n_fft=fft_length, hop_length=sequence_stride, win_length=sequence_length, window=win, center=center, length=length, return_complex=False) + if need_unpack: + samples = x.shape[-1] + x = torch.reshape(x, (*batch_shape, samples)) + return x + x = irfft(x, fft_length) + expected_output_len = fft_length + sequence_stride * (x.shape[-2] - 1) + if win is not None: + l_pad = (fft_length - sequence_length) // 2 + r_pad = fft_length - sequence_length - l_pad + win = pad(win, [[l_pad, r_pad]], 'constant') + _sequence_length = sequence_length + l_pad + r_pad + denom = torch.square(win) + overlaps = -(-_sequence_length // sequence_stride) + denom = pad(denom, [(0, overlaps * sequence_stride - _sequence_length)]) + denom = torch.reshape(denom, [overlaps, sequence_stride]) + denom = torch.sum(denom, 0, keepdims=True) + denom = torch.tile(denom, [overlaps, 1]) + denom = torch.reshape(denom, [overlaps * sequence_stride]) + win = torch.divide(win, denom[:_sequence_length]) + x = torch.multiply(x, win) + x = _overlap_sequences(x, sequence_stride) + start = 0 if center is False else fft_length // 2 + if length is not None: + end = start + length + elif center is True: + end = -(fft_length // 2) + else: + end
= expected_output_len + return x[..., start:end] + +def rsqrt(x): + x = convert_to_tensor(x) + return torch.rsqrt(x) + +def erf(x): + x = convert_to_tensor(x) + return torch.erf(x) + +def erfinv(x): + x = convert_to_tensor(x) + return torch.erfinv(x) + +def solve(a, b): + a = convert_to_tensor(a) + b = convert_to_tensor(b) + return torch.linalg.solve(a, b) + +def norm(x, ord=None, axis=None, keepdims=False): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims) + +def logdet(x): + x = convert_to_tensor(x) + return torch.logdet(x) + +# File: keras-master/keras/src/backend/torch/nn.py +import torch +import torch.nn.functional as tnn +from keras.src import backend +from keras.src import tree +from keras.src.backend.common.backend_utils import compute_conv_transpose_padding_args_for_torch +from keras.src.backend.torch.core import cast +from keras.src.backend.torch.core import convert_to_tensor +from keras.src.backend.torch.core import get_device +from keras.src.backend.torch.numpy import expand_dims +from keras.src.backend.torch.numpy import maximum +from keras.src.backend.torch.numpy import where +from keras.src.utils.argument_validation import standardize_tuple + +def relu(x): + x = convert_to_tensor(x) + return tnn.relu(x) + +def relu6(x): + x = convert_to_tensor(x) + return tnn.relu6(x) + +def sigmoid(x): + x = convert_to_tensor(x) + return tnn.sigmoid(x) + +def tanh(x): + x = convert_to_tensor(x) + return tnn.tanh(x) + +def softplus(x): + x = convert_to_tensor(x) + return tnn.softplus(x) + +def softsign(x): + x = convert_to_tensor(x) + return tnn.softsign(x) + +def silu(x): + x = convert_to_tensor(x) + return tnn.silu(x) + +def log_sigmoid(x): + x = convert_to_tensor(x) + return tnn.logsigmoid(x) + +def leaky_relu(x, negative_slope=0.2): + x = convert_to_tensor(x) + return tnn.leaky_relu(x, negative_slope=negative_slope) + +def hard_sigmoid(x): + x = convert_to_tensor(x) + return tnn.hardsigmoid(x) + +def hard_silu(x): + x = convert_to_tensor(x) + return tnn.hardswish(x) + +def elu(x, alpha=1.0): + x = convert_to_tensor(x) + return tnn.elu(x, alpha) + +def selu(x): + x = convert_to_tensor(x) + return tnn.selu(x) + +def gelu(x, approximate=True): + x = convert_to_tensor(x) + if approximate: + return tnn.gelu(x, approximate='tanh') + return tnn.gelu(x) + +def softmax(x, axis=-1): + x = convert_to_tensor(x) + dtype = backend.standardize_dtype(x.dtype) + if get_device() == 'cpu' and backend.standardize_dtype(x.dtype) == 'float16': + x = cast(x, 'float32') + if axis is None: + output = torch.reshape(x, [-1]) + output = tnn.softmax(output, dim=-1) + output = torch.reshape(output, x.shape) + else: + output = tnn.softmax(x, dim=axis) + return cast(output, dtype) + +def log_softmax(x, axis=-1): + x = convert_to_tensor(x) + dtype = backend.standardize_dtype(x.dtype) + if get_device() == 'cpu' and backend.standardize_dtype(x.dtype) == 'float16': + x = cast(x, 'float32') + if axis is None: + output = torch.reshape(x, [-1]) + output = tnn.log_softmax(output, dim=-1) + output = torch.reshape(output, x.shape) + else: + output = tnn.log_softmax(x, dim=axis) + return cast(output, dtype) + +def _compute_padding_length(input_length, kernel_length, stride, dilation_rate=1): + total_padding_length = dilation_rate * (kernel_length - 1) - (input_length - 1) % stride + left_padding = total_padding_length // 2 + right_padding = 
(total_padding_length + 1) // 2 + return (left_padding, right_padding) + +def _apply_same_padding(inputs, kernel_size, strides, operation_type, dilation_rate=1): + spatial_shape = inputs.shape[2:] + num_spatial_dims = len(spatial_shape) + padding = () + for i in range(num_spatial_dims): + if operation_type == 'pooling': + padding_size = _compute_padding_length(spatial_shape[i], kernel_size[i], strides[i]) + mode = 'replicate' + else: + dilation_rate = standardize_tuple(dilation_rate, num_spatial_dims, 'dilation_rate') + padding_size = _compute_padding_length(spatial_shape[i], kernel_size[i], strides[i], dilation_rate[i]) + mode = 'constant' + padding = (padding_size,) + padding + if all([left == right for (left, right) in padding]): + return (inputs, [left for (left, _) in padding]) + flattened_padding = tuple((value for left_and_right in padding for value in left_and_right)) + return (tnn.pad(inputs, pad=flattened_padding, mode=mode), 0) + +def _transpose_spatial_inputs(inputs): + num_spatial_dims = inputs.ndim - 2 + if num_spatial_dims == 1: + inputs = torch.permute(inputs, (0, 2, 1)) + elif num_spatial_dims == 2: + inputs = torch.permute(inputs, (0, 3, 1, 2)) + elif num_spatial_dims == 3: + inputs = torch.permute(inputs, (0, 4, 1, 2, 3)) + else: + raise ValueError(f'Inputs must have ndim=3, 4 or 5, corresponding to 1D, 2D and 3D inputs. Received input shape: {inputs.shape}.') + return inputs + +def _transpose_spatial_outputs(outputs): + num_spatial_dims = len(outputs.shape) - 2 + if num_spatial_dims == 1: + outputs = torch.permute(outputs, (0, 2, 1)) + elif num_spatial_dims == 2: + outputs = torch.permute(outputs, (0, 2, 3, 1)) + elif num_spatial_dims == 3: + outputs = torch.permute(outputs, (0, 2, 3, 4, 1)) + return outputs + +def _transpose_conv_kernel(kernel): + num_spatial_dims = len(kernel.shape) - 2 + if num_spatial_dims == 1: + kernel = torch.permute(kernel, (2, 1, 0)) + elif num_spatial_dims == 2: + kernel = torch.permute(kernel, (3, 2, 0, 1)) + elif num_spatial_dims == 3: + kernel = torch.permute(kernel, (4, 3, 0, 1, 2)) + return kernel + +def max_pool(inputs, pool_size, strides=None, padding='valid', data_format=None): + inputs = convert_to_tensor(inputs) + num_spatial_dims = inputs.ndim - 2 + pool_size = standardize_tuple(pool_size, num_spatial_dims, 'pool_size') + if strides is None: + strides = pool_size + else: + strides = standardize_tuple(strides, num_spatial_dims, 'strides') + data_format = backend.standardize_data_format(data_format) + if data_format == 'channels_last': + inputs = _transpose_spatial_inputs(inputs) + if padding == 'same': + (inputs, padding) = _apply_same_padding(inputs, pool_size, strides, operation_type='pooling') + else: + padding = 0 + device = get_device() + if device == 'meta': + inputs = torch.empty(size=inputs.shape, dtype=inputs.dtype, device='cpu') + if num_spatial_dims == 1: + outputs = tnn.max_pool1d(inputs, kernel_size=pool_size, stride=strides, padding=padding) + elif num_spatial_dims == 2: + outputs = tnn.max_pool2d(inputs, kernel_size=pool_size, stride=strides, padding=padding) + elif num_spatial_dims == 3: + outputs = tnn.max_pool3d(inputs, kernel_size=pool_size, stride=strides, padding=padding) + else: + raise ValueError(f'Inputs to pooling op must have ndim=3, 4 or 5, corresponding to 1D, 2D and 3D inputs. 
Received input shape: {inputs.shape}.') + outputs = outputs.to(device) + if data_format == 'channels_last': + outputs = _transpose_spatial_outputs(outputs) + return outputs + +def average_pool(inputs, pool_size, strides=None, padding='valid', data_format=None): + inputs = convert_to_tensor(inputs) + num_spatial_dims = inputs.ndim - 2 + pool_size = standardize_tuple(pool_size, num_spatial_dims, 'pool_size') + if strides is None: + strides = pool_size + else: + strides = standardize_tuple(strides, num_spatial_dims, 'strides') + data_format = backend.standardize_data_format(data_format) + if data_format == 'channels_last': + inputs = _transpose_spatial_inputs(inputs) + if padding == 'same': + (inputs, padding) = _apply_same_padding(inputs, pool_size, strides, operation_type='pooling') + else: + padding = 0 + if num_spatial_dims == 1: + outputs = tnn.avg_pool1d(inputs, kernel_size=pool_size, stride=strides, padding=padding, count_include_pad=False) + elif num_spatial_dims == 2: + outputs = tnn.avg_pool2d(inputs, kernel_size=pool_size, stride=strides, padding=padding, count_include_pad=False) + elif num_spatial_dims == 3: + outputs = tnn.avg_pool3d(inputs, kernel_size=pool_size, stride=strides, padding=padding, count_include_pad=False) + else: + raise ValueError(f'Inputs to pooling op must have ndim=3, 4 or 5, corresponding to 1D, 2D and 3D inputs. Received input shape: {inputs.shape}.') + if data_format == 'channels_last': + outputs = _transpose_spatial_outputs(outputs) + return outputs + +def conv(inputs, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + inputs = convert_to_tensor(inputs) + kernel = convert_to_tensor(kernel) + num_spatial_dims = inputs.ndim - 2 + strides = standardize_tuple(strides, num_spatial_dims, 'strides') + data_format = backend.standardize_data_format(data_format) + if data_format == 'channels_last': + inputs = _transpose_spatial_inputs(inputs) + kernel = _transpose_conv_kernel(kernel) + if padding == 'same' and any((d != 1 for d in tree.flatten(strides))): + (inputs, padding) = _apply_same_padding(inputs, kernel.shape[2:], strides, operation_type='conv', dilation_rate=dilation_rate) + channels = inputs.shape[1] + kernel_in_channels = kernel.shape[1] + if channels % kernel_in_channels > 0: + raise ValueError(f'The number of input channels must be evenly divisible by kernel.shape[1]. Received: inputs.shape={inputs.shape}, kernel.shape={kernel.shape}') + groups = channels // kernel_in_channels + if num_spatial_dims == 1: + outputs = tnn.conv1d(inputs, kernel, stride=strides, dilation=dilation_rate, groups=groups, padding=padding) + elif num_spatial_dims == 2: + outputs = tnn.conv2d(inputs, kernel, stride=strides, dilation=dilation_rate, groups=groups, padding=padding) + elif num_spatial_dims == 3: + outputs = tnn.conv3d(inputs, kernel, stride=strides, dilation=dilation_rate, groups=groups, padding=padding) + else: + raise ValueError(f'Inputs to conv operation should have ndim=3, 4, or 5, corresponding to 1D, 2D and 3D inputs. Received input shape: {inputs.shape}.')
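+ # Note: in the generic conv path above, `groups` is inferred as
+ # channels // kernel.shape[1]; `depthwise_conv` below exploits this by
+ # reshaping its kernel so kernel.shape[1] becomes 1, which makes `groups`
+ # equal the input channel count. Also, the string 'same' is forwarded to
+ # torch only when every stride is 1 (torch supports padding='same' solely
+ # for unit strides); otherwise `_apply_same_padding` pre-pads the input
+ # explicitly.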
+ if data_format == 'channels_last': + outputs = _transpose_spatial_outputs(outputs) + return outputs + +def depthwise_conv(inputs, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + kernel = convert_to_tensor(kernel) + kernel = torch.reshape(kernel, kernel.shape[:-2] + (1, kernel.shape[-2] * kernel.shape[-1])) + return conv(inputs, kernel, strides, padding, data_format, dilation_rate) + +def separable_conv(inputs, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + depthwise_conv_output = depthwise_conv(inputs, depthwise_kernel, strides, padding, data_format, dilation_rate) + return conv(depthwise_conv_output, pointwise_kernel, strides=1, padding='valid', data_format=data_format, dilation_rate=dilation_rate) + +def conv_transpose(inputs, kernel, strides=1, padding='valid', output_padding=None, data_format=None, dilation_rate=1): + inputs = convert_to_tensor(inputs) + kernel = convert_to_tensor(kernel) + num_spatial_dims = inputs.ndim - 2 + strides = standardize_tuple(strides, num_spatial_dims, 'strides') + data_format = backend.standardize_data_format(data_format) + (torch_padding, torch_output_padding) = compute_conv_transpose_padding_args_for_torch(input_shape=inputs.shape, kernel_shape=kernel.shape, strides=strides, padding=padding, output_padding=output_padding, dilation_rate=dilation_rate) + if data_format == 'channels_last': + inputs = _transpose_spatial_inputs(inputs) + kernel = _transpose_conv_kernel(kernel) + kernel_spatial_shape = kernel.shape[2:] + if isinstance(dilation_rate, int): + dilation_rate = [dilation_rate] * len(kernel_spatial_shape) + if num_spatial_dims == 1: + outputs = tnn.conv_transpose1d(inputs, kernel, stride=strides, padding=torch_padding, output_padding=torch_output_padding, dilation=dilation_rate) + elif num_spatial_dims == 2: + outputs = tnn.conv_transpose2d(inputs, kernel, stride=strides, padding=torch_padding, output_padding=torch_output_padding, dilation=dilation_rate) + elif num_spatial_dims == 3: + outputs = tnn.conv_transpose3d(inputs, kernel, stride=strides, padding=torch_padding, output_padding=torch_output_padding, dilation=dilation_rate) + else: + raise ValueError(f'Inputs to conv transpose operation should have ndim=3, 4, or 5, corresponding to 1D, 2D and 3D inputs. 
Received input shape: {inputs.shape}.') + if data_format == 'channels_last': + outputs = _transpose_spatial_outputs(outputs) + return outputs + +def one_hot(x, num_classes, axis=-1, dtype='float32', sparse=False): + if sparse: + raise ValueError('Unsupported value `sparse=True` with torch backend') + x = convert_to_tensor(x, dtype=torch.long) + zero = convert_to_tensor(0, dtype=torch.long) + output = tnn.one_hot(maximum(x, 0), num_classes) + output = where(expand_dims(x, axis=-1) >= 0, output, zero) + output = convert_to_tensor(output, dtype=dtype) + dims = output.dim() + if axis != -1 and axis != dims: + new_axes_order = list(range(dims)) + new_axes_order[axis] = -1 + for ax in range(axis + 1, dims): + new_axes_order[ax] -= 1 + output = output.permute(new_axes_order) + return output + +def multi_hot(x, num_classes, axis=-1, dtype='float32', sparse=False): + if sparse: + raise ValueError('Unsupported value `sparse=True` with torch backend') + x = convert_to_tensor(x) + reduction_axis = 1 if len(x.shape) > 1 else 0 + outputs = torch.amax(one_hot(cast(x, 'int32'), num_classes, axis=axis, dtype=dtype), dim=reduction_axis) + return outputs + +def categorical_crossentropy(target, output, from_logits=False, axis=-1): + target = convert_to_tensor(target) + output = convert_to_tensor(output) + if target.shape != output.shape: + raise ValueError(f'Arguments `target` and `output` must have the same shape. Received: target.shape={target.shape}, output.shape={output.shape}') + if len(target.shape) < 1: + raise ValueError(f'Arguments `target` and `output` must be at least rank 1. Received: target.shape={target.shape}, output.shape={output.shape}') + if from_logits: + log_prob = tnn.log_softmax(output, dim=axis) + else: + output = output / torch.sum(output, dim=axis, keepdim=True) + output = torch.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) + log_prob = torch.log(output) + return -torch.sum(target * log_prob, dim=axis) + +def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): + target = convert_to_tensor(target, dtype=torch.long) + output = convert_to_tensor(output) + if len(target.shape) == len(output.shape) and target.shape[-1] == 1: + target = torch.squeeze(target, dim=-1) + if len(output.shape) < 1: + raise ValueError(f'Argument `output` must be at least rank 1. Received: output.shape={output.shape}') + if target.shape != output.shape[:-1]: + raise ValueError(f'Arguments `target` and `output` must have the same shape up until the last dimension: target.shape={target.shape}, output.shape={output.shape}') + if from_logits: + log_prob = tnn.log_softmax(output, dim=axis) + else: + output = output / torch.sum(output, dim=axis, keepdim=True) + output = torch.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) + log_prob = torch.log(output) + target = one_hot(target, output.shape[axis], axis=axis) + return -torch.sum(target * log_prob, dim=axis) + +def binary_crossentropy(target, output, from_logits=False): + target = convert_to_tensor(target) + output = convert_to_tensor(output) + if target.shape != output.shape: + raise ValueError(f'Arguments `target` and `output` must have the same shape. 
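+ # Note: in the cross-entropy functions above, non-logit inputs are
+ # renormalized along `axis` and clipped to [epsilon, 1 - epsilon] before the
+ # log; `sparse_categorical_crossentropy` one-hot encodes its integer targets
+ # and reuses the dense formula.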
Received: target.shape={target.shape}, output.shape={output.shape}') + if from_logits: + return tnn.binary_cross_entropy_with_logits(output, target, reduction='none') + else: + output = torch.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) + return tnn.binary_cross_entropy(output, target, reduction='none') + +def moments(x, axes, keepdims=False, synchronized=False): + if synchronized: + raise NotImplementedError('Argument synchronized=True is not supported with PyTorch.') + x = convert_to_tensor(x) + need_cast = False + ori_dtype = backend.standardize_dtype(x.dtype) + if ori_dtype == 'float16': + need_cast = True + x = cast(x, 'float32') + mean = torch.mean(x, dim=axes, keepdim=True) + variance = torch.mean(torch.square(x), dim=axes, keepdim=True) - torch.square(mean) + if not keepdims: + mean = torch.squeeze(mean, axes) + variance = torch.squeeze(variance, axes) + if need_cast: + mean = torch.clip(mean, torch.finfo(torch.float16).min, torch.finfo(torch.float16).max) + variance = torch.clip(variance, torch.finfo(torch.float16).min, torch.finfo(torch.float16).max) + mean = cast(mean, ori_dtype) + variance = cast(variance, ori_dtype) + return (mean, variance) + +def batch_normalization(x, mean, variance, axis, offset=None, scale=None, epsilon=0.001): + x = convert_to_tensor(x) + mean = convert_to_tensor(mean) + variance = convert_to_tensor(variance) + shape = [1] * len(x.shape) + shape[axis] = mean.shape[0] + mean = torch.reshape(mean, shape) + variance = torch.reshape(variance, shape) + if offset is not None: + offset = convert_to_tensor(offset) + offset = torch.reshape(offset, shape) + else: + offset = torch.zeros_like(mean) + if scale is not None: + scale = convert_to_tensor(scale) + scale = torch.reshape(scale, shape) + else: + scale = torch.ones_like(variance) + return x.subtract(mean).mul_(variance.add(epsilon).rsqrt_().mul(scale)).add_(offset) + +def ctc_loss(target, output, target_length, output_length, mask_index=0): + target = convert_to_tensor(target) + output = convert_to_tensor(output) + target_length = convert_to_tensor(target_length) + output_length = convert_to_tensor(output_length) + dtype = backend.result_type(output.dtype, 'float32') + output = cast(output, dtype) + output = torch.transpose(output, 1, 0) + logits = tnn.log_softmax(output, dim=-1) + loss = tnn.ctc_loss(logits, target, output_length, target_length, blank=mask_index, reduction='none') + return loss + +def _ctc_greedy_decode(inputs, sequence_lengths, merge_repeated=True, mask_index=None): + inputs = convert_to_tensor(inputs) + sequence_lengths = convert_to_tensor(sequence_lengths, dtype='int32') + (batch_size, max_length, num_classes) = inputs.shape + if mask_index is None: + mask_index = num_classes - 1 + indices = torch.argmax(inputs, axis=-1) + indices = cast(indices, 'int32') + scores = torch.max(inputs, axis=-1)[0] + seqlen_mask = torch.arange(max_length, device=indices.device)[None, :] + seqlen_mask = seqlen_mask >= sequence_lengths[:, None] + indices = torch.where(seqlen_mask, mask_index, indices) + scores = torch.where(seqlen_mask, 0.0, scores) + if merge_repeated: + repeat = indices[:, 1:] == indices[:, :-1] + repeat = tnn.pad(repeat, (1, 0, 0, 0)) + indices = torch.where(repeat, mask_index, indices) + invalid_mask = indices == mask_index + indices = torch.where(invalid_mask, -1, indices) + order = torch.unsqueeze(torch.arange(max_length, device=indices.device), dim=0) + order = torch.tile(order, (batch_size, 1)) + order = torch.where(invalid_mask, max_length, order) + order = 
torch.argsort(order, dim=-1) + indices = torch.take_along_dim(indices, order, dim=-1) + scores = -torch.sum(scores, axis=1)[:, None] + indices = torch.unsqueeze(indices, dim=0) + return (indices, scores) + +def ctc_decode(inputs, sequence_lengths, strategy='greedy', beam_width=100, top_paths=1, merge_repeated=True, mask_index=0): + inputs = convert_to_tensor(inputs) + dtype = backend.result_type(inputs.dtype, 'float32') + inputs = cast(inputs, dtype) + if strategy == 'greedy': + return _ctc_greedy_decode(inputs, sequence_lengths, merge_repeated=merge_repeated, mask_index=mask_index) + elif strategy == 'beam_search': + raise NotImplementedError("Torch backend doesn't yet support the beam search strategy for CTC decoding.") + else: + raise ValueError(f"Invalid strategy {strategy}. Supported values are 'greedy' and 'beam_search'.") + +def psnr(x1, x2, max_val): + if x1.shape != x2.shape: + raise ValueError(f'Input shapes {x1.shape} and {x2.shape} must match for PSNR calculation.') + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + max_val = convert_to_tensor(max_val, dtype=x1.dtype) + mse = torch.mean((x1 - x2) ** 2) + psnr = 20 * torch.log10(max_val) - 10 * torch.log10(mse) + return psnr + +# File: keras-master/keras/src/backend/torch/numpy.py +import builtins +import math +import numpy as np +import torch +from keras.src.backend import KerasTensor +from keras.src.backend import config +from keras.src.backend.common import dtypes +from keras.src.backend.common.backend_utils import canonicalize_axis +from keras.src.backend.common.backend_utils import to_tuple_or_list +from keras.src.backend.common.backend_utils import vectorize_impl +from keras.src.backend.common.variables import standardize_dtype +from keras.src.backend.torch.core import cast +from keras.src.backend.torch.core import convert_to_tensor +from keras.src.backend.torch.core import get_device +from keras.src.backend.torch.core import is_tensor +from keras.src.backend.torch.core import to_torch_dtype +TORCH_INT_TYPES = (torch.int8, torch.int16, torch.int32, torch.int64) + +def add(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return torch.add(x1, x2) + +def einsum(subscripts, *operands, **kwargs): + operands = [convert_to_tensor(operand) for operand in operands] + dtypes_to_resolve = list(set((standardize_dtype(x.dtype) for x in operands))) + if len(dtypes_to_resolve) == 1 and dtypes_to_resolve[0] == 'int8': + compute_dtype = 'int32' + if get_device() == 'cuda': + compute_dtype = config.floatx() + operands = [cast(operand, compute_dtype) for operand in operands] + return cast(torch.einsum(subscripts, *operands), 'int32') + return torch.einsum(subscripts, *operands) + +def subtract(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + if standardize_dtype(x1.dtype) == 'bool': + x1 = cast(x1, x2.dtype) + if standardize_dtype(x2.dtype) == 'bool': + x2 = cast(x2, x1.dtype) + return torch.subtract(x1, x2) + +def matmul(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + + def can_use_int_matmul(x1, x2): + if get_device() != 'cuda': + return False + x1_dtype = standardize_dtype(x1.dtype) + x2_dtype = standardize_dtype(x2.dtype) + if x1_dtype != 'int8' or x2_dtype != 'int8': + return False + x1_shape = x1.shape + x2_shape = x2.shape + if x1.ndim != 2 or x2.ndim != 2: + return False + if x1_shape[0] <= 16 or x1_shape[1] < 16 or x1_shape[1] % 8 != 0: + return False + if x2_shape[0] < 16 or x2_shape[0] % 8 != 0 or x2_shape[1] % 8 != 0: + return False + return True + if 
can_use_int_matmul(x1, x2): + return torch._int_mm(x1, x2) + x1_dtype = standardize_dtype(x1.dtype) + x2_dtype = standardize_dtype(x2.dtype) + if x1_dtype == 'int8' and x2_dtype == 'int8': + result_dtype = 'int32' + else: + result_dtype = dtypes.result_type(x1.dtype, x2.dtype) + compute_dtype = result_dtype + if compute_dtype == 'bool': + compute_dtype = config.floatx() + if get_device() == 'cpu' and compute_dtype == 'float16': + compute_dtype = 'float32' + if get_device() == 'cuda' and 'int' in compute_dtype: + compute_dtype = config.floatx() + x1 = cast(x1, compute_dtype) + x2 = cast(x2, compute_dtype) + return cast(torch.matmul(x1, x2), result_dtype) + +def multiply(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return torch.multiply(x1, x2) + +def mean(x, axis=None, keepdims=False): + if isinstance(x, (list, tuple)): + x = stack(x) + x = convert_to_tensor(x) + if axis == () or axis == []: + return x + axis = to_tuple_or_list(axis) + ori_dtype = standardize_dtype(x.dtype) + compute_dtype = dtypes.result_type(x.dtype, 'float32') + if 'int' in ori_dtype or ori_dtype == 'bool': + result_dtype = compute_dtype + else: + result_dtype = ori_dtype + result = torch.mean(x, axis, keepdims, dtype=to_torch_dtype(compute_dtype)) + return cast(result, result_dtype) + +def max(x, axis=None, keepdims=False, initial=None): + x = convert_to_tensor(x) + if 0 in x.shape: + if initial is None: + raise ValueError('Cannot compute the max of an empty tensor.') + elif keepdims: + return torch.full((1,) * len(x.shape), initial) + else: + return torch.tensor(initial) + if axis is None: + result = torch.max(x) + else: + result = amax(x, axis=axis, keepdims=keepdims) + if isinstance(getattr(result, 'values', None), torch.Tensor): + result = result.values + if initial is not None: + dtype = to_torch_dtype(result.dtype) + initial = convert_to_tensor(initial, dtype=dtype) + return torch.maximum(result, torch.full(result.shape, initial, dtype=dtype)) + return result + +def ones(shape, dtype=None): + dtype = to_torch_dtype(dtype or config.floatx()) + if isinstance(shape, int): + shape = (shape,) + return torch.ones(size=shape, dtype=dtype, device=get_device()) + +def zeros(shape, dtype=None): + dtype = to_torch_dtype(dtype or config.floatx()) + if isinstance(shape, int): + shape = (shape,) + return torch.zeros(size=shape, dtype=dtype, device=get_device()) + +def zeros_like(x, dtype=None): + x = convert_to_tensor(x) + dtype = to_torch_dtype(dtype or x.dtype) + return torch.zeros_like(x, dtype=dtype) + +def absolute(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'bool': + return x + return torch.abs(x) + +def abs(x): + return absolute(x) + +def all(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + if axis is None: + return cast(torch.all(x), 'bool') + axis = to_tuple_or_list(axis) + for a in axis: + x = torch.all(x, dim=a, keepdim=keepdims) + return cast(x, 'bool') + +def any(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + if axis is None: + return cast(torch.any(x), 'bool') + axis = to_tuple_or_list(axis) + for a in axis: + x = torch.any(x, dim=a, keepdim=keepdims) + return cast(x, 'bool') + +def amax(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + if axis is None: + return torch.amax(x) + if axis == () or axis == []: + return x + return torch.amax(x, dim=axis, keepdim=keepdims) + +def amin(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + if axis is None: + return torch.amin(x) + if axis == () or axis == []: + return x + return 
torch.amin(x, dim=axis, keepdim=keepdims) + +def append(x1, x2, axis=None): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + if axis is None: + return torch.cat((x1.flatten(), x2.flatten())) + return torch.cat((x1, x2), dim=axis) + +def arange(start, stop=None, step=1, dtype=None): + if dtype is None: + dtypes_to_resolve = [getattr(start, 'dtype', type(start)), getattr(step, 'dtype', type(step))] + if stop is not None: + dtypes_to_resolve.append(getattr(stop, 'dtype', type(stop))) + dtype = dtypes.result_type(*dtypes_to_resolve) + dtype = to_torch_dtype(dtype) + if stop is None: + return torch.arange(end=start, dtype=dtype, device=get_device()) + return torch.arange(start, stop, step=step, dtype=dtype, device=get_device()) + +def arccos(x): + x = convert_to_tensor(x) + return torch.arccos(x) + +def arccosh(x): + x = convert_to_tensor(x) + return torch.arccosh(x) + +def arcsin(x): + x = convert_to_tensor(x) + return torch.arcsin(x) + +def arcsinh(x): + x = convert_to_tensor(x) + return torch.arcsinh(x) + +def arctan(x): + x = convert_to_tensor(x) + return torch.arctan(x) + +def arctan2(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + result_dtype = dtypes.result_type(x1.dtype, x2.dtype, float) + compute_dtype = result_dtype + if get_device() == 'cpu' and compute_dtype == 'float16': + compute_dtype = 'float32' + x1 = cast(x1, compute_dtype) + x2 = cast(x2, compute_dtype) + return cast(torch.arctan2(x1, x2), result_dtype) + +def arctanh(x): + x = convert_to_tensor(x) + return torch.arctanh(x) + +def argmax(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'bool': + x = cast(x, 'uint8') + return cast(torch.argmax(x, dim=axis, keepdim=keepdims), dtype='int32') + +def argmin(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'bool': + x = cast(x, 'uint8') + return cast(torch.argmin(x, dim=axis, keepdim=keepdims), dtype='int32') + +def argsort(x, axis=-1): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'bool': + x = cast(x, 'uint8') + if axis is None: + axis = -1 + x = x.reshape(-1) + return cast(torch.argsort(x, dim=axis, stable=True), dtype='int32') + +def array(x, dtype=None): + return convert_to_tensor(x, dtype=dtype) + +def average(x, axis=None, weights=None): + x = convert_to_tensor(x) + dtypes_to_resolve = [x.dtype, float] + if weights is not None: + weights = convert_to_tensor(weights) + dtypes_to_resolve.append(weights.dtype) + dtype = dtypes.result_type(*dtypes_to_resolve) + x = cast(x, dtype) + if weights is not None: + weights = cast(weights, dtype) + if axis == () or axis == []: + return x + if weights is not None: + return torch.sum(torch.mul(x, weights), dim=axis) / torch.sum(weights, dim=-1) + return torch.mean(x, axis) + +def bincount(x, weights=None, minlength=0, sparse=False): + if sparse: + raise ValueError('Unsupported value `sparse=True` with torch backend') + x = convert_to_tensor(x) + dtypes_to_resolve = [x.dtype] + if weights is not None: + weights = convert_to_tensor(weights) + dtypes_to_resolve.append(weights.dtype) + dtype = dtypes.result_type(*dtypes_to_resolve) + else: + dtype = 'int32' + if len(x.shape) == 2: + if weights is None: + + def bincount_fn(arr): + return torch.bincount(arr, minlength=minlength) + bincounts = list(map(bincount_fn, x)) + else: + + def bincount_fn(arr_w): + return torch.bincount(arr_w[0], weights=arr_w[1], minlength=minlength) + bincounts = list(map(bincount_fn, zip(x, weights))) + return 
cast(torch.stack(bincounts), dtype) + return cast(torch.bincount(x, weights, minlength), dtype) + +def bitwise_and(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return torch.bitwise_and(x, y) + +def bitwise_invert(x): + x = convert_to_tensor(x) + return torch.bitwise_not(x) + +def bitwise_not(x): + return bitwise_invert(x) + +def bitwise_or(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return torch.bitwise_or(x, y) + +def bitwise_xor(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return torch.bitwise_xor(x, y) + +def bitwise_left_shift(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return torch.bitwise_left_shift(x, y) + +def left_shift(x, y): + return bitwise_left_shift(x, y) + +def bitwise_right_shift(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + return torch.bitwise_right_shift(x, y) + +def right_shift(x, y): + return bitwise_right_shift(x, y) + +def broadcast_to(x, shape): + x = convert_to_tensor(x) + return torch.broadcast_to(x, shape) + +def ceil(x): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if ori_dtype == 'bool': + x = cast(x, 'uint8') + elif get_device() == 'cpu' and ori_dtype == 'float16': + x = cast(x, config.floatx()) + if ori_dtype == 'int64': + dtype = config.floatx() + else: + dtype = dtypes.result_type(ori_dtype, float) + return cast(torch.ceil(x), dtype=dtype) + +def clip(x, x_min, x_max): + x = convert_to_tensor(x) + x_min = convert_to_tensor(x_min) + x_max = convert_to_tensor(x_max) + ori_dtype = standardize_dtype(x.dtype) + if get_device() == 'cpu' and ori_dtype == 'float16': + x = cast(x, 'float32') + return cast(torch.clip(x, min=x_min, max=x_max), 'float16') + if ori_dtype == 'bool': + x = cast(x, 'int32') + return torch.clip(x, min=x_min, max=x_max) + +def concatenate(xs, axis=0): + xs = [convert_to_tensor(x) for x in xs] + return torch.cat(xs, dim=axis) + +def conjugate(x): + if not isinstance(x, torch.Tensor): + x = torch.from_numpy(x) + return torch.conj(x).resolve_conj() + +def conj(x): + if not isinstance(x, torch.Tensor): + x = torch.from_numpy(x) + return torch.conj(x).resolve_conj() + +def copy(x): + x = convert_to_tensor(x) + return torch.clone(x) + +def cos(x): + x = convert_to_tensor(x) + return torch.cos(x) + +def cosh(x): + x = convert_to_tensor(x) + return torch.cosh(x) + +def count_nonzero(x, axis=None): + x = convert_to_tensor(x) + if axis == () or axis == []: + return cast(torch.ne(x, 0), 'int32') + return cast(torch.count_nonzero(x, dim=axis).T, 'int32') + +def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=-1): + if axisa != -1 or axisb != -1 or axisc != -1: + raise ValueError(f'Torch backend does not support `axisa`, `axisb`, or `axisc`. Received: axisa={axisa}, axisb={axisb}, axisc={axisc}. 
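+ # Note: `cross` supports only the single `axis` argument (numpy's
+ # `axisa`/`axisb`/`axisc` are rejected above) and, like `ceil` and `clip`
+ # earlier, works around missing torch kernels by upcasting: bfloat16 on CUDA
+ # and float16 on CPU are computed in float32, then cast back.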
Please use `axis` arg in torch backend.') + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + compute_dtype = dtypes.result_type(x1.dtype, x2.dtype) + result_dtype = compute_dtype + if get_device() == 'cuda' and compute_dtype == 'bfloat16': + compute_dtype = 'float32' + elif get_device() == 'cpu' and compute_dtype == 'float16': + compute_dtype = 'float32' + x1 = cast(x1, compute_dtype) + x2 = cast(x2, compute_dtype) + return cast(torch.cross(x1, x2, dim=axis), result_dtype) + +def cumprod(x, axis=None, dtype=None): + x = convert_to_tensor(x) + if axis is None: + x = x.flatten() + axis = 0 + dtype = dtypes.result_type(dtype or x.dtype) + if dtype == 'bool': + dtype = 'int32' + elif get_device() == 'cpu' and dtype == 'float16': + return cast(torch.cumprod(x, dim=axis, dtype=to_torch_dtype('float32')), 'float16') + return torch.cumprod(x, dim=axis, dtype=to_torch_dtype(dtype)) + +def cumsum(x, axis=None, dtype=None): + x = convert_to_tensor(x) + if axis is None: + x = x.flatten() + axis = 0 + dtype = dtypes.result_type(dtype or x.dtype) + if dtype == 'bool': + dtype = 'int32' + elif get_device() == 'cpu' and dtype == 'float16': + return cast(torch.cumsum(x, dim=axis, dtype=to_torch_dtype('float32')), 'float16') + return torch.cumsum(x, dim=axis, dtype=to_torch_dtype(dtype)) + +def diag(x, k=0): + x = convert_to_tensor(x) + return torch.diag(x, diagonal=k) + +def diagonal(x, offset=0, axis1=0, axis2=1): + x = convert_to_tensor(x) + return torch.diagonal(x, offset=offset, dim1=axis1, dim2=axis2) + +def diff(a, n=1, axis=-1): + a = convert_to_tensor(a) + return torch.diff(a, n=n, dim=axis) + +def digitize(x, bins): + x = convert_to_tensor(x) + bins = convert_to_tensor(bins) + if standardize_dtype(x.dtype) == 'bool': + x = cast(x, 'uint8') + return cast(torch.bucketize(x, bins, right=True), 'int32') + +def dot(x, y): + x = convert_to_tensor(x) + y = convert_to_tensor(y) + result_dtype = dtypes.result_type(x.dtype, y.dtype) + compute_dtype = dtypes.result_type(result_dtype, float) + if get_device() == 'cpu' and compute_dtype == 'float16': + compute_dtype = 'float32' + x = cast(x, compute_dtype) + y = cast(y, compute_dtype) + if x.ndim == 0 or y.ndim == 0: + return cast(torch.multiply(x, y), result_dtype) + return cast(torch.matmul(x, y), result_dtype) + +def empty(shape, dtype=None): + dtype = to_torch_dtype(dtype or config.floatx()) + return torch.empty(size=shape, dtype=dtype, device=get_device()) + +def equal(x1, x2): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + return torch.eq(x1, x2) + +def exp(x): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + x = cast(x, config.floatx()) + return torch.exp(x) + +def expand_dims(x, axis): + x = convert_to_tensor(x) + axis = to_tuple_or_list(axis) + out_ndim = len(x.shape) + len(axis) + axis = sorted([canonicalize_axis(a, out_ndim) for a in axis]) + for a in axis: + x = torch.unsqueeze(x, dim=a) + return x + +def expm1(x): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + x = cast(x, config.floatx()) + return torch.expm1(x) + +def flip(x, axis=None): + x = convert_to_tensor(x) + if axis is None: + axis = tuple(range(x.ndim)) + axis = to_tuple_or_list(axis) + return torch.flip(x, dims=axis) + +def floor(x): + x = convert_to_tensor(x) + dtype = config.floatx() if standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + x = cast(x, dtype) + return torch.floor(x) + +def 
full(shape, fill_value, dtype=None): + dtype = to_torch_dtype(dtype) + fill_value = convert_to_tensor(fill_value, dtype=dtype) + if len(fill_value.shape) > 0: + expand_size = len(shape) - len(fill_value.shape) + tile_shape = tuple(shape[:expand_size]) + (1,) * len(fill_value.shape) + return torch.tile(fill_value, tile_shape) + return torch.full(size=shape, fill_value=fill_value, dtype=dtype, device=get_device()) + +def full_like(x, fill_value, dtype=None): + dtype = dtype or x.dtype + return full(shape=x.shape, fill_value=fill_value, dtype=dtype) + +def greater(x1, x2): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + return torch.greater(x1, x2) + +def greater_equal(x1, x2): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + return torch.greater_equal(x1, x2) + +def hstack(xs): + xs = [convert_to_tensor(x) for x in xs] + return torch.hstack(xs) + +def identity(n, dtype=None): + dtype = to_torch_dtype(dtype or config.floatx()) + if get_device() == 'cpu' and dtype == torch.bfloat16: + return cast(torch.eye(n, dtype=to_torch_dtype('float32'), device=get_device()), dtype) + return torch.eye(n, dtype=dtype, device=get_device()) + +def imag(x): + if not isinstance(x, torch.Tensor): + x = torch.from_numpy(x) + return torch.imag(x) + +def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + result_dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = cast(x1, result_dtype) + x2 = cast(x2, result_dtype) + return torch.isclose(x1, x2, rtol, atol, equal_nan) + +def isfinite(x): + x = convert_to_tensor(x) + return torch.isfinite(x) + +def isinf(x): + x = convert_to_tensor(x) + return torch.isinf(x) + +def isnan(x): + x = convert_to_tensor(x) + return torch.isnan(x) + +def less(x1, x2): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + return torch.less(x1, x2) + +def less_equal(x1, x2): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + return torch.less_equal(x1, x2) + +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): + if axis != 0: + raise ValueError(f'torch.linspace does not support an `axis` argument. 
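+ # Note: `linspace` implements only axis=0. Tensor-valued `start`/`stop` are
+ # handled manually below: a normalized step vector
+ # torch.arange(num) / (num - 1) is unsqueezed to broadcast against the
+ # endpoints and interpolated as start + steps * (stop - start); scalar
+ # endpoints fall through to `torch.linspace`.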
Received axis={axis}') + if dtype is None: + dtypes_to_resolve = [getattr(start, 'dtype', type(start)), getattr(stop, 'dtype', type(stop)), float] + dtype = dtypes.result_type(*dtypes_to_resolve) + dtype = to_torch_dtype(dtype) + step = convert_to_tensor(torch.nan) + if endpoint: + if num > 1: + step = (stop - start) / (num - 1) + else: + if num > 0: + step = (stop - start) / num + if num > 1: + stop = stop - (stop - start) / num + if hasattr(start, '__len__') and hasattr(stop, '__len__'): + start = convert_to_tensor(start, dtype=dtype) + stop = convert_to_tensor(stop, dtype=dtype) + steps = torch.arange(num, dtype=dtype, device=get_device()) / (num - 1) + for i in range(start.ndim): + steps = steps.unsqueeze(-1) + linspace = start[None] + steps * (stop - start)[None] + else: + linspace = torch.linspace(start=start, end=stop, steps=num, dtype=dtype, device=get_device()) + if retstep is True: + return (linspace, step) + return linspace + +def log(x): + x = convert_to_tensor(x) + return torch.log(x) + +def log10(x): + x = convert_to_tensor(x) + return torch.log10(x) + +def log1p(x): + x = convert_to_tensor(x) + return torch.log1p(x) + +def log2(x): + x = convert_to_tensor(x) + return torch.log2(x) + +def logaddexp(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype, float) + if get_device() == 'cpu' and dtype == 'float16': + x1 = cast(x1, 'float32') + x2 = cast(x2, 'float32') + return cast(torch.logaddexp(x1, x2), dtype) + else: + x1 = cast(x1, dtype) + x2 = cast(x2, dtype) + return torch.logaddexp(x1, x2) + +def logical_and(x1, x2): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + return torch.logical_and(x1, x2) + +def logical_not(x): + x = convert_to_tensor(x) + return torch.logical_not(x) + +def logical_or(x1, x2): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + return torch.logical_or(x1, x2) + +def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0): + if axis != 0: + raise ValueError(f'torch.logspace does not support an `axis` argument. 
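+ # Note: as elsewhere in this file, float16 on CPU is computed in float32 and
+ # cast back (`logaddexp` above; the `torch.logspace` call below likewise
+ # switches to a float32 compute dtype), since several torch CPU kernels lack
+ # half-precision support.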
Received axis={axis}') + if dtype is None: + dtypes_to_resolve = [getattr(start, 'dtype', type(start)), getattr(stop, 'dtype', type(stop)), float] + dtype = dtypes.result_type(*dtypes_to_resolve) + dtype = to_torch_dtype(dtype) + if endpoint is False: + stop = stop - (stop - start) / num + if hasattr(start, '__len__') and hasattr(stop, '__len__'): + start = convert_to_tensor(start, dtype=dtype) + stop = convert_to_tensor(stop, dtype=dtype) + steps = torch.arange(num, dtype=dtype, device=get_device()) / (num - 1) + for i in range(start.ndim): + steps = steps.unsqueeze(-1) + linspace = start[None] + steps * (stop - start)[None] + logspace = base ** linspace + else: + compute_dtype = dtype + if get_device() == 'cpu' and dtype == torch.float16: + compute_dtype = torch.float32 + logspace = cast(torch.logspace(start=start, end=stop, steps=num, base=base, dtype=compute_dtype, device=get_device()), dtype) + return logspace + +def maximum(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return torch.maximum(x1, x2) + +def median(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + compute_dtype = dtypes.result_type(x.dtype, 'float32') + result_dtype = dtypes.result_type(x.dtype, float) + x = cast(x, compute_dtype) + if axis is None and keepdims is False: + return cast(torch.median(x), result_dtype) + elif isinstance(axis, int): + return cast(torch.median(x, dim=axis, keepdim=keepdims)[0], result_dtype) + if axis is None: + y = reshape(x, [-1]) + else: + axis = [canonicalize_axis(a, x.ndim) for a in axis] + other_dims = sorted(set(range(x.ndim)).difference(axis)) + perm = other_dims + list(axis) + x_permed = torch.permute(x, dims=perm) + x_shape = list(x.shape) + other_shape = [x_shape[i] for i in other_dims] + end_shape = [math.prod([x_shape[i] for i in axis])] + full_shape = other_shape + end_shape + y = reshape(x_permed, full_shape) + y = torch.median(y, dim=-1)[0] + if keepdims: + if axis is None: + for _ in range(x.ndim): + y = expand_dims(y, axis=-1) + else: + for i in sorted(axis): + y = expand_dims(y, axis=i) + return cast(y, result_dtype) + +def meshgrid(*x, indexing='xy'): + x = [convert_to_tensor(sc_tensor) for sc_tensor in x] + return torch.meshgrid(x, indexing=indexing) + +def min(x, axis=None, keepdims=False, initial=None): + x = convert_to_tensor(x) + if 0 in x.shape: + if initial is None: + raise ValueError('Cannot compute the min of an empty tensor.') + elif keepdims: + return torch.full((1,) * len(x.shape), initial) + else: + return torch.tensor(initial) + if axis is None: + result = torch.min(x) + else: + result = amin(x, axis=axis, keepdims=keepdims) + if isinstance(getattr(result, 'values', None), torch.Tensor): + result = result.values + if initial is not None: + dtype = to_torch_dtype(result.dtype) + initial = convert_to_tensor(initial, dtype=dtype) + return torch.minimum(result, initial) + return result + +def minimum(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1 = convert_to_tensor(x1, dtype) + x2 = convert_to_tensor(x2, dtype) + return torch.minimum(x1, x2) + +def mod(x1, x2): + x1 = convert_to_tensor(x1) + x2 = 
convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + if dtype == 'bool': + x1 = cast(x1, 'int32') + x2 = cast(x2, 'int32') + return torch.remainder(x1, x2) + +def moveaxis(x, source, destination): + x = convert_to_tensor(x) + return torch.moveaxis(x, source=source, destination=destination) + +def nan_to_num(x, nan=0.0, posinf=None, neginf=None): + x = convert_to_tensor(x) + return torch.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) + +def ndim(x): + x = convert_to_tensor(x) + return x.ndim + +def nonzero(x): + x = convert_to_tensor(x) + return cast(torch.nonzero(x).T, 'int32') + +def not_equal(x1, x2): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + return torch.not_equal(x1, x2) + +def ones_like(x, dtype=None): + x = convert_to_tensor(x) + dtype = to_torch_dtype(dtype or x.dtype) + return torch.ones_like(x, dtype=dtype) + +def outer(x1, x2): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + return torch.outer(x1.flatten(), x2.flatten()) + +def pad(x, pad_width, mode='constant', constant_values=None): + kwargs = {} + if constant_values is not None: + if mode != 'constant': + raise ValueError(f"Argument `constant_values` can only be provided when `mode == 'constant'`. Received: mode={mode}") + kwargs['value'] = constant_values + x = convert_to_tensor(x) + pad_sum = [] + pad_width = list(pad_width)[::-1] + pad_width_sum = 0 + for pad in pad_width: + pad_width_sum += pad[0] + pad[1] + for pad in pad_width: + pad_sum += pad + pad_width_sum -= pad[0] + pad[1] + if pad_width_sum == 0: + break + if mode == 'symmetric': + mode = 'replicate' + if mode == 'constant': + return torch.nn.functional.pad(x, pad=pad_sum, mode=mode, **kwargs) + ori_dtype = x.dtype + ori_ndim = x.ndim + need_squeeze = False + if x.ndim < 3: + need_squeeze = True + new_dims = [1] * (3 - x.ndim) + x = x.view(*new_dims, *x.shape) + need_cast = False + if x.dtype not in (torch.float32, torch.float64): + need_cast = True + x = cast(x, torch.float32) + x = torch.nn.functional.pad(x, pad=pad_sum, mode=mode) + if need_cast: + x = cast(x, ori_dtype) + if need_squeeze: + x = torch.squeeze(x, dim=tuple(range(3 - ori_ndim))) + return x + +def prod(x, axis=None, keepdims=False, dtype=None): + x = convert_to_tensor(x) + if dtype is None: + dtype = dtypes.result_type(x.dtype) + if dtype == 'bool': + dtype = 'int32' + elif dtype in ('int8', 'int16'): + dtype = 'int32' + elif dtype == 'uint8': + dtype = 'int32' + compute_dtype = dtype + if get_device() == 'cpu' and compute_dtype == 'float16': + compute_dtype = 'float32' + if axis is None: + return cast(torch.prod(x, dtype=to_torch_dtype(compute_dtype)), dtype) + axis = to_tuple_or_list(axis) + for a in axis: + x = cast(torch.prod(x, dim=a, keepdim=keepdims, dtype=to_torch_dtype(compute_dtype)), dtype) + return x + +def quantile(x, q, axis=None, method='linear', keepdims=False): + x = convert_to_tensor(x) + q = convert_to_tensor(q) + axis = to_tuple_or_list(axis) + compute_dtype = dtypes.result_type(x.dtype, 'float32') + result_dtype = dtypes.result_type(x.dtype, float) + x = cast(x, compute_dtype) + if x.dtype != q.dtype: + q = cast(q, x.dtype) + if axis is None: + y = reshape(x, [-1]) + else: + axis = [canonicalize_axis(a, x.ndim) for a in axis] + other_dims = sorted(set(range(x.ndim)).difference(axis)) + perm = other_dims + list(axis) + x_permed = torch.permute(x, dims=perm) + x_shape = list(x.shape) + other_shape = [x_shape[i] for i in other_dims] + end_shape = [math.prod([x_shape[i] for i in axis])] + full_shape = other_shape + 
end_shape + y = reshape(x_permed, full_shape) + y = torch.quantile(y, q, dim=-1, interpolation=method) + if keepdims: + if axis is None: + for _ in range(x.ndim): + y = expand_dims(y, axis=-1) + else: + for i in sorted(axis): + i = i + 1 if q.ndim > 0 else i + y = expand_dims(y, axis=i) + return cast(y, result_dtype) + +def ravel(x): + x = convert_to_tensor(x) + return torch.ravel(x) + +def real(x): + if not isinstance(x, torch.Tensor): + x = torch.from_numpy(x) + return torch.real(x) + +def reciprocal(x): + x = convert_to_tensor(x) + return torch.reciprocal(x) + +def repeat(x, repeats, axis=None): + x = convert_to_tensor(x) + if get_device() == 'meta': + x = KerasTensor(x.shape, standardize_dtype(x.dtype)) + outputs = repeat(x, repeats, axis=axis) + return torch.empty(size=outputs.shape, dtype=to_torch_dtype(outputs.dtype), device=get_device()) + repeats = convert_to_tensor(repeats, dtype=int) + return torch.repeat_interleave(x, repeats, dim=axis) + +def reshape(x, newshape): + if not isinstance(newshape, (list, tuple)): + newshape = (newshape,) + x = convert_to_tensor(x) + return torch.reshape(x, newshape) + +def roll(x, shift, axis=None): + x = convert_to_tensor(x) + return torch.roll(x, shift, dims=axis) + +def searchsorted(sorted_sequence, values, side='left'): + if ndim(sorted_sequence) != 1: + raise ValueError(f'`searchsorted` only supports 1-D sorted sequences. You can use `keras.ops.vectorized_map` to extend it to N-D sequences. Received: sorted_sequence.shape={sorted_sequence.shape}') + out_int32 = len(sorted_sequence) <= np.iinfo(np.int32).max + return torch.searchsorted(sorted_sequence, values, side=side, out_int32=out_int32) + +def sign(x): + x = convert_to_tensor(x) + return torch.sign(x) + +def sin(x): + x = convert_to_tensor(x) + return torch.sin(x) + +def sinh(x): + x = convert_to_tensor(x) + return torch.sinh(x) + +def size(x): + x_shape = convert_to_tensor(tuple(x.shape)) + return torch.prod(x_shape) + +def sort(x, axis=-1): + x = convert_to_tensor(x) + if get_device() == 'cuda' and standardize_dtype(x.dtype) == 'bool': + x = cast(x, 'uint8') + return cast(torch.sort(x, dim=axis).values, 'bool') + return torch.sort(x, dim=axis).values + +def split(x, indices_or_sections, axis=0): + x = convert_to_tensor(x) + dim = x.shape[axis] + if not isinstance(indices_or_sections, int): + indices_or_sections = convert_to_tensor(indices_or_sections) + start_size = indices_or_sections[0:1] + end_size = dim - indices_or_sections[-1:] + chunk_sizes = torch.concat([start_size, torch.diff(indices_or_sections), end_size], dim=0) + chunk_sizes = chunk_sizes.tolist() + else: + if dim % indices_or_sections != 0: + raise ValueError(f'Received indices_or_sections={indices_or_sections} (interpreted as a number of sections) and axis={axis}, but input dimension x.shape[{axis}]={x.shape[axis]} is not divisible by {indices_or_sections}. 
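+ # Note: for index-style `indices_or_sections` (handled above), `split`
+ # converts numpy-style cut points into the chunk sizes `torch.split` expects
+ # using the first index, consecutive differences, and the remainder. E.g.
+ # (hypothetical values) with x.shape[axis] == 5 and
+ # indices_or_sections == [2, 3], chunk_sizes becomes [2, 1, 2].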
Full input shape: x.shape={x.shape}') + chunk_sizes = dim // indices_or_sections + out = torch.split(tensor=x, split_size_or_sections=chunk_sizes, dim=axis) + if dim == 0 and isinstance(indices_or_sections, int): + out = [out[0].clone() for _ in range(indices_or_sections)] + return list(out) + +def stack(x, axis=0): + x = [convert_to_tensor(elem) for elem in x] + return torch.stack(x, dim=axis) + +def std(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype or ori_dtype == 'bool': + x = cast(x, 'float32') + return torch.std(x, dim=axis, keepdim=keepdims, unbiased=False) + +def swapaxes(x, axis1, axis2): + x = convert_to_tensor(x) + return torch.swapaxes(x, axis0=axis1, axis1=axis2) + +def take(x, indices, axis=None): + x = convert_to_tensor(x) + indices = convert_to_tensor(indices).long() + x_dim = x.shape[axis] if axis is not None else x.shape[0] + indices = torch.where(indices < 0, indices + x_dim, indices) + if x.ndim == 2 and axis == 0: + return torch.nn.functional.embedding(indices, x) + if axis is None: + x = torch.reshape(x, (-1,)) + axis = 0 + if axis is not None: + axis = canonicalize_axis(axis, x.ndim) + shape = x.shape[:axis] + indices.shape + x.shape[axis + 1:] + indices = indices.ravel() + out = torch.index_select(x, dim=axis, index=indices).squeeze(axis) + return out.reshape(shape) + return torch.take(x, index=indices) + +def take_along_axis(x, indices, axis=None): + x = convert_to_tensor(x) + indices = convert_to_tensor(indices).long() + x_dim = x.shape[axis] if axis is not None else x.shape[0] + indices = torch.where(indices < 0, indices + x_dim, indices) + return torch.take_along_dim(x, indices, dim=axis) + +def tan(x): + x = convert_to_tensor(x) + return torch.tan(x) + +def tanh(x): + x = convert_to_tensor(x) + return torch.tanh(x) + +def tensordot(x1, x2, axes=2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + result_dtype = dtypes.result_type(x1.dtype, x2.dtype) + compute_dtype = dtypes.result_type(result_dtype, float) + if get_device() == 'cpu' and compute_dtype == 'float16': + compute_dtype = 'float32' + x1 = cast(x1, compute_dtype) + x2 = cast(x2, compute_dtype) + if isinstance(axes, (list, tuple)): + (first, second) = axes + if not isinstance(first, (list, tuple)): + first = (first,) + if not isinstance(second, (list, tuple)): + second = (second,) + axes = (first, second) + return cast(torch.tensordot(x1, x2, dims=axes), result_dtype) + +def round(x, decimals=0): + x = convert_to_tensor(x) + ori_dtype = standardize_dtype(x.dtype) + if 'int' in ori_dtype: + x = cast(x, config.floatx()) + return cast(torch.round(x, decimals=decimals), ori_dtype) + return torch.round(x, decimals=decimals) + +def tile(x, repeats): + if is_tensor(repeats): + repeats = tuple(repeats.int().numpy()) + if isinstance(repeats, int): + repeats = (repeats,) + x = convert_to_tensor(x) + return torch.tile(x, dims=repeats) + +def trace(x, offset=None, axis1=None, axis2=None): + x = convert_to_tensor(x) + dtype = standardize_dtype(x.dtype) + if dtype != 'int64': + dtype = dtypes.result_type(dtype, 'int32') + return torch.sum(torch.diagonal(x, offset, axis1, axis2), dim=-1, dtype=to_torch_dtype(dtype)) + +def tri(N, M=None, k=0, dtype=None): + dtype = to_torch_dtype(dtype or config.floatx()) + M = M or N + x = torch.ones((N, M), dtype=dtype, device=get_device()) + return torch.tril(x, diagonal=k) + +def tril(x, k=0): + x = convert_to_tensor(x) + return torch.tril(x, diagonal=k) + +def triu(x, k=0): + x = 
convert_to_tensor(x) + return torch.triu(x, diagonal=k) + +def trunc(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'bool': + return x + return torch.trunc(x) + +def vdot(x1, x2): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + result_dtype = dtypes.result_type(x1.dtype, x2.dtype) + compute_dtype = dtypes.result_type(result_dtype, float) + if get_device() == 'cpu' and compute_dtype == 'float16': + compute_dtype = 'float32' + x1 = cast(x1, compute_dtype) + x2 = cast(x2, compute_dtype) + return cast(torch.vdot(x1, x2), result_dtype) + +def vstack(xs): + xs = [convert_to_tensor(x) for x in xs] + return torch.vstack(xs) + +def vectorize(pyfunc, *, excluded=None, signature=None): + return vectorize_impl(pyfunc, torch.vmap, excluded=excluded, signature=signature) + +def where(condition, x1, x2): + condition = convert_to_tensor(condition, dtype=bool) + if x1 is not None and x2 is not None: + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + return torch.where(condition, x1, x2) + else: + return torch.where(condition) + +def divide(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + return torch.divide(x1, x2) + +def divide_no_nan(x1, x2): + if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + return torch.where(x2 == 0, 0, torch.divide(x1, x2)) + +def true_divide(x1, x2): + return divide(x1, x2) + +def power(x1, x2): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + return torch.pow(x1, x2) + +def negative(x): + x = convert_to_tensor(x) + return torch.negative(x) + +def square(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'bool': + x = cast(x, 'int32') + return torch.square(x) + +def sqrt(x): + x = convert_to_tensor(x) + if standardize_dtype(x.dtype) == 'int64': + x = cast(x, config.floatx()) + return torch.sqrt(x) + +def squeeze(x, axis=None): + x = convert_to_tensor(x) + if axis is not None: + return torch.squeeze(x, dim=axis) + return torch.squeeze(x) + +def transpose(x, axes=None): + x = convert_to_tensor(x) + if axes is not None: + return torch.permute(x, dims=axes) + return x.T + +def var(x, axis=None, keepdims=False): + x = convert_to_tensor(x) + compute_dtype = dtypes.result_type(x.dtype, 'float32') + result_dtype = dtypes.result_type(x.dtype, float) + if axis == [] or axis == (): + return zeros_like(x, result_dtype) + x = cast(x, compute_dtype) + return cast(torch.var(x, dim=axis, keepdim=keepdims, correction=0), result_dtype) + +def sum(x, axis=None, keepdims=False): + if isinstance(x, (list, tuple)): + x = stack(x) + x = convert_to_tensor(x) + if axis == () or axis == []: + return x + dtype = standardize_dtype(x.dtype) + if dtype in ('bool', 'uint8', 'int8', 'int16'): + dtype = 'int32' + if axis is not None: + return cast(torch.sum(x, axis=axis, keepdim=keepdims), dtype) + return cast(torch.sum(x), dtype) + +def eye(N, M=None, k=None, dtype=None): + dtype = to_torch_dtype(dtype or config.floatx()) + M = N if M is None else M + k = 0 if k is None else k + if k == 0: + if get_device() == 'cpu' and dtype == torch.bfloat16: + return cast(torch.eye(N, M, dtype=to_torch_dtype('float32'), device=get_device()), dtype) + return torch.eye(N, M, dtype=dtype, device=get_device()) + diag_length = builtins.max(N, M) + diag = torch.ones(diag_length, dtype=dtype, device=get_device()) + return torch.diag(diag, diagonal=k)[:N, :M] + +def floor_divide(x1, x2): 
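+ # Python scalars are deliberately left un-converted below so they act as weak types in dtype promotion. + # The torch.floor_divide result is then cast to the Keras-computed result dtype, since torch applies its own promotion rules. + # Floor division rounds toward negative infinity, e.g. floor_divide(-7, 2) == -4, matching NumPy.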
+ if not isinstance(x1, (int, float)): + x1 = convert_to_tensor(x1) + if not isinstance(x2, (int, float)): + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + return cast(torch.floor_divide(x1, x2), dtype) + +def logical_xor(x1, x2): + (x1, x2) = (convert_to_tensor(x1), convert_to_tensor(x2)) + return torch.logical_xor(x1, x2) + +def correlate(x1, x2, mode='valid'): + x1 = convert_to_tensor(x1) + x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + if dtype == 'int64': + dtype = 'float64' + elif dtype not in ['bfloat16', 'float16', 'float64']: + dtype = 'float32' + x1 = cast(x1, dtype) + x2 = cast(x2, dtype) + (x1_len, x2_len) = (x1.size(0), x2.size(0)) + if x1.shape[:-1] != x2.shape[:-1]: + new_shape = [max(i, j) for (i, j) in zip(x1.shape[:-1], x2.shape[:-1])] + x1 = torch.broadcast_to(x1, new_shape + [x1.shape[-1]]) + x2 = torch.broadcast_to(x2, new_shape + [x2.shape[-1]]) + num_signals = torch.tensor(x1.shape[:-1]).prod() + x1 = torch.reshape(x1, (int(num_signals), x1.size(-1))) + x2 = torch.reshape(x2, (int(num_signals), x2.size(-1))) + output = torch.nn.functional.conv1d(x1, x2.unsqueeze(1), groups=x1.size(0), padding=x2.size(-1) - 1) + output_shape = x1.shape[:-1] + (-1,) + result = output.reshape(output_shape) + if mode == 'valid': + target_length = builtins.max(x1_len, x2_len) - builtins.min(x1_len, x2_len) + 1 + start_idx = (result.size(-1) - target_length) // 2 + result = result[..., start_idx:start_idx + target_length] + if mode == 'same': + start_idx = (result.size(-1) - x1_len) // 2 + result = result[..., start_idx:start_idx + x1_len] + return torch.squeeze(result) + +def select(condlist, choicelist, default=0): + condlist = [convert_to_tensor(c) for c in condlist] + choicelist = [convert_to_tensor(c) for c in choicelist] + out = convert_to_tensor(default) + for (c, v) in reversed(list(zip(condlist, choicelist))): + out = torch.where(c, v, out) + return out + +def slogdet(x): + x = convert_to_tensor(x) + return tuple(torch.linalg.slogdet(x)) + +def argpartition(x, kth, axis=-1): + x = convert_to_tensor(x, 'int32') + x = torch.transpose(x, axis, -1) + bottom_ind = torch.topk(-x, kth + 1)[1] + + def set_to_zero(a, i): + a[i] = torch.zeros(1, dtype=a.dtype, device=a.device) + return a + for _ in range(x.dim() - 1): + set_to_zero = torch.vmap(set_to_zero) + proxy = set_to_zero(torch.ones_like(x, dtype=torch.int32), bottom_ind) + top_ind = torch.topk(proxy, x.shape[-1] - kth - 1)[1] + out = torch.cat([bottom_ind, top_ind], dim=x.dim() - 1) + return cast(torch.transpose(out, -1, axis), 'int32') + +# File: keras-master/keras/src/backend/torch/optimizers/torch_adadelta.py +import torch +from keras.src import ops +from keras.src import optimizers +from keras.src.backend.torch.optimizers import torch_parallel_optimizer + +class Adadelta(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adadelta): + + def _parallel_update_step(self, grads, variables, learning_rate): + keras_variables = variables + variables = [v.value for v in variables] + dtype = variables[0].dtype + lr = ops.cast(learning_rate, dtype) + rho = self.rho + accumulated_grads = [self._accumulated_grads[self._get_variable_index(variable)].value for variable in keras_variables] + accumulated_delta_vars = [self._accumulated_delta_vars[self._get_variable_index(variable)].value for variable in keras_variables] + torch._foreach_mul_(accumulated_grads, rho) + 
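# Fused accumulator update across the whole variable list at once: acc_grad <- rho * acc_grad + (1 - rho) * grad**2. + # The rho scaling was applied by the _foreach_mul_ call above; with the default rho=0.95 this keeps an exponential moving average of the squared gradients. + 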
torch._foreach_add_(accumulated_grads, torch._foreach_mul(grads, grads), alpha=1 - rho) + + def rms(x): + return torch._foreach_sqrt(torch._foreach_add(x, self.epsilon)) + delta_vars = torch._foreach_mul(torch._foreach_div(torch._foreach_mul(rms(accumulated_delta_vars), grads), rms(accumulated_grads)), -1) + torch._foreach_mul_(accumulated_delta_vars, rho) + torch._foreach_add_(accumulated_delta_vars, torch._foreach_mul(delta_vars, delta_vars), alpha=1 - rho) + torch._foreach_add_(variables, delta_vars, alpha=lr) + +# File: keras-master/keras/src/backend/torch/optimizers/torch_adagrad.py +import torch +from keras.src import ops +from keras.src import optimizers +from keras.src.backend.torch.optimizers import torch_parallel_optimizer + +class Adagrad(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adagrad): + + def _parallel_update_step(self, grads, variables, learning_rate): + keras_variables = variables + variables = [v.value for v in variables] + dtype = variables[0].dtype + lr = ops.cast(learning_rate, dtype) + accumulators = [self._accumulators[self._get_variable_index(variable)].value for variable in keras_variables] + torch._foreach_add_(accumulators, torch._foreach_mul(grads, grads)) + torch._foreach_add_(variables, torch._foreach_div(torch._foreach_mul(grads, lr), torch._foreach_sqrt(torch._foreach_add(accumulators, self.epsilon))), alpha=-1) + +# File: keras-master/keras/src/backend/torch/optimizers/torch_adam.py +import torch +from keras.src import ops +from keras.src import optimizers +from keras.src.backend.torch.optimizers import torch_parallel_optimizer + +class Adam(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adam): + + def _parallel_update_step(self, grads, variables, learning_rate): + keras_variables = variables + variables = [v.value for v in variables] + dtype = variables[0].dtype + lr = ops.cast(learning_rate, dtype) + local_step = ops.cast(self.iterations + 1, dtype) + beta_1_power = ops.power(ops.cast(self.beta_1, dtype), local_step) + beta_2_power = ops.power(ops.cast(self.beta_2, dtype), local_step) + alpha = lr * ops.sqrt(1 - beta_2_power) / (1 - beta_1_power) + m_list = [self._momentums[self._get_variable_index(variable)].value for variable in keras_variables] + v_list = [self._velocities[self._get_variable_index(variable)].value for variable in keras_variables] + torch._foreach_mul_(m_list, self.beta_1) + torch._foreach_add_(m_list, grads, alpha=1 - self.beta_1) + torch._foreach_mul_(v_list, self.beta_2) + torch._foreach_add_(v_list, torch._foreach_mul(grads, grads), alpha=1 - self.beta_2) + if self.amsgrad: + v_hat_list = [self._velocity_hats[self._get_variable_index(variable)].value for variable in keras_variables] + torch._foreach_maximum_(v_hat_list, v_list) + v_list = v_hat_list + torch._foreach_add_(variables, torch._foreach_div(torch._foreach_mul(m_list, alpha), torch._foreach_add(torch._foreach_sqrt(v_list), self.epsilon)), alpha=-1) + +# File: keras-master/keras/src/backend/torch/optimizers/torch_adamax.py +import torch +from keras.src import ops +from keras.src import optimizers +from keras.src.backend.torch.optimizers import torch_parallel_optimizer + +class Adamax(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adamax): + + def _parallel_update_step(self, grads, variables, learning_rate): + keras_variables = variables + variables = [v.value for v in variables] + dtype = variables[0].dtype + lr = ops.cast(learning_rate, dtype) + local_step = ops.cast(self.iterations + 1, dtype) + beta_1_power = 
ops.power(ops.cast(self.beta_1, dtype), local_step) + m_list = [self._m[self._get_variable_index(variable)].value for variable in keras_variables] + u_list = [self._u[self._get_variable_index(variable)].value for variable in keras_variables] + torch._foreach_mul_(m_list, self.beta_1) + torch._foreach_add_(m_list, grads, alpha=1 - self.beta_1) + torch._foreach_mul_(u_list, self.beta_2) + torch._foreach_maximum_(u_list, torch._foreach_abs(grads)) + torch._foreach_add_(variables, torch._foreach_div(torch._foreach_mul(m_list, lr), torch._foreach_mul(torch._foreach_add(u_list, self.epsilon), 1 - beta_1_power)), alpha=-1) + +# File: keras-master/keras/src/backend/torch/optimizers/torch_lion.py +import torch +from keras.src import ops +from keras.src import optimizers +from keras.src.backend.torch.optimizers import torch_parallel_optimizer + +class Lion(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Lion): + + def _parallel_update_step(self, grads, variables, learning_rate): + keras_variables = variables + variables = [v.value for v in variables] + dtype = variables[0].dtype + lr = ops.cast(learning_rate, dtype) + m_list = [self._momentums[self._get_variable_index(variable)].value for variable in keras_variables] + c_t = torch._foreach_mul(m_list, self.beta_1) + torch._foreach_add_(c_t, grads, alpha=1 - self.beta_1) + c_t = [c.sign() for c in c_t] + torch._foreach_add_(variables, torch._foreach_mul(c_t, lr), alpha=-1) + torch._foreach_mul_(m_list, self.beta_2) + torch._foreach_add_(m_list, grads, alpha=1 - self.beta_2) + +# File: keras-master/keras/src/backend/torch/optimizers/torch_nadam.py +import torch +from keras.src import ops +from keras.src import optimizers +from keras.src.backend.torch import core +from keras.src.backend.torch.optimizers import torch_parallel_optimizer + +class Nadam(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Nadam): + + def _parallel_update_step(self, grads, variables, learning_rate): + keras_variables = variables + variables = [v.value for v in variables] + dtype = variables[0].dtype + lr = ops.cast(learning_rate, dtype) + local_step = ops.cast(self.iterations + 1, dtype) + next_step = ops.cast(self.iterations + 2, dtype) + decay = ops.cast(0.96, dtype) + beta_1 = ops.cast(self.beta_1, dtype) + beta_2 = ops.cast(self.beta_2, dtype) + u_t = beta_1 * (1.0 - 0.5 * ops.power(decay, local_step)) + u_t_1 = beta_1 * (1.0 - 0.5 * ops.power(decay, next_step)) + u_product_t = self._u_product.value * u_t + u_product_t_1 = u_product_t * u_t_1 + beta_2_power = ops.power(beta_2, local_step) + self._u_product.assign(u_product_t) + m_list = [self._momentums[self._get_variable_index(variable)].value for variable in keras_variables] + v_list = [self._velocities[self._get_variable_index(variable)].value for variable in keras_variables] + torch._foreach_mul_(m_list, self.beta_1) + torch._foreach_add_(m_list, grads, alpha=1 - self.beta_1) + torch._foreach_mul_(v_list, self.beta_2) + torch._foreach_add_(v_list, torch._foreach_mul(grads, grads), alpha=1 - self.beta_2) + m_hat_list = torch._foreach_add(torch._foreach_div(torch._foreach_mul(m_list, u_t_1), 1 - core.convert_to_numpy(u_product_t_1)), torch._foreach_div(torch._foreach_mul(grads, 1 - u_t), 1 - core.convert_to_numpy(u_product_t))) + v_hat_list = torch._foreach_div(v_list, 1 - beta_2_power) + torch._foreach_add_(variables, torch._foreach_div(torch._foreach_mul(m_hat_list, lr), torch._foreach_add(torch._foreach_sqrt(v_hat_list), self.epsilon)), alpha=-1) + +# File: 
keras-master/keras/src/backend/torch/optimizers/torch_optimizer.py +import torch +from keras.src import optimizers +from keras.src.optimizers.base_optimizer import BaseOptimizer +from keras.src.utils import torch_utils + +class TorchOptimizer(BaseOptimizer): + + def __new__(cls, *args, **kwargs): + from keras.src.backend.torch.optimizers import torch_adadelta + from keras.src.backend.torch.optimizers import torch_adagrad + from keras.src.backend.torch.optimizers import torch_adam + from keras.src.backend.torch.optimizers import torch_adamax + from keras.src.backend.torch.optimizers import torch_adamw + from keras.src.backend.torch.optimizers import torch_lion + from keras.src.backend.torch.optimizers import torch_nadam + from keras.src.backend.torch.optimizers import torch_rmsprop + from keras.src.backend.torch.optimizers import torch_sgd + OPTIMIZERS = {optimizers.Adadelta: torch_adadelta.Adadelta, optimizers.Adagrad: torch_adagrad.Adagrad, optimizers.Adam: torch_adam.Adam, optimizers.Adamax: torch_adamax.Adamax, optimizers.AdamW: torch_adamw.AdamW, optimizers.Lion: torch_lion.Lion, optimizers.Nadam: torch_nadam.Nadam, optimizers.RMSprop: torch_rmsprop.RMSprop, optimizers.SGD: torch_sgd.SGD} + if cls in OPTIMIZERS: + return OPTIMIZERS[cls](*args, **kwargs) + return super().__new__(cls) + + @torch_utils.no_grad + def _apply_weight_decay(self, variables): + if self.weight_decay is None: + return + torch._foreach_mul_([v.value for v in variables if self._use_weight_decay(v)], 1 - self.weight_decay * self._get_current_learning_rate()) + +# File: keras-master/keras/src/backend/torch/optimizers/torch_parallel_optimizer.py +import torch +from keras.src.optimizers.base_optimizer import BaseOptimizer +from keras.src.utils import torch_utils + +class TorchParallelOptimizer(BaseOptimizer): + + @torch_utils.no_grad + def _backend_update_step(self, grads, trainable_variables, learning_rate): + self._parallel_update_step(grads, trainable_variables, learning_rate) + + @torch_utils.no_grad + def _backend_reset_gradient_accumulators(self): + acc_list = [v.value for v in self._accumulated_gradients] + torch._foreach_mul_(acc_list, 0.0) + + @torch_utils.no_grad + def _backend_increment_gradient_accumulators(self, grads, acc_grads): + acc_list = [v.value for v in acc_grads] + torch._foreach_add_(acc_list, grads, alpha=1.0) + +# File: keras-master/keras/src/backend/torch/optimizers/torch_rmsprop.py +import torch +from keras.src import ops +from keras.src import optimizers +from keras.src.backend.torch.optimizers import torch_parallel_optimizer + +class RMSprop(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.RMSprop): + + def _parallel_update_step(self, grads, variables, learning_rate): + keras_variables = variables + variables = [v.value for v in variables] + dtype = variables[0].dtype + lr = ops.cast(learning_rate, dtype) + velocities = [self._velocities[self._get_variable_index(variable)].value for variable in keras_variables] + rho = self.rho + torch._foreach_mul_(velocities, rho) + torch._foreach_add_(velocities, torch._foreach_mul(grads, grads), alpha=1 - rho) + denominators = torch._foreach_add(velocities, self.epsilon) + if self.centered: + average_grads = [self._average_gradients[self._get_variable_index(variable)].value for variable in keras_variables] + torch._foreach_mul_(average_grads, rho) + torch._foreach_add_(average_grads, grads, alpha=1 - rho) + torch._foreach_add_(denominators, torch._foreach_mul(average_grads, average_grads), alpha=-1) + torch._foreach_sqrt_(denominators) + 
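# At this point denominators holds sqrt(E[g^2] + epsilon); centered RMSprop additionally subtracts E[g]^2 before the sqrt. + # The per-variable step is increment = lr * grad / denominator. With momentum > 0 the increments are folded into the momentum buffers below; otherwise they are applied to the variables directly. + 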
increments = torch._foreach_div(torch._foreach_mul(grads, lr), denominators) + if self.momentum > 0: + momentum_list = [self._momentums[self._get_variable_index(variable)].value for variable in keras_variables] + torch._foreach_mul_(momentum_list, self.momentum) + torch._foreach_add_(momentum_list, increments) + torch._foreach_add_(variables, momentum_list, alpha=-1) + else: + torch._foreach_add_(variables, increments, alpha=-1) + +# File: keras-master/keras/src/backend/torch/optimizers/torch_sgd.py +import torch +from keras.src import optimizers +from keras.src.backend.torch.optimizers import torch_parallel_optimizer + +class SGD(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.SGD): + + def _parallel_update_step(self, grads, variables, learning_rate): + keras_variables = variables + variables = [v.value for v in variables] + if self.momentum != 0: + bufs = [self.momentums[self._get_variable_index(variable)].value for variable in keras_variables] + for i in range(len(bufs)): + if bufs[i] is None: + bufs[i] = torch.clone(grads[i]).detach() + torch._foreach_mul_(bufs, self.momentum) + torch._foreach_add_(bufs, grads, alpha=-learning_rate) + if self.nesterov: + torch._foreach_add_(variables, grads, alpha=-learning_rate) + torch._foreach_add_(variables, bufs, alpha=self.momentum) + else: + torch._foreach_add_(variables, bufs) + else: + torch._foreach_add_(variables, grads, alpha=-learning_rate) + +# File: keras-master/keras/src/backend/torch/random.py +import torch +import torch._dynamo as dynamo +import torch.nn.functional as tnn +from keras.src.backend.config import floatx +from keras.src.backend.torch.core import convert_to_tensor +from keras.src.backend.torch.core import get_device +from keras.src.backend.torch.core import to_torch_dtype +from keras.src.random.seed_generator import SeedGenerator +from keras.src.random.seed_generator import draw_seed +from keras.src.random.seed_generator import make_default_seed + +@dynamo.disable() +def torch_seed_generator(seed): + (first_seed, second_seed) = draw_seed(seed) + device = get_device() + if device == 'meta': + return None + generator = torch.Generator(device=get_device()) + generator.manual_seed(int(first_seed + second_seed)) + return generator + +def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + dtype = dtype or floatx() + dtype = to_torch_dtype(dtype) + if get_device() == 'meta': + return torch.normal(mean, stddev, size=shape, dtype=dtype, device=get_device()) + generator = torch_seed_generator(seed) + return torch.normal(mean, stddev, size=shape, generator=generator, dtype=dtype, device=get_device()) + +def categorical(logits, num_samples, dtype='int32', seed=None): + logits = convert_to_tensor(logits) + dtype = to_torch_dtype(dtype) + probs = torch.softmax(logits, dim=-1) + if get_device() == 'meta': + return torch.multinomial(probs, num_samples, replacement=True).type(dtype) + generator = torch_seed_generator(seed) + return torch.multinomial(probs, num_samples, replacement=True, generator=generator).type(dtype) + +def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): + dtype = dtype or floatx() + dtype = to_torch_dtype(dtype) + requested_shape = shape + if len(requested_shape) == 0: + shape = (1,) + if get_device() == 'meta': + rand_tensor = torch.rand(size=shape, dtype=dtype, device=get_device()) + else: + generator = torch_seed_generator(seed) + rand_tensor = torch.rand(size=shape, generator=generator, dtype=dtype, device=get_device()) + output = (maxval - minval) * rand_tensor + minval + if 
len(requested_shape) == 0: + return output[0] + return output + +def randint(shape, minval, maxval, dtype='int32', seed=None): + dtype = to_torch_dtype(dtype) + if get_device() == 'meta': + return torch.randint(low=minval, high=maxval, size=shape, dtype=dtype, device=get_device()) + generator = torch_seed_generator(seed) + return torch.randint(low=minval, high=maxval, size=shape, generator=generator, dtype=dtype, device=get_device()) + +def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + dtype = to_torch_dtype(dtype) + x = normal(tuple(shape) + (4,), mean=0, stddev=1, dtype=dtype, seed=seed) + valid = (x > -2) & (x < 2) + indexes = valid.max(-1, keepdim=True)[1] + trunc_x = torch.empty(shape, dtype=dtype, device=get_device()) + trunc_x.data.copy_(x.gather(-1, indexes).squeeze(-1)) + trunc_x.data.mul_(stddev).add_(mean) + return trunc_x + +def _get_concrete_noise_shape(inputs, noise_shape): + if noise_shape is None: + return inputs.shape + concrete_inputs_shape = inputs.shape + concrete_noise_shape = [] + for (i, value) in enumerate(noise_shape): + concrete_noise_shape.append(concrete_inputs_shape[i] if value is None else value) + return concrete_noise_shape + +def dropout(inputs, rate, noise_shape=None, seed=None): + if seed is not None and (not (isinstance(seed, SeedGenerator) and seed._initial_seed is None)) or noise_shape is not None: + keep_prob = 1.0 - rate + noise_shape = _get_concrete_noise_shape(inputs, noise_shape) + keep_prob_matrix = torch.full(noise_shape, keep_prob, device=get_device()) + generator = torch_seed_generator(seed) + if get_device() == 'meta': + mask = torch.bernoulli(keep_prob_matrix) + else: + mask = torch.bernoulli(keep_prob_matrix, generator=generator) + mask = mask.bool() + mask = torch.broadcast_to(mask, inputs.shape) + return torch.where(mask, inputs / keep_prob, torch.zeros_like(inputs, dtype=inputs.dtype)) + return torch.nn.functional.dropout(inputs, p=rate, training=True, inplace=False) + +def shuffle(x, axis=0, seed=None): + x = convert_to_tensor(x) + if get_device() == 'meta': + row_perm = torch.rand(x.shape[:axis + 1], device=get_device()).argsort(axis) + else: + generator = torch_seed_generator(seed) + row_perm = torch.rand(x.shape[:axis + 1], generator=generator, device=get_device()).argsort(axis) + for _ in range(x.ndim - axis - 1): + row_perm.unsqueeze_(-1) + row_perm = row_perm.repeat(*[1 for _ in range(axis + 1)], *x.shape[axis + 1:]) + return x.gather(axis, row_perm) + +def gamma(shape, alpha, dtype=None, seed=None): + dtype = dtype or floatx() + dtype = to_torch_dtype(dtype) + alpha = torch.broadcast_to(convert_to_tensor(alpha), shape) + beta = torch.ones(shape, device=get_device()) + prev_rng_state = torch.random.get_rng_state() + if not get_device() == 'meta': + (first_seed, second_seed) = draw_seed(seed) + torch.manual_seed(first_seed + second_seed) + gamma_distribution = torch.distributions.gamma.Gamma(alpha, beta) + sample = gamma_distribution.sample().type(dtype) + torch.random.set_rng_state(prev_rng_state) + return sample + +def binomial(shape, counts, probabilities, dtype=None, seed=None): + dtype = dtype or floatx() + dtype = to_torch_dtype(dtype) + counts = torch.broadcast_to(convert_to_tensor(counts), shape) + probabilities = torch.broadcast_to(convert_to_tensor(probabilities), shape) + prev_rng_state = torch.random.get_rng_state() + if not get_device() == 'meta': + (first_seed, second_seed) = draw_seed(seed) + torch.manual_seed(first_seed + second_seed) + binomial_distribution = 
torch.distributions.binomial.Binomial(total_count=counts, probs=probabilities) + sample = binomial_distribution.sample().type(dtype) + torch.random.set_rng_state(prev_rng_state) + return sample + +def beta(shape, alpha, beta, dtype=None, seed=None): + dtype = dtype or floatx() + dtype = to_torch_dtype(dtype) + alpha = torch.broadcast_to(convert_to_tensor(alpha), shape) + beta = torch.broadcast_to(convert_to_tensor(beta), shape) + prev_rng_state = torch.random.get_rng_state() + if not get_device() == 'meta': + (first_seed, second_seed) = draw_seed(seed) + torch.manual_seed(first_seed + second_seed) + beta_distribution = torch.distributions.beta.Beta(concentration1=alpha, concentration0=beta) + sample = beta_distribution.sample().type(dtype) + torch.random.set_rng_state(prev_rng_state) + return sample + +# File: keras-master/keras/src/backend/torch/rnn.py +import torch +from keras.src import tree +from keras.src.backend.torch.core import convert_to_tensor + +def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None, time_major=False, zero_output_for_mask=False, return_all_outputs=True): + input_length = input_length or inputs.shape[1] + + def swap_batch_timestep(input_t): + axes = list(range(len(input_t.shape))) + (axes[0], axes[1]) = (1, 0) + return torch.permute(input_t, axes) + if not time_major: + inputs = tree.map_structure(swap_batch_timestep, inputs) + flattened_inputs = tree.flatten(inputs) + time_steps = flattened_inputs[0].shape[0] + time_steps_t = time_steps + if mask is not None: + if mask.dtype != torch.bool: + mask = mask.type(torch.bool) + if len(mask.shape) == 2: + mask = torch.unsqueeze(mask, -1) + if not time_major: + mask = swap_batch_timestep(mask) + if constants is None: + constants = [] + + def _expand_mask(mask_t, input_t, fixed_dim=1): + if tree.is_nested(mask_t): + raise ValueError(f'mask_t is expected to be tensor, but got {mask_t}') + if tree.is_nested(input_t): + raise ValueError(f'input_t is expected to be tensor, but got {input_t}') + rank_diff = len(input_t.shape) - len(mask_t.shape) + for _ in range(rank_diff): + mask_t = torch.unsqueeze(mask_t, -1) + multiples = [1] * fixed_dim + list(input_t.shape[fixed_dim:]) + return torch.tile(mask_t, multiples) + if unroll: + if not time_steps: + raise ValueError('Unrolling requires a fixed number of timesteps.') + states = tuple(initial_states) + successive_states = [] + successive_outputs = [] + + def _process_single_input_t(input_t): + input_t = torch.unbind(input_t) + if go_backwards: + input_t = input_t[::-1] + return input_t + if tree.is_nested(inputs): + processed_input = tree.map_structure(_process_single_input_t, inputs) + else: + processed_input = (_process_single_input_t(inputs),) + + def _get_input_tensor(time): + inp = [t_[time] for t_ in processed_input] + return tree.pack_sequence_as(inputs, inp) + if mask is not None: + mask_list = torch.unbind(mask) + if go_backwards: + # torch.unbind returns a tuple of tensors, which torch.flip cannot reverse; reverse the sequence itself, mirroring input_t[::-1] above + mask_list = mask_list[::-1] + for i in range(time_steps): + inp = _get_input_tensor(i) + mask_t = mask_list[i] + (output, new_states) = step_function(inp, tuple(states) + tuple(constants)) + tiled_mask_t = _expand_mask(mask_t, output) + if not successive_outputs: + prev_output = torch.zeros_like(output) + else: + prev_output = successive_outputs[-1] + output = torch.where(tiled_mask_t, output, prev_output) + flat_states = tree.flatten(states) + flat_new_states = tree.flatten(new_states) + tiled_mask_t = tuple((_expand_mask(mask_t, s) for s in 
flat_states)) + flat_final_states = tuple((torch.where(m, s, ps) for (m, s, ps) in zip(tiled_mask_t, flat_new_states, flat_states))) + states = tree.pack_sequence_as(states, flat_final_states) + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: + successive_outputs = [output] + successive_states = [states] + last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = torch.stack(successive_outputs) + if zero_output_for_mask: + last_output = torch.where(_expand_mask(mask_list[-1], last_output), last_output, torch.zeros_like(last_output)) + outputs = torch.where(_expand_mask(mask, outputs, fixed_dim=2), outputs, torch.zeros_like(outputs)) + else: + for i in range(time_steps): + inp = _get_input_tensor(i) + (output, states) = step_function(inp, tuple(states) + tuple(constants)) + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: + successive_outputs = [output] + successive_states = [states] + last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = torch.stack(successive_outputs) + else: + states = tuple(initial_states) + input_ta = tuple((list(torch.unbind(input_)) if not go_backwards else list(torch.unbind(torch.flip(input_, [0]))) for input_ in flattened_inputs)) + input_time_zero = tree.pack_sequence_as(inputs, [inp[0] for inp in flattened_inputs]) + (output_time_zero, _) = step_function(input_time_zero, tuple(initial_states) + tuple(constants)) + output_ta_size = time_steps_t if return_all_outputs else 1 + output_ta = [] + for out in tree.flatten(output_time_zero): + out_list = list(out) + if len(out) < output_ta_size: + out_list.extend([[]] * (output_ta_size - len(out))) + output_ta.append(out_list) + time = torch.tensor(0, dtype=torch.int32) + if input_length is None: + max_iterations = time_steps_t + elif hasattr(input_length, '__len__'): + input_length = convert_to_tensor(input_length) + max_iterations = torch.max(input_length) + else: + max_iterations = input_length + if mask is not None: + if go_backwards: + mask = torch.flip(mask, [0]) + mask_ta = list(torch.unbind(mask)) + + def masking_fn(time): + return mask_ta[time] + + def compute_masked_output(mask_t, flat_out, flat_mask): + tiled_mask_t = tuple((_expand_mask(mask_t, o, fixed_dim=len(mask_t.shape)) for o in flat_out)) + return tuple((torch.where(m, o, fm) for (m, o, fm) in zip(tiled_mask_t, flat_out, flat_mask))) + elif isinstance(input_length, torch.Tensor): + if go_backwards: + max_len = torch.max(input_length, dim=0) + rev_input_length = torch.subtract(max_len - 1, input_length) + + def masking_fn(time): + return torch.less(rev_input_length, time) + else: + + def masking_fn(time): + return torch.greater(input_length, time) + + def compute_masked_output(mask_t, flat_out, flat_mask): + return tuple((torch.where(mask_t, o, zo) for (o, zo) in zip(flat_out, flat_mask))) + else: + masking_fn = None + if masking_fn is not None: + flat_zero_output = tuple((torch.zeros_like(o) for o in tree.flatten(output_time_zero))) + + def _step(time, output_ta_t, prev_output, *states): + current_input = tuple((ta[time] for ta in input_ta)) + current_input = tree.pack_sequence_as(inputs, current_input) + mask_t = masking_fn(time) + (output, new_states) = step_function(current_input, tuple(states) + tuple(constants)) + flat_output = tree.flatten(output) + flat_mask_output = flat_zero_output if zero_output_for_mask else tree.flatten(prev_output) + flat_new_output = 
compute_masked_output(mask_t, flat_output, flat_mask_output) + flat_state = tree.flatten(states) + flat_new_state = tree.flatten(new_states) + flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state) + new_states = tree.pack_sequence_as(new_states, flat_final_state) + ta_index_to_write = time if return_all_outputs else 0 + for (ta, out) in zip(output_ta_t, flat_new_output): + ta[ta_index_to_write] = out + return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states) + it = 0 + (output_ta_t, new_states, prev_output) = (output_ta, states, flat_zero_output) + while time < time_steps_t and it < max_iterations: + final_outputs = _step(time, output_ta_t, prev_output, *new_states) + (time, output_ta_t, prev_output) = final_outputs[:3] + new_states = final_outputs[3:] + it += 1 + else: + + def _step(time, output_ta_t, *states): + current_input = tuple((ta[time] for ta in input_ta)) + current_input = tree.pack_sequence_as(inputs, current_input) + (output, new_states) = step_function(current_input, tuple(states) + tuple(constants)) + flat_new_state = tree.flatten(new_states) + flat_output = tree.flatten(output) + ta_index_to_write = time if return_all_outputs else 0 + for (ta, out) in zip(output_ta_t, flat_output): + ta[ta_index_to_write] = out + new_states = tree.pack_sequence_as(initial_states, flat_new_state) + return (time + 1, output_ta_t) + tuple(new_states) + it = 0 + output_ta_t = output_ta + new_states = states + while time < time_steps_t and it < max_iterations: + final_outputs = _step(time, output_ta_t, *new_states) + (time, output_ta_t) = final_outputs[:2] + new_states = final_outputs[2:] + it += 1 + + def _stack(tensor_list): + max_ndims = max([t.ndim for t in tensor_list]) + max_list = [] + for (i, t) in enumerate(tensor_list): + if t.ndim == max_ndims: + max_list.append(t) + return torch.stack(max_list) + output_ta = final_outputs[1] + outputs = tuple((_stack(o) for o in output_ta)) + last_output = tuple((o[-1] for o in outputs)) + outputs = tree.pack_sequence_as(output_time_zero, outputs) + last_output = tree.pack_sequence_as(output_time_zero, last_output) + if not time_major: + outputs = tree.map_structure(swap_batch_timestep, outputs) + return (last_output, outputs, new_states) + +def cudnn_ok(*args, **kwargs): + return False + +def lstm(*args, **kwargs): + raise NotImplementedError + +def gru(*args, **kwargs): + raise NotImplementedError + +# File: keras-master/keras/src/backend/torch/trainer.py +import warnings +import numpy as np +import torch +from packaging.version import parse +from keras.src import backend +from keras.src import callbacks as callbacks_module +from keras.src import optimizers as optimizers_module +from keras.src import tree +from keras.src.trainers import trainer as base_trainer +from keras.src.trainers.data_adapters import array_slicing +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.epoch_iterator import EpochIterator +from keras.src.utils import traceback_utils + +class TorchTrainer(base_trainer.Trainer): + + def __init__(self): + super().__init__() + self.train_function = None + self.test_function = None + self.predict_function = None + + def _should_torch_compile(self): + if self.jit_compile and parse(torch.__version__) < parse('2.1.0'): + warnings.warn('Please upgrade to torch>=2.1.0 for `jit_compile=True` to take effect. 
Using `jit_compile=False`') + self.jit_compile = False + return self.jit_compile + + def train_step(self, data): + (x, y, sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(data) + if self._call_has_training_arg: + y_pred = self(x, training=True) + else: + y_pred = self(x) + self.zero_grad() + loss = self._compute_loss(x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=True) + self._loss_tracker.update_state(loss, sample_weight=tree.flatten(x)[0].shape[0]) + if self.optimizer is not None: + loss = self.optimizer.scale_loss(loss) + if self.trainable_weights: + loss.backward() + trainable_weights = self.trainable_weights[:] + gradients = [v.value.grad for v in trainable_weights] + with torch.no_grad(): + self.optimizer.apply(gradients, trainable_weights) + else: + warnings.warn('The model does not have any trainable weights.') + return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight) + + def test_step(self, data): + (x, y, sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(data) + if self._call_has_training_arg: + y_pred = self(x, training=False) + else: + y_pred = self(x) + loss = self._compute_loss(x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=False) + self._loss_tracker.update_state(loss, sample_weight=tree.flatten(x)[0].shape[0]) + return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight) + + def predict_step(self, data): + (x, _, _) = data_adapter_utils.unpack_x_y_sample_weight(data) + if self._call_has_training_arg: + y_pred = self(x, training=False) + else: + y_pred = self(x) + return y_pred + + def make_train_function(self, force=False): + if self.train_function is not None and (not force): + return self.train_function + if self.steps_per_execution > 1: + raise ValueError(f'`steps_per_execution` must be 1 with the PyTorch backend. Received: steps_per_execution={self.steps_per_execution}') + + def one_step_on_data(data): + data = data[0] + return self.train_step(data) + if self._should_torch_compile(): + self.train_function = torch.compile(one_step_on_data) + else: + self.train_function = one_step_on_data + + def make_test_function(self, force=False): + if self.test_function is not None and (not force): + return self.test_function + if self.steps_per_execution > 1: + raise ValueError(f'`steps_per_execution` must be 1 with the PyTorch backend. Received: steps_per_execution={self.steps_per_execution}') + + def one_step_on_data(data): + data = data[0] + with torch.no_grad(): + return self.test_step(data) + if self._should_torch_compile(): + self.test_function = torch.compile(one_step_on_data) + else: + self.test_function = one_step_on_data + + def make_predict_function(self, force=False): + if self.predict_function is not None and (not force): + return self.predict_function + if self.steps_per_execution > 1: + raise ValueError(f'`steps_per_execution` must be 1 with the PyTorch backend. 
Received: steps_per_execution={self.steps_per_execution}') + + def one_step_on_data(data): + data = data[0] + with torch.no_grad(): + return self.predict_step(data) + if self._should_torch_compile(): + self.predict_function = torch.compile(one_step_on_data) + else: + self.predict_function = one_step_on_data + + @traceback_utils.filter_traceback + def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose='auto', callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_batch_size=None, validation_freq=1): + if not self.compiled: + raise ValueError('You must call `compile()` before calling `fit()`.') + self._eval_epoch_iterator = None + if validation_split and validation_data is None: + ((x, y, sample_weight), validation_data) = array_slicing.train_validation_split((x, y, sample_weight), validation_split=validation_split) + if validation_data is not None: + (val_x, val_y, val_sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(validation_data) + epoch_iterator = TorchEpochIterator(x=x, y=y, sample_weight=sample_weight, batch_size=batch_size, steps_per_epoch=steps_per_epoch, shuffle=shuffle, class_weight=class_weight, steps_per_execution=self.steps_per_execution) + self._symbolic_build(iterator=epoch_iterator) + if not isinstance(callbacks, callbacks_module.CallbackList): + callbacks = callbacks_module.CallbackList(callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=epochs, steps=epoch_iterator.num_batches, model=self) + self.stop_training = False + self.make_train_function() + callbacks.on_train_begin() + initial_epoch = self._initial_epoch or initial_epoch + for epoch in range(initial_epoch, epochs): + self.reset_metrics() + callbacks.on_epoch_begin(epoch) + self.train() + logs = {} + for (step, data) in epoch_iterator.enumerate_epoch(): + callbacks.on_train_batch_begin(step) + logs = self.train_function(data) + logs = self._pythonify_logs(logs) + callbacks.on_train_batch_end(step, logs) + if self.stop_training: + break + epoch_logs = dict(self._get_metrics_result_or_logs(logs)) + self.eval() + if validation_data is not None and self._should_eval(epoch, validation_freq): + if getattr(self, '_eval_epoch_iterator', None) is None: + self._eval_epoch_iterator = TorchEpochIterator(x=val_x, y=val_y, sample_weight=val_sample_weight, batch_size=validation_batch_size or batch_size, steps_per_execution=self.steps_per_execution, steps_per_epoch=validation_steps, shuffle=False) + val_logs = self.evaluate(x=val_x, y=val_y, sample_weight=val_sample_weight, batch_size=validation_batch_size or batch_size, steps=validation_steps, callbacks=callbacks, return_dict=True, _use_cached_eval_dataset=True) + val_logs = {'val_' + name: val for (name, val) in val_logs.items()} + epoch_logs.update(val_logs) + callbacks.on_epoch_end(epoch, epoch_logs) + training_logs = epoch_logs + if self.stop_training: + break + if isinstance(self.optimizer, optimizers_module.Optimizer) and epochs > 0: + self.optimizer.finalize_variable_values(self.trainable_weights) + if getattr(self, '_eval_epoch_iterator', None) is not None: + del self._eval_epoch_iterator + callbacks.on_train_end(logs=training_logs) + return self.history + + @traceback_utils.filter_traceback + def evaluate(self, x=None, y=None, batch_size=None, verbose='auto', sample_weight=None, steps=None, callbacks=None, return_dict=False, **kwargs): + use_cached_eval_dataset = 
kwargs.pop('_use_cached_eval_dataset', False) + if kwargs: + raise ValueError(f'Arguments not recognized: {kwargs}') + if use_cached_eval_dataset: + epoch_iterator = self._eval_epoch_iterator + else: + epoch_iterator = TorchEpochIterator(x=x, y=y, sample_weight=sample_weight, batch_size=batch_size, steps_per_epoch=steps, shuffle=False, steps_per_execution=self.steps_per_execution) + self._symbolic_build(iterator=epoch_iterator) + if not isinstance(callbacks, callbacks_module.CallbackList): + callbacks = callbacks_module.CallbackList(callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=1, steps=epoch_iterator.num_batches, model=self) + self.eval() + self.make_test_function() + self.stop_evaluating = False + callbacks.on_test_begin() + logs = {} + self.reset_metrics() + for (step, data) in epoch_iterator.enumerate_epoch(): + callbacks.on_test_batch_begin(step) + logs = self.test_function(data) + logs = self._pythonify_logs(logs) + callbacks.on_test_batch_end(step, logs) + if self.stop_evaluating: + break + logs = self._get_metrics_result_or_logs(logs) + callbacks.on_test_end(logs) + if return_dict: + return logs + return self._flatten_metrics_in_order(logs) + + @traceback_utils.filter_traceback + def predict(self, x, batch_size=None, verbose='auto', steps=None, callbacks=None): + epoch_iterator = TorchEpochIterator(x=x, batch_size=batch_size, steps_per_epoch=steps, shuffle=False, steps_per_execution=self.steps_per_execution) + if not isinstance(callbacks, callbacks_module.CallbackList): + callbacks = callbacks_module.CallbackList(callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=1, steps=epoch_iterator.num_batches, model=self) + + def append_to_outputs(batch_outputs, outputs): + if outputs is None: + outputs = tree.map_structure(lambda batch_output: [batch_output], batch_outputs) + else: + tree.map_structure_up_to(batch_outputs, lambda output, batch_output: output.append(batch_output), outputs, batch_outputs) + return outputs + self.eval() + self.make_predict_function() + self.stop_predicting = False + callbacks.on_predict_begin() + outputs = None + for (step, data) in epoch_iterator.enumerate_epoch(): + callbacks.on_predict_batch_begin(step) + batch_outputs = self.predict_function(data) + outputs = append_to_outputs(batch_outputs, outputs) + callbacks.on_predict_batch_end(step, {'outputs': batch_outputs}) + if self.stop_predicting: + break + callbacks.on_predict_end() + outputs = tree.map_structure(backend.convert_to_numpy, outputs) + return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs) + + def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None, return_dict=False): + self._assert_compile_called('train_on_batch') + if class_weight is not None: + if sample_weight is not None: + raise ValueError(f'Arguments `sample_weight` and `class_weight` cannot be specified at the same time. 
Received: sample_weight={sample_weight}, class_weight={class_weight}') + sample_weight = data_adapter_utils.class_weight_to_sample_weights(y, class_weight) + data = (x, y, sample_weight) + self._symbolic_build(data_batch=data) + self.make_train_function() + logs = self.train_function([data]) + logs = tree.map_structure(lambda x: np.array(x), logs) + if return_dict: + return logs + return self._flatten_metrics_in_order(logs) + + def test_on_batch(self, x, y=None, sample_weight=None, return_dict=False): + self._assert_compile_called('test_on_batch') + data = (x, y, sample_weight) + self._symbolic_build(data_batch=data) + self.make_test_function() + logs = self.test_function([data]) + logs = tree.map_structure(lambda x: np.array(x), logs) + if return_dict: + return logs + return self._flatten_metrics_in_order(logs) + + def predict_on_batch(self, x): + self.make_predict_function() + batch_outputs = self.predict_function([(x,)]) + batch_outputs = tree.map_structure(backend.convert_to_numpy, batch_outputs) + return batch_outputs + +class TorchEpochIterator(EpochIterator): + + def _get_iterator(self): + return self.data_adapter.get_torch_dataloader() + +# File: keras-master/keras/src/callbacks/__init__.py +from keras.src.callbacks.backup_and_restore import BackupAndRestore +from keras.src.callbacks.callback import Callback +from keras.src.callbacks.callback_list import CallbackList +from keras.src.callbacks.csv_logger import CSVLogger +from keras.src.callbacks.early_stopping import EarlyStopping +from keras.src.callbacks.history import History +from keras.src.callbacks.lambda_callback import LambdaCallback +from keras.src.callbacks.learning_rate_scheduler import LearningRateScheduler +from keras.src.callbacks.model_checkpoint import ModelCheckpoint +from keras.src.callbacks.progbar_logger import ProgbarLogger +from keras.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau +from keras.src.callbacks.remote_monitor import RemoteMonitor +from keras.src.callbacks.swap_ema_weights import SwapEMAWeights +from keras.src.callbacks.tensorboard import TensorBoard +from keras.src.callbacks.terminate_on_nan import TerminateOnNaN + +# File: keras-master/keras/src/callbacks/backup_and_restore.py +import json +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback +from keras.src.utils import file_utils + +@keras_export('keras.callbacks.BackupAndRestore') +class BackupAndRestore(Callback): + + def __init__(self, backup_dir, save_freq='epoch', delete_checkpoint=True): + super().__init__() + self.save_freq = save_freq + self.delete_checkpoint = delete_checkpoint + self._batches_seen_since_last_saving = 0 + self._last_batch_seen = 0 + self._current_epoch = 0 + if not backup_dir: + raise ValueError('Empty `backup_dir` argument passed') + self.backup_dir = backup_dir + self._weights_path = file_utils.join(backup_dir, 'latest.weights.h5') + self._training_metadata_path = file_utils.join(backup_dir, 'training_metadata.json') + if save_freq != 'epoch' and (not isinstance(save_freq, int)): + raise ValueError(f"Invalid value for argument `save_freq`. Received: save_freq={save_freq}. Expected either 'epoch' or an integer value.") + + def on_train_begin(self, logs=None): + if not self.model.built: + raise ValueError(f'To use the BackupAndRestore callback, your model must be built before you call `fit()`. Model {self.model} is unbuilt. 
You can build it beforehand by calling it on a batch of data.') + if file_utils.exists(self._weights_path): + if self.model.optimizer is not None and (not self.model.optimizer.built): + self.model.optimizer.build(self.model.trainable_variables) + self.model.load_weights(self._weights_path) + if file_utils.exists(self._training_metadata_path): + with file_utils.File(self._training_metadata_path, 'r') as f: + training_metadata = json.loads(f.read()) + epoch = training_metadata['epoch'] + self.model._initial_epoch = epoch + + def on_epoch_end(self, epoch, logs=None): + self._current_epoch = epoch + 1 + self._last_batch_seen = 0 + if self.save_freq == 'epoch': + self._save_model() + + def on_train_batch_end(self, batch, logs=None): + if self._should_save_on_batch(batch): + self._save_model() + + def _save_model(self): + if not file_utils.exists(self.backup_dir): + file_utils.makedirs(self.backup_dir) + self.model.save_weights(filepath=self._weights_path, overwrite=True) + with file_utils.File(self._training_metadata_path, 'w') as f: + training_metadata = {'epoch': self._current_epoch, 'batch': self._last_batch_seen} + f.write(json.dumps(training_metadata)) + + def _should_save_on_batch(self, batch): + if self.save_freq == 'epoch': + return False + if batch <= self._last_batch_seen: + add_batches = batch + 1 + else: + add_batches = batch - self._last_batch_seen + self._batches_seen_since_last_saving += add_batches + self._last_batch_seen = batch + if self._batches_seen_since_last_saving >= self.save_freq: + self._batches_seen_since_last_saving = 0 + return True + return False + + def on_train_end(self, logs=None): + if self.delete_checkpoint and file_utils.exists(self.backup_dir): + file_utils.rmtree(self.backup_dir) + +# File: keras-master/keras/src/callbacks/callback.py +from keras.src import backend +from keras.src.api_export import keras_export + +@keras_export('keras.callbacks.Callback') +class Callback: + + def __init__(self): + self.params = None + self._model = None + + def set_params(self, params): + self.params = params + + def set_model(self, model): + self._model = model + + @property + def model(self): + if backend.backend() == 'jax' and hasattr(self._model, 'jax_state_sync'): + self._model.jax_state_sync() + return self._model + + def on_batch_begin(self, batch, logs=None): + + def on_batch_end(self, batch, logs=None): + + def on_epoch_begin(self, epoch, logs=None): + + def on_epoch_end(self, epoch, logs=None): + + def on_train_batch_begin(self, batch, logs=None): + self.on_batch_begin(batch, logs=logs) + + def on_train_batch_end(self, batch, logs=None): + self.on_batch_end(batch, logs=logs) + + def on_test_batch_begin(self, batch, logs=None): + + def on_test_batch_end(self, batch, logs=None): + + def on_predict_batch_begin(self, batch, logs=None): + + def on_predict_batch_end(self, batch, logs=None): + + def on_train_begin(self, logs=None): + + def on_train_end(self, logs=None): + + def on_test_begin(self, logs=None): + + def on_test_end(self, logs=None): + + def on_predict_begin(self, logs=None): + + def on_predict_end(self, logs=None): + +# File: keras-master/keras/src/callbacks/callback_list.py +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback +from keras.src.callbacks.history import History +from keras.src.callbacks.progbar_logger import ProgbarLogger + +@keras_export('keras.callbacks.CallbackList') +class CallbackList(Callback): + + def __init__(self, callbacks=None, add_history=False, 
add_progbar=False, model=None, **params): + self.callbacks = tree.flatten(callbacks) if callbacks else [] + self._add_default_callbacks(add_history, add_progbar) + if model: + self.set_model(model) + if params: + self.set_params(params) + + def _add_default_callbacks(self, add_history, add_progbar): + self._progbar = None + self._history = None + for cb in self.callbacks: + if isinstance(cb, ProgbarLogger): + self._progbar = cb + elif isinstance(cb, History): + self._history = cb + if self._history is None and add_history: + self._history = History() + self.callbacks.append(self._history) + if self._progbar is None and add_progbar: + self._progbar = ProgbarLogger() + self.callbacks.append(self._progbar) + + def append(self, callback): + self.callbacks.append(callback) + + def set_params(self, params): + self.params = params + for callback in self.callbacks: + callback.set_params(params) + + def set_model(self, model): + super().set_model(model) + if self._history: + model.history = self._history + for callback in self.callbacks: + callback.set_model(model) + + def on_batch_begin(self, batch, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_batch_begin(batch, logs=logs) + + def on_batch_end(self, batch, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_batch_end(batch, logs=logs) + + def on_epoch_begin(self, epoch, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_epoch_begin(epoch, logs) + + def on_epoch_end(self, epoch, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_epoch_end(epoch, logs) + + def on_train_batch_begin(self, batch, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_train_batch_begin(batch, logs=logs) + + def on_train_batch_end(self, batch, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_train_batch_end(batch, logs=logs) + + def on_test_batch_begin(self, batch, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_test_batch_begin(batch, logs=logs) + + def on_test_batch_end(self, batch, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_test_batch_end(batch, logs=logs) + + def on_predict_batch_begin(self, batch, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_predict_batch_begin(batch, logs=logs) + + def on_predict_batch_end(self, batch, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_predict_batch_end(batch, logs=logs) + + def on_train_begin(self, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_train_begin(logs) + + def on_train_end(self, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_train_end(logs) + + def on_test_begin(self, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_test_begin(logs) + + def on_test_end(self, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_test_end(logs) + + def on_predict_begin(self, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_predict_begin(logs) + + def on_predict_end(self, logs=None): + logs = logs or {} + for callback in self.callbacks: + callback.on_predict_end(logs) + +# File: keras-master/keras/src/callbacks/csv_logger.py +import collections +import csv +import numpy as np +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback +from 
keras.src.utils import file_utils + +@keras_export('keras.callbacks.CSVLogger') +class CSVLogger(Callback): + + def __init__(self, filename, separator=',', append=False): + super().__init__() + self.sep = separator + self.filename = file_utils.path_to_string(filename) + self.append = append + self.writer = None + self.keys = None + self.append_header = True + + def on_train_begin(self, logs=None): + if self.append: + if file_utils.exists(self.filename): + with file_utils.File(self.filename, 'r') as f: + self.append_header = not bool(len(f.readline())) + mode = 'a' + else: + mode = 'w' + self.csv_file = file_utils.File(self.filename, mode) + + def on_epoch_end(self, epoch, logs=None): + logs = logs or {} + + def handle_value(k): + is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0 + if isinstance(k, str): + return k + elif isinstance(k, collections.abc.Iterable) and (not is_zero_dim_ndarray): + return f'''"[{', '.join(map(str, k))}]"''' + else: + return k + if self.keys is None: + self.keys = sorted(logs.keys()) + val_keys_found = False + for key in self.keys: + if key.startswith('val_'): + val_keys_found = True + break + if not val_keys_found: + self.keys.extend(['val_' + k for k in self.keys]) + if not self.writer: + + class CustomDialect(csv.excel): + delimiter = self.sep + fieldnames = ['epoch'] + self.keys + self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames, dialect=CustomDialect) + if self.append_header: + self.writer.writeheader() + row_dict = collections.OrderedDict({'epoch': epoch}) + row_dict.update(((key, handle_value(logs.get(key, 'NA'))) for key in self.keys)) + self.writer.writerow(row_dict) + self.csv_file.flush() + + def on_train_end(self, logs=None): + self.csv_file.close() + self.writer = None + +# File: keras-master/keras/src/callbacks/early_stopping.py +import warnings +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback +from keras.src.trainers import compile_utils +from keras.src.utils import io_utils + +@keras_export('keras.callbacks.EarlyStopping') +class EarlyStopping(Callback): + + def __init__(self, monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto', baseline=None, restore_best_weights=False, start_from_epoch=0): + super().__init__() + self.monitor = monitor + self.patience = patience + self.verbose = verbose + self.baseline = baseline + self.min_delta = abs(min_delta) + self.wait = 0 + self.stopped_epoch = 0 + self.restore_best_weights = restore_best_weights + self.best_weights = None + self.start_from_epoch = start_from_epoch + if mode not in ['auto', 'min', 'max']: + warnings.warn(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.', stacklevel=2) + mode = 'auto' + self.mode = mode + self.monitor_op = None + + def _set_monitor_op(self): + if self.mode == 'min': + self.monitor_op = ops.less + elif self.mode == 'max': + self.monitor_op = ops.greater + else: + metric_name = self.monitor.removeprefix('val_') + if metric_name == 'loss': + self.monitor_op = ops.less + if hasattr(self.model, 'metrics'): + all_metrics = [] + for m in self.model.metrics: + if isinstance(m, (compile_utils.CompileMetrics, compile_utils.MetricsList)): + all_metrics.extend(m.metrics) + for m in all_metrics: + if m.name == metric_name: + if hasattr(m, '_direction'): + if m._direction == 'up': + self.monitor_op = ops.greater + else: + self.monitor_op = ops.less + if self.monitor_op is None: + raise ValueError(f"EarlyStopping callback received monitor={self.monitor} 
but Keras isn't able to automatically determine whether that metric should be maximized or minimized. Pass `mode='max'` in order to do early stopping based on the highest metric value, or pass `mode='min'` in order to use the lowest value.") + if self.monitor_op == ops.less: + self.min_delta *= -1 + self.best = float('inf') if self.monitor_op == ops.less else -float('inf') + + def on_train_begin(self, logs=None): + self.wait = 0 + self.stopped_epoch = 0 + self.best_weights = None + self.best_epoch = 0 + + def on_epoch_end(self, epoch, logs=None): + if self.monitor_op is None: + self._set_monitor_op() + current = self.get_monitor_value(logs) + if current is None or epoch < self.start_from_epoch: + return + if self.restore_best_weights and self.best_weights is None: + self.best_weights = self.model.get_weights() + self.best_epoch = epoch + self.wait += 1 + if self._is_improvement(current, self.best): + self.best = current + self.best_epoch = epoch + if self.restore_best_weights: + self.best_weights = self.model.get_weights() + if self.baseline is None or self._is_improvement(current, self.baseline): + self.wait = 0 + return + if self.wait >= self.patience and epoch > 0: + self.stopped_epoch = epoch + self.model.stop_training = True + + def on_train_end(self, logs=None): + if self.stopped_epoch > 0 and self.verbose > 0: + io_utils.print_msg(f'Epoch {self.stopped_epoch + 1}: early stopping') + if self.restore_best_weights and self.best_weights is not None: + if self.verbose > 0: + io_utils.print_msg(f'Restoring model weights from the end of the best epoch: {self.best_epoch + 1}.') + self.model.set_weights(self.best_weights) + + def get_monitor_value(self, logs): + logs = logs or {} + monitor_value = logs.get(self.monitor) + if monitor_value is None: + warnings.warn(f"Early stopping conditioned on metric `{self.monitor}` which is not available. 
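# --- Editor's note: an EarlyStopping usage sketch (assumes the compiled `model` and `x`, `y` from the sketch above).
# Training stops once `val_loss` fails to improve by `min_delta` for `patience` epochs;
# `restore_best_weights=True` rolls the model back to its best epoch when training stops.
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=3, restore_best_weights=True)
model.fit(x, y, validation_split=0.2, epochs=100, callbacks=[early_stop])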
Available metrics are: {','.join(list(logs.keys()))}", stacklevel=2) + return monitor_value + + def _is_improvement(self, monitor_value, reference_value): + return self.monitor_op(monitor_value - self.min_delta, reference_value) + +# File: keras-master/keras/src/callbacks/history.py +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback + +@keras_export('keras.callbacks.History') +class History(Callback): + + def __init__(self): + super().__init__() + self.history = {} + + def on_train_begin(self, logs=None): + self.epoch = [] + + def on_epoch_end(self, epoch, logs=None): + logs = logs or {} + self.epoch.append(epoch) + for (k, v) in logs.items(): + self.history.setdefault(k, []).append(v) + self.model.history = self + +# File: keras-master/keras/src/callbacks/lambda_callback.py +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback + +@keras_export('keras.callbacks.LambdaCallback') +class LambdaCallback(Callback): + + def __init__(self, on_epoch_begin=None, on_epoch_end=None, on_train_begin=None, on_train_end=None, on_train_batch_begin=None, on_train_batch_end=None, **kwargs): + super().__init__() + self.__dict__.update(kwargs) + if on_epoch_begin is not None: + self.on_epoch_begin = on_epoch_begin + if on_epoch_end is not None: + self.on_epoch_end = on_epoch_end + if on_train_begin is not None: + self.on_train_begin = on_train_begin + if on_train_end is not None: + self.on_train_end = on_train_end + if on_train_batch_begin is not None: + self.on_train_batch_begin = on_train_batch_begin + if on_train_batch_end is not None: + self.on_train_batch_end = on_train_batch_end + +# File: keras-master/keras/src/callbacks/learning_rate_scheduler.py +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback +from keras.src.utils import io_utils + +@keras_export('keras.callbacks.LearningRateScheduler') +class LearningRateScheduler(Callback): + + def __init__(self, schedule, verbose=0): + super().__init__() + self.schedule = schedule + self.verbose = verbose + + def on_epoch_begin(self, epoch, logs=None): + if not hasattr(self.model.optimizer, 'learning_rate'): + raise ValueError('Optimizer must have a "learning_rate" attribute.') + try: + learning_rate = float(backend.convert_to_numpy(self.model.optimizer.learning_rate)) + learning_rate = self.schedule(epoch, learning_rate) + except TypeError: + learning_rate = self.schedule(epoch) + if not isinstance(learning_rate, (float, np.float32, np.float64)): + raise ValueError(f'The output of the `schedule` function should be a float. 
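# --- Editor's note: a LearningRateScheduler sketch. Per the code above, the `schedule` callable
# receives `(epoch, current_lr)` (or just `epoch`, via the TypeError fallback) and must return a float.
def schedule(epoch, lr):
    # Hold the initial rate for 10 epochs, then decay by 10% per epoch.
    return lr if epoch < 10 else lr * 0.9

lr_cb = keras.callbacks.LearningRateScheduler(schedule, verbose=1)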
Got: {learning_rate}') + self.model.optimizer.learning_rate = learning_rate + if self.verbose > 0: + io_utils.print_msg(f'\nEpoch {epoch + 1}: LearningRateScheduler setting learning rate to {learning_rate}.') + + def on_epoch_end(self, epoch, logs=None): + logs = logs or {} + logs['learning_rate'] = float(backend.convert_to_numpy(self.model.optimizer.learning_rate)) + +# File: keras-master/keras/src/callbacks/model_checkpoint.py +import os +import re +import warnings +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback +from keras.src.utils import file_utils +from keras.src.utils import io_utils + +@keras_export('keras.callbacks.ModelCheckpoint') +class ModelCheckpoint(Callback): + + def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', save_freq='epoch', initial_value_threshold=None): + super().__init__() + self.monitor = monitor + self.verbose = verbose + self.filepath = file_utils.path_to_string(filepath) + self.save_best_only = save_best_only + self.save_weights_only = save_weights_only + self.save_freq = save_freq + self._batches_seen_since_last_saving = 0 + self._last_batch_seen = 0 + self.best = initial_value_threshold + if mode not in ['auto', 'min', 'max']: + warnings.warn(f"ModelCheckpoint mode '{mode}' is unknown, fallback to auto mode.", stacklevel=2) + mode = 'auto' + if mode == 'min': + self.monitor_op = np.less + if self.best is None: + self.best = np.inf + elif mode == 'max': + self.monitor_op = np.greater + if self.best is None: + self.best = -np.inf + elif 'acc' in self.monitor or self.monitor.startswith('fmeasure'): + self.monitor_op = np.greater + if self.best is None: + self.best = -np.inf + else: + self.monitor_op = np.less + if self.best is None: + self.best = np.inf + if self.save_freq != 'epoch' and (not isinstance(self.save_freq, int)): + raise ValueError(f"Unrecognized save_freq: {self.save_freq}. Expected save_freq are 'epoch' or integer values") + if save_weights_only: + if not self.filepath.endswith('.weights.h5'): + raise ValueError(f'When using `save_weights_only=True` in `ModelCheckpoint`, the filepath provided must end in `.weights.h5` (Keras weights format). Received: filepath={self.filepath}') + elif not self.filepath.endswith('.keras'): + raise ValueError(f'The filepath provided must end in `.keras` (Keras model format). 
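# --- Editor's note: a ModelCheckpoint sketch. As the constructor above enforces,
# `save_weights_only=True` requires a `.weights.h5` filepath; full-model saving requires `.keras`.
ckpt_full = keras.callbacks.ModelCheckpoint('best.keras', monitor='val_loss', save_best_only=True, mode='min')
ckpt_weights = keras.callbacks.ModelCheckpoint('best.weights.h5', save_weights_only=True, save_freq='epoch')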
Received: filepath={self.filepath}') + + def on_train_batch_end(self, batch, logs=None): + if self._should_save_on_batch(batch): + self._save_model(epoch=self._current_epoch, batch=batch, logs=logs) + + def on_epoch_begin(self, epoch, logs=None): + self._current_epoch = epoch + + def on_epoch_end(self, epoch, logs=None): + if self.save_freq == 'epoch': + self._save_model(epoch=epoch, batch=None, logs=logs) + + def _should_save_on_batch(self, batch): + if self.save_freq == 'epoch': + return False + if batch <= self._last_batch_seen: + add_batches = batch + 1 + else: + add_batches = batch - self._last_batch_seen + self._batches_seen_since_last_saving += add_batches + self._last_batch_seen = batch + if self._batches_seen_since_last_saving >= self.save_freq: + self._batches_seen_since_last_saving = 0 + return True + return False + + def _save_model(self, epoch, batch, logs): + logs = logs or {} + filepath = self._get_file_path(epoch, batch, logs) + dirname = os.path.dirname(filepath) + if dirname and (not file_utils.exists(dirname)): + file_utils.makedirs(dirname) + try: + if self.save_best_only: + current = logs.get(self.monitor) + if current is None: + warnings.warn(f'Can save best model only with {self.monitor} available, skipping.', stacklevel=2) + elif (isinstance(current, np.ndarray) or backend.is_tensor(current)) and len(current.shape) > 0: + warnings.warn(f'Can save best model only when `monitor` is a scalar value. Received: {current}. Falling back to `save_best_only=False`.') + self.model.save(filepath, overwrite=True) + elif self.monitor_op(current, self.best): + if self.verbose > 0: + io_utils.print_msg(f'\nEpoch {epoch + 1}: {self.monitor} improved from {self.best:.5f} to {current:.5f}, saving model to {filepath}') + self.best = current + if self.save_weights_only: + self.model.save_weights(filepath, overwrite=True) + else: + self.model.save(filepath, overwrite=True) + elif self.verbose > 0: + io_utils.print_msg(f'\nEpoch {epoch + 1}: {self.monitor} did not improve from {self.best:.5f}') + else: + if self.verbose > 0: + io_utils.print_msg(f'\nEpoch {epoch + 1}: saving model to {filepath}') + if self.save_weights_only: + self.model.save_weights(filepath, overwrite=True) + else: + self.model.save(filepath, overwrite=True) + except IsADirectoryError: + raise IOError(f'Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {filepath}') + except IOError as e: + if 'is a directory' in str(e.args[0]).lower(): + raise IOError(f'Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: f{filepath}') + raise e + + def _get_file_path(self, epoch, batch, logs): + try: + if batch is None or 'batch' in logs: + file_path = self.filepath.format(epoch=epoch + 1, **logs) + else: + file_path = self.filepath.format(epoch=epoch + 1, batch=batch + 1, **logs) + except KeyError as e: + raise KeyError(f'Failed to format this callback filepath: "{self.filepath}". 
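# --- Editor's note: as `_get_file_path` shows, the filepath may embed `{epoch}` and any logged
# metric, filled in via `str.format`; unknown placeholder keys raise the KeyError above.
ckpt = keras.callbacks.ModelCheckpoint('ckpt_epoch{epoch:02d}_val{val_loss:.3f}.keras')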
Reason: {e}') + return file_path + + def _checkpoint_exists(self, filepath): + return file_utils.exists(filepath) + + def _get_most_recently_modified_file_matching_pattern(self, pattern): + dir_name = os.path.dirname(pattern) + base_name = os.path.basename(pattern) + base_name_regex = '^' + re.sub('{.*}', '.*', base_name) + '$' + latest_mod_time = 0 + file_path_with_latest_mod_time = None + n_file_with_latest_mod_time = 0 + file_path_with_largest_file_name = None + if file_utils.exists(dir_name): + for file_name in os.listdir(dir_name): + if re.match(base_name_regex, file_name): + file_path = os.path.join(dir_name, file_name) + mod_time = os.path.getmtime(file_path) + if file_path_with_largest_file_name is None or file_path > file_path_with_largest_file_name: + file_path_with_largest_file_name = file_path + if mod_time > latest_mod_time: + latest_mod_time = mod_time + file_path_with_latest_mod_time = file_path + n_file_with_latest_mod_time = 1 + elif mod_time == latest_mod_time: + n_file_with_latest_mod_time += 1 + if n_file_with_latest_mod_time == 1: + return file_path_with_latest_mod_time + else: + return file_path_with_largest_file_name + +# File: keras-master/keras/src/callbacks/progbar_logger.py +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback +from keras.src.utils import io_utils +from keras.src.utils.progbar import Progbar + +@keras_export('keras.callbacks.ProgbarLogger') +class ProgbarLogger(Callback): + + def __init__(self): + super().__init__() + self.seen = 0 + self.progbar = None + self.target = None + self.verbose = 1 + self.epochs = 1 + self._called_in_fit = False + + def set_params(self, params): + verbose = params['verbose'] + if verbose == 'auto': + verbose = 1 + self.verbose = verbose + self.epochs = params['epochs'] + self.target = params['steps'] + + def on_train_begin(self, logs=None): + self._called_in_fit = True + + def on_test_begin(self, logs=None): + if not self._called_in_fit: + self._reset_progbar() + self._maybe_init_progbar() + + def on_predict_begin(self, logs=None): + self._reset_progbar() + self._maybe_init_progbar() + + def on_epoch_begin(self, epoch, logs=None): + self._reset_progbar() + self._maybe_init_progbar() + if self.verbose and self.epochs > 1: + io_utils.print_msg(f'Epoch {epoch + 1}/{self.epochs}') + + def on_train_batch_end(self, batch, logs=None): + self._update_progbar(batch, logs) + + def on_test_batch_end(self, batch, logs=None): + if not self._called_in_fit: + self._update_progbar(batch, logs) + + def on_predict_batch_end(self, batch, logs=None): + self._update_progbar(batch, None) + + def on_epoch_end(self, epoch, logs=None): + self._finalize_progbar(logs) + + def on_test_end(self, logs=None): + if not self._called_in_fit: + self._finalize_progbar(logs) + + def on_predict_end(self, logs=None): + self._finalize_progbar(logs) + + def _reset_progbar(self): + self.seen = 0 + self.progbar = None + + def _maybe_init_progbar(self): + if self.progbar is None: + self.progbar = Progbar(target=self.target, verbose=self.verbose, unit_name='step') + + def _update_progbar(self, batch, logs=None): + logs = logs or {} + self._maybe_init_progbar() + self.seen = batch + 1 + if self.verbose == 1: + self.progbar.update(self.seen, list(logs.items()), finalize=False) + + def _finalize_progbar(self, logs): + logs = logs or {} + if self.target is None: + self.target = self.seen + self.progbar.target = self.target + self.progbar.update(self.target, list(logs.items()), finalize=True) + +# File: 
keras-master/keras/src/callbacks/reduce_lr_on_plateau.py +import warnings +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback +from keras.src.utils import io_utils + +@keras_export('keras.callbacks.ReduceLROnPlateau') +class ReduceLROnPlateau(Callback): + + def __init__(self, monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0.0, **kwargs): + super().__init__() + self.monitor = monitor + if factor >= 1.0: + raise ValueError(f'ReduceLROnPlateau does not support a factor >= 1.0. Received factor={factor}') + self.factor = factor + self.min_lr = min_lr + self.min_delta = min_delta + self.patience = patience + self.verbose = verbose + self.cooldown = cooldown + self.cooldown_counter = 0 + self.wait = 0 + self.best = 0 + self.mode = mode + self.monitor_op = None + self._reset() + + def _reset(self): + if self.mode not in {'auto', 'min', 'max'}: + warnings.warn(f'Learning rate reduction mode {self.mode} is unknown, fallback to auto mode.', stacklevel=2) + self.mode = 'auto' + if self.mode == 'min' or (self.mode == 'auto' and 'acc' not in self.monitor): + self.monitor_op = lambda a, b: np.less(a, b - self.min_delta) + self.best = np.inf + else: + self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta) + self.best = -np.inf + self.cooldown_counter = 0 + self.wait = 0 + + def on_train_begin(self, logs=None): + self._reset() + + def on_epoch_end(self, epoch, logs=None): + logs = logs or {} + logs['learning_rate'] = float(backend.convert_to_numpy(self.model.optimizer.learning_rate)) + current = logs.get(self.monitor) + if current is None: + warnings.warn(f"Learning rate reduction is conditioned on metric `{self.monitor}` which is not available. 
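# --- Editor's note: a ReduceLROnPlateau sketch. After `patience` stagnant epochs the learning
# rate is multiplied by `factor` (never dropping below `min_lr`), then `cooldown` epochs pass
# before the wait counter can trigger again.
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, cooldown=2, min_lr=1e-6)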
Available metrics are: {','.join(list(logs.keys()))}.", stacklevel=2) + else: + if self.in_cooldown(): + self.cooldown_counter -= 1 + self.wait = 0 + if self.monitor_op(current, self.best): + self.best = current + self.wait = 0 + elif not self.in_cooldown(): + self.wait += 1 + if self.wait >= self.patience: + old_lr = float(backend.convert_to_numpy(self.model.optimizer.learning_rate)) + if old_lr > np.float32(self.min_lr): + new_lr = old_lr * self.factor + new_lr = max(new_lr, self.min_lr) + self.model.optimizer.learning_rate = new_lr + if self.verbose > 0: + io_utils.print_msg(f'\nEpoch {epoch + 1}: ReduceLROnPlateau reducing learning rate to {new_lr}.') + self.cooldown_counter = self.cooldown + self.wait = 0 + + def in_cooldown(self): + return self.cooldown_counter > 0 + +# File: keras-master/keras/src/callbacks/remote_monitor.py +import json +import warnings +import numpy as np +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback +try: + import requests +except ImportError: + requests = None + +@keras_export('keras.callbacks.RemoteMonitor') +class RemoteMonitor(Callback): + + def __init__(self, root='http://localhost:9000', path='/publish/epoch/end/', field='data', headers=None, send_as_json=False): + super().__init__() + self.root = root + self.path = path + self.field = field + self.headers = headers + self.send_as_json = send_as_json + + def on_epoch_end(self, epoch, logs=None): + if requests is None: + raise ImportError('RemoteMonitor requires the `requests` library.') + logs = logs or {} + send = {} + send['epoch'] = epoch + for (k, v) in logs.items(): + if isinstance(v, (np.ndarray, np.generic)): + send[k] = v.item() + else: + send[k] = v + try: + if self.send_as_json: + requests.post(self.root + self.path, json=send, headers=self.headers) + else: + requests.post(self.root + self.path, {self.field: json.dumps(send)}, headers=self.headers) + except requests.exceptions.RequestException: + warnings.warn(f'Could not reach RemoteMonitor root server at {self.root}', stacklevel=2) + +# File: keras-master/keras/src/callbacks/swap_ema_weights.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback + +@keras_export('keras.callbacks.SwapEMAWeights') +class SwapEMAWeights(Callback): + + def __init__(self, swap_on_epoch=False): + super().__init__() + self.swap_on_epoch = swap_on_epoch + self._ema_weights_in_model = False + + def _tf_swap_variables(self, optimizer): + for (var, average_var) in zip(self.model.trainable_variables, optimizer._model_variables_moving_average): + if isinstance(var, backend.Variable): + var = var.value + if isinstance(average_var, backend.Variable): + average_var = average_var.value + optimizer._distribution_strategy.extended.update(var, lambda a, b: a.assign_add(b), args=(average_var,)) + optimizer._distribution_strategy.extended.update(var, lambda a, b: b.assign(a - b), args=(average_var,)) + optimizer._distribution_strategy.extended.update(var, lambda a, b: a.assign(a - b), args=(average_var,)) + + def _backend_swap_variables(self, optimizer): + for (var, average_var) in zip(self.model.trainable_variables, optimizer._model_variables_moving_average): + temporary_variable = ops.convert_to_numpy(var) + var.assign(average_var) + average_var.assign(temporary_variable) + + def _tf_finalize_ema_values(self, optimizer): + for (var, average_var) in zip(self.model.trainable_variables, optimizer._model_variables_moving_average): + 
if isinstance(var, backend.Variable): + var = var.value + if isinstance(average_var, backend.Variable): + average_var = average_var.value + optimizer._distribution_strategy.extended.update(average_var, lambda a, b: a.assign(b), args=(var,)) + + def _backend_finalize_ema_values(self, optimizer): + for (var, average_var) in zip(self.model.trainable_variables, optimizer._model_variables_moving_average): + average_var.assign(var) + + def _swap_variables(self): + if hasattr(self.model.optimizer, 'inner_optimizer'): + optimizer = self.model.optimizer.inner_optimizer + else: + optimizer = self.model.optimizer + if not hasattr(optimizer, '_model_variables_moving_average'): + raise ValueError(f'SwapEMAWeights must be used when `use_ema=True` is set on the optimizer. Received: use_ema={optimizer.use_ema}') + if backend.backend() == 'tensorflow': + self._tf_swap_variables(optimizer) + else: + self._backend_swap_variables(optimizer) + + def _finalize_ema_values(self): + if hasattr(self.model.optimizer, 'inner_optimizer'): + optimizer = self.model.optimizer.inner_optimizer + else: + optimizer = self.model.optimizer + if not hasattr(optimizer, '_model_variables_moving_average'): + raise ValueError(f'SwapEMAWeights must be used when `use_ema=True` is set on the optimizer. Received: use_ema={optimizer.use_ema}') + if backend.backend() == 'tensorflow': + self._tf_finalize_ema_values(optimizer) + else: + self._backend_finalize_ema_values(optimizer) + + def on_epoch_begin(self, epoch, logs=None): + if self.swap_on_epoch and self._ema_weights_in_model: + self._swap_variables() + self._ema_weights_in_model = False + + def on_epoch_end(self, epoch, logs=None): + if self.swap_on_epoch and (not self._ema_weights_in_model): + self._swap_variables() + self._ema_weights_in_model = True + if epoch == self.params['epochs'] - 1: + self._finalize_ema_values() + + def on_test_begin(self, logs=None): + if not self._ema_weights_in_model: + self._swap_variables() + self._ema_weights_in_model = True + + def on_test_end(self, logs=None): + if self._ema_weights_in_model: + self._swap_variables() + self._ema_weights_in_model = False + + def on_predict_begin(self, logs=None): + if not self._ema_weights_in_model: + self._swap_variables() + self._ema_weights_in_model = True + + def on_predict_end(self, logs=None): + if not self._ema_weights_in_model: + self._swap_variables() + self._ema_weights_in_model = False + +# File: keras-master/keras/src/callbacks/tensorboard.py +import logging +import os +import sys +import time +import warnings +from keras.src import backend +from keras.src import ops +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback +from keras.src.layers import Embedding +from keras.src.optimizers import Optimizer +from keras.src.utils import file_utils + +@keras_export('keras.callbacks.TensorBoard') +class TensorBoard(Callback): + + def __init__(self, log_dir='logs', histogram_freq=0, write_graph=True, write_images=False, write_steps_per_second=False, update_freq='epoch', profile_batch=0, embeddings_freq=0, embeddings_metadata=None): + super().__init__() + self.log_dir = str(log_dir) + self.histogram_freq = histogram_freq + self.write_graph = write_graph + self.write_images = write_images + self.write_steps_per_second = write_steps_per_second + self.update_freq = 1 if update_freq == 'batch' else update_freq + self.embeddings_freq = embeddings_freq + self.embeddings_metadata = embeddings_metadata + if profile_batch and backend.backend() != 
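# --- Editor's note: a SwapEMAWeights sketch (assumes the `model`, `x`, `y` from the earlier sketches).
# The callback requires an optimizer built with `use_ema=True` (it raises otherwise, per the check
# above); it swaps EMA and raw weights around evaluation and prediction.
opt = keras.optimizers.Adam(learning_rate=1e-3, use_ema=True)
model.compile(optimizer=opt, loss='mse')
model.fit(x, y, validation_split=0.2, epochs=5, callbacks=[keras.callbacks.SwapEMAWeights(swap_on_epoch=True)])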
'tensorflow': + raise ValueError(f"Profiling is not yet available with the {backend.backend()} backend. Please open a PR if you'd like to add this feature. Received: profile_batch={profile_batch} (must be 0)") + self._init_profile_batch(profile_batch) + self._global_train_batch = 0 + self._previous_epoch_iterations = 0 + self._train_accumulated_time = 0 + self._batch_start_time = 0 + self._summary_module = None + self._writers = {} + self._prev_summary_state = [] + + def set_model(self, model): + self._model = model + self._log_write_dir = self.log_dir + self._train_dir = os.path.join(self._log_write_dir, 'train') + self._train_step = 0 + self._val_dir = os.path.join(self._log_write_dir, 'validation') + self._val_step = 0 + self._writers = {} + self._should_write_train_graph = False + if self.write_graph: + self._write_keras_model_summary() + self._should_write_train_graph = True + if self.embeddings_freq: + self._configure_embeddings() + + @property + def summary(self): + if self._summary_module is None: + import tensorflow.summary as summary + self._summary_module = summary + return self._summary_module + + @property + def _train_writer(self): + if 'train' not in self._writers: + self._writers['train'] = self.summary.create_file_writer(self._train_dir) + return self._writers['train'] + + @property + def _val_writer(self): + if 'val' not in self._writers: + self._writers['val'] = self.summary.create_file_writer(self._val_dir) + return self._writers['val'] + + def _write_keras_model_train_graph(self): + with self._train_writer.as_default(): + train_fn = self.model.train_function + if hasattr(train_fn, 'function_spec'): + if hasattr(train_fn, '_concrete_stateful_fn'): + self.summary.graph(train_fn._concrete_stateful_fn.graph) + else: + self.summary.graph(train_fn._concrete_variable_creation_fn.graph) + + def _write_keras_model_summary(self): + with self._train_writer.as_default(): + if self.model.__class__.__name__ == 'Functional' or self.model.__class__.__name__ == 'Sequential': + keras_model_summary('keras', self.model, step=0) + + def _configure_embeddings(self): + from google.protobuf import text_format + from tensorboard.plugins import projector + config = projector.ProjectorConfig() + for layer in self.model.layers: + if isinstance(layer, Embedding): + embedding = config.embeddings.add() + name = 'layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE' + embedding.tensor_name = name + if self.embeddings_metadata is not None: + if isinstance(self.embeddings_metadata, str): + embedding.metadata_path = self.embeddings_metadata + elif layer.name in self.embeddings_metadata.keys(): + embedding.metadata_path = self.embeddings_metadata.pop(layer.name) + if self.embeddings_metadata and (not isinstance(self.embeddings_metadata, str)): + raise ValueError(f'Unrecognized `Embedding` layer names passed to `keras.callbacks.TensorBoard` `embeddings_metadata` argument: {self.embeddings_metadata.keys()}') + config_pbtxt = text_format.MessageToString(config) + path = os.path.join(self._log_write_dir, 'projector_config.pbtxt') + with file_utils.File(path, 'w') as f: + f.write(config_pbtxt) + + def _push_writer(self, writer, step): + if self.update_freq == 'epoch': + return + + def should_record(): + return step % self.update_freq == 0 + summary_context = (writer.as_default(step), self.summary.record_if(should_record)) + self._prev_summary_state.append(summary_context) + summary_context[0].__enter__() + summary_context[1].__enter__() + + def _pop_writer(self): + if self.update_freq == 'epoch': + 
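# --- Editor's note: a TensorBoard callback sketch. Note that `profile_batch` must stay 0 on
# non-TensorFlow backends, per the check in `__init__` above.
tb = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=1, write_graph=True, update_freq='epoch')
model.fit(x, y, epochs=3, callbacks=[tb])  # inspect with: tensorboard --logdir ./logs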
return + previous_context = self._prev_summary_state.pop() + previous_context[1].__exit__(*sys.exc_info()) + previous_context[0].__exit__(*sys.exc_info()) + + def _close_writers(self): + for writer in self._writers.values(): + writer.close() + + def _init_profile_batch(self, profile_batch): + profile_batch_error_message = f'profile_batch must be a non-negative integer or 2-tuple of positive integers. A pair of positive integers signifies a range of batches to profile. Found: {profile_batch}' + if isinstance(profile_batch, str): + profile_batch = str(profile_batch).split(',') + profile_batch = tree.map_structure(int, profile_batch) + if isinstance(profile_batch, int): + self._start_batch = profile_batch + self._stop_batch = profile_batch + elif isinstance(profile_batch, (tuple, list)) and len(profile_batch) == 2: + (self._start_batch, self._stop_batch) = profile_batch + else: + raise ValueError(profile_batch_error_message) + if self._start_batch < 0 or self._stop_batch < self._start_batch: + raise ValueError(profile_batch_error_message) + self._profiler_started = False + if self._start_batch > 0: + self._start_profiler(logdir='') + self._stop_profiler(save=False) + self._is_tracing = False + self._should_trace = not (self._start_batch == 0 and self._stop_batch == 0) + + def on_train_begin(self, logs=None): + self._global_train_batch = 0 + self._previous_epoch_iterations = 0 + self._push_writer(self._train_writer, self._train_step) + + def on_train_end(self, logs=None): + self._pop_writer() + if self._is_tracing: + self._stop_trace() + self._close_writers() + + def on_test_begin(self, logs=None): + self._push_writer(self._val_writer, self._val_step) + + def on_test_end(self, logs=None): + if self.model.optimizer and hasattr(self.model.optimizer, 'iterations'): + with self._val_writer.as_default(): + for (name, value) in logs.items(): + self.summary.scalar('evaluation_' + name + '_vs_iterations', value, step=self.model.optimizer.iterations) + self._pop_writer() + + def _implements_train_batch_hooks(self): + return self._should_trace or self.write_steps_per_second + + def on_train_batch_begin(self, batch, logs=None): + self._global_train_batch += 1 + if self.write_steps_per_second: + self._batch_start_time = time.time() + if not self._should_trace: + return + if self._global_train_batch == self._start_batch: + self._start_trace() + + def on_train_batch_end(self, batch, logs=None): + if self._should_write_train_graph: + self._write_keras_model_train_graph() + self._should_write_train_graph = False + if self.write_steps_per_second: + batch_run_time = time.time() - self._batch_start_time + self.summary.scalar('batch_steps_per_second', 1.0 / batch_run_time, step=self._train_step) + if isinstance(logs, dict): + for (name, value) in logs.items(): + self.summary.scalar('batch_' + name, value, step=self._train_step) + if not self._should_trace: + return + if self._is_tracing and self._global_train_batch >= self._stop_batch: + self._stop_trace() + + def on_epoch_begin(self, epoch, logs=None): + if self.write_steps_per_second: + self._previous_epoch_iterations = ops.convert_to_tensor(self.model.optimizer.iterations, 'float32') + self._epoch_start_time = time.time() + + def on_epoch_end(self, epoch, logs=None): + self._log_epoch_metrics(epoch, logs) + if self.histogram_freq and epoch % self.histogram_freq == 0: + self._log_weights(epoch) + if self.embeddings_freq and epoch % self.embeddings_freq == 0: + self._log_embeddings(epoch) + + def _start_trace(self): + self.summary.trace_on(graph=True, 
profiler=False) + self._start_profiler(logdir=self.log_dir) + self._is_tracing = True + + def _stop_trace(self, batch=None): + if batch is None: + batch = self._stop_batch + with self._train_writer.as_default(): + self.summary.trace_export(name='batch_%d' % batch, step=batch) + self._stop_profiler() + self._is_tracing = False + + def _collect_learning_rate(self, logs): + if isinstance(self.model.optimizer, Optimizer): + logs['learning_rate'] = float(ops.convert_to_numpy(self.model.optimizer.learning_rate)) + return logs + + def _compute_steps_per_second(self): + current_iteration = self.model.optimizer.iterations + time_since_epoch_begin = time.time() - self._epoch_start_time + current_iteration = ops.convert_to_tensor(current_iteration, 'float32') + time_since_epoch_begin = ops.convert_to_tensor(time_since_epoch_begin, 'float32') + steps_per_second = (current_iteration - self._previous_epoch_iterations) / time_since_epoch_begin + return float(steps_per_second) + + def _log_epoch_metrics(self, epoch, logs): + if not logs: + return + train_logs = {k: v for (k, v) in logs.items() if not k.startswith('val_')} + val_logs = {k: v for (k, v) in logs.items() if k.startswith('val_')} + train_logs = self._collect_learning_rate(train_logs) + if self.write_steps_per_second: + train_logs['steps_per_second'] = self._compute_steps_per_second() + if train_logs: + with self._train_writer.as_default(): + for (name, value) in train_logs.items(): + self.summary.scalar('epoch_' + name, value, step=epoch) + if val_logs: + with self._val_writer.as_default(): + for (name, value) in val_logs.items(): + name = name[4:] + self.summary.scalar('epoch_' + name, value, step=epoch) + + def _log_weights(self, epoch): + with self._train_writer.as_default(): + for layer in self.model.layers: + for weight in layer.weights: + weight_name = weight.name.replace(':', '_') + histogram_weight_name = weight_name + '/histogram' + self.summary.histogram(histogram_weight_name, weight, step=epoch) + if self.write_images: + image_weight_name = weight_name + '/image' + self._log_weight_as_image(weight, image_weight_name, epoch) + self._train_writer.flush() + + def _log_weight_as_image(self, weight, weight_name, epoch): + w_img = ops.squeeze(weight) + shape = w_img.shape + if len(shape) == 1: + w_img = ops.reshape(w_img, [1, shape[0], 1, 1]) + elif len(shape) == 2: + if shape[0] > shape[1]: + w_img = ops.transpose(w_img) + shape = w_img.shape + w_img = ops.reshape(w_img, [1, shape[0], shape[1], 1]) + elif len(shape) == 3: + if backend.image_data_format() == 'channels_last': + w_img = ops.transpose(w_img, [2, 0, 1]) + shape = w_img.shape + w_img = ops.reshape(w_img, [shape[0], shape[1], shape[2], 1]) + w_img = backend.convert_to_numpy(w_img) + shape = w_img.shape + if len(shape) == 4 and shape[-1] in [1, 3, 4]: + self.summary.image(weight_name, w_img, step=epoch) + + def _log_embeddings(self, epoch): + embeddings_ckpt = os.path.join(self._log_write_dir, 'train', f'keras_embedding.ckpt-{epoch}.weights.h5') + self.model.save_weights(embeddings_ckpt) + + def _start_profiler(self, logdir): + if self._profiler_started: + return + try: + backend.tensorboard.start_trace(logdir) + self._profiler_started = True + except Exception as e: + logging.error('Failed to start profiler: %s', e) + + def _stop_profiler(self, save=True): + if not self._profiler_started: + return + try: + backend.tensorboard.stop_trace(save=save) + except Exception as e: + logging.error('Failed to stop profiler: %s', e) + finally: + self._profiler_started = False + +def 
keras_model_summary(name, data, step=None): + import tensorflow.summary as summary + from tensorflow.compat.v1 import SummaryMetadata + summary_metadata = SummaryMetadata() + summary_metadata.plugin_data.plugin_name = 'graph_keras_model' + summary_metadata.plugin_data.content = b'1' + try: + json_string = data.to_json() + except Exception as exc: + warnings.warn(f'Model failed to serialize as JSON. Ignoring... {exc}') + return False + with summary.experimental.summary_scope(name, 'graph_keras_model', [data, step]) as (tag, _): + return summary.write(tag=tag, tensor=json_string, step=step, metadata=summary_metadata) + +# File: keras-master/keras/src/callbacks/terminate_on_nan.py +import numpy as np +from keras.src.api_export import keras_export +from keras.src.callbacks.callback import Callback +from keras.src.utils import io_utils + +@keras_export('keras.callbacks.TerminateOnNaN') +class TerminateOnNaN(Callback): + + def on_batch_end(self, batch, logs=None): + logs = logs or {} + loss = logs.get('loss') + if loss is not None: + if np.isnan(loss) or np.isinf(loss): + io_utils.print_msg(f'Batch {batch}: Invalid loss, terminating training') + self.model.stop_training = True + +# File: keras-master/keras/src/constraints/__init__.py +import inspect +from keras.src.api_export import keras_export +from keras.src.constraints.constraints import Constraint +from keras.src.constraints.constraints import MaxNorm +from keras.src.constraints.constraints import MinMaxNorm +from keras.src.constraints.constraints import NonNeg +from keras.src.constraints.constraints import UnitNorm +from keras.src.saving import serialization_lib +from keras.src.utils.naming import to_snake_case +ALL_OBJECTS = {Constraint, MaxNorm, MinMaxNorm, NonNeg, UnitNorm} +ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS} +ALL_OBJECTS_DICT.update({to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}) + +@keras_export('keras.constraints.serialize') +def serialize(constraint): + return serialization_lib.serialize_keras_object(constraint) + +@keras_export('keras.constraints.deserialize') +def deserialize(config, custom_objects=None): + return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects) + +@keras_export('keras.constraints.get') +def get(identifier): + if identifier is None: + return None + if isinstance(identifier, dict): + obj = deserialize(identifier) + elif isinstance(identifier, str): + obj = ALL_OBJECTS_DICT.get(identifier, None) + else: + obj = identifier + if callable(obj): + if inspect.isclass(obj): + obj = obj() + return obj + else: + raise ValueError(f'Could not interpret constraint identifier: {identifier}') + +# File: keras-master/keras/src/constraints/constraints.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export + +@keras_export('keras.constraints.Constraint') +class Constraint: + + def __call__(self, w): + return w + + def get_config(self): + return {} + + @classmethod + def from_config(cls, config): + return cls(**config) + +@keras_export(['keras.constraints.MaxNorm', 'keras.constraints.max_norm']) +class MaxNorm(Constraint): + + def __init__(self, max_value=2, axis=0): + self.max_value = max_value + self.axis = axis + + def __call__(self, w): + w = backend.convert_to_tensor(w) + norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True)) + desired = ops.clip(norms, 0, self.max_value) + return w * (desired / (backend.epsilon() + norms)) + + def get_config(self): + 
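# --- Editor's note: a weight-constraint usage sketch. Constraints attach to layer weights and are
# resolved by `keras.constraints.get`, which accepts instances, class names, or the snake_case
# strings registered above.
layer = keras.layers.Dense(64, kernel_constraint=keras.constraints.MaxNorm(max_value=2.0))
assert isinstance(keras.constraints.get('unit_norm'), keras.constraints.UnitNorm)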
return {'max_value': self.max_value, 'axis': self.axis} + +@keras_export(['keras.constraints.NonNeg', 'keras.constraints.non_neg']) +class NonNeg(Constraint): + + def __call__(self, w): + w = backend.convert_to_tensor(w) + return w * ops.cast(ops.greater_equal(w, 0.0), dtype=w.dtype) + +@keras_export(['keras.constraints.UnitNorm', 'keras.constraints.unit_norm']) +class UnitNorm(Constraint): + + def __init__(self, axis=0): + self.axis = axis + + def __call__(self, w): + w = backend.convert_to_tensor(w) + return w / (backend.epsilon() + ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))) + + def get_config(self): + return {'axis': self.axis} + +@keras_export(['keras.constraints.MinMaxNorm', 'keras.constraints.min_max_norm']) +class MinMaxNorm(Constraint): + + def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0): + self.min_value = min_value + self.max_value = max_value + self.rate = rate + self.axis = axis + + def __call__(self, w): + w = backend.convert_to_tensor(w) + norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True)) + desired = self.rate * ops.clip(norms, self.min_value, self.max_value) + (1 - self.rate) * norms + return w * (desired / (backend.epsilon() + norms)) + + def get_config(self): + return {'min_value': self.min_value, 'max_value': self.max_value, 'rate': self.rate, 'axis': self.axis} + +# File: keras-master/keras/src/datasets/boston_housing.py +import numpy as np +from keras.src.api_export import keras_export +from keras.src.utils.file_utils import get_file + +@keras_export('keras.datasets.boston_housing.load_data') +def load_data(path='boston_housing.npz', test_split=0.2, seed=113): + assert 0 <= test_split < 1 + origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' + path = get_file(path, origin=origin_folder + 'boston_housing.npz', file_hash='f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5') + with np.load(path, allow_pickle=True) as f: + x = f['x'] + y = f['y'] + rng = np.random.RandomState(seed) + indices = np.arange(len(x)) + rng.shuffle(indices) + x = x[indices] + y = y[indices] + x_train = np.array(x[:int(len(x) * (1 - test_split))]) + y_train = np.array(y[:int(len(x) * (1 - test_split))]) + x_test = np.array(x[int(len(x) * (1 - test_split)):]) + y_test = np.array(y[int(len(x) * (1 - test_split)):]) + return ((x_train, y_train), (x_test, y_test)) + +# File: keras-master/keras/src/datasets/california_housing.py +"""""" +import numpy as np +from keras.src.api_export import keras_export +from keras.src.utils.file_utils import get_file + +@keras_export('keras.datasets.california_housing.load_data') +def load_data(version='large', path='california_housing.npz', test_split=0.2, seed=113): + assert 0 <= test_split < 1 + origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' + path = get_file(path, origin=origin_folder + 'california_housing.npz', file_hash='1a2e3a52e0398de6463aebe6f4a8da34fb21fbb6b934cf88c3425e766f2a1a6f') + with np.load(path, allow_pickle=True) as f: + x = f['x'] + y = f['y'] + if version == 'small': + x = x[:600] + y = y[:600] + elif version != 'large': + raise ValueError(f"Argument `version` must be one of 'small', 'large'. 
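# --- Editor's note: the dataset loaders in this section share one pattern: download via
# `get_file`, a seeded shuffle, then a `test_split` slice. A boston_housing sketch:
(x_train, y_train), (x_test, y_test) = keras.datasets.boston_housing.load_data(test_split=0.2, seed=113)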
Received: version={version}") + rng = np.random.RandomState(seed) + indices = np.arange(len(x)) + rng.shuffle(indices) + x = x[indices] + y = y[indices] + x_train = np.array(x[:int(len(x) * (1 - test_split))]) + y_train = np.array(y[:int(len(x) * (1 - test_split))]) + x_test = np.array(x[int(len(x) * (1 - test_split)):]) + y_test = np.array(y[int(len(x) * (1 - test_split)):]) + return ((x_train, y_train), (x_test, y_test)) + +# File: keras-master/keras/src/datasets/cifar.py +"""""" +import _pickle as cPickle + +def load_batch(fpath, label_key='labels'): + with open(fpath, 'rb') as f: + d = cPickle.load(f, encoding='bytes') + d_decoded = {} + for (k, v) in d.items(): + d_decoded[k.decode('utf8')] = v + d = d_decoded + data = d['data'] + labels = d[label_key] + data = data.reshape(data.shape[0], 3, 32, 32) + return (data, labels) + +# File: keras-master/keras/src/datasets/cifar10.py +"""""" +import os +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.datasets.cifar import load_batch +from keras.src.utils.file_utils import get_file + +@keras_export('keras.datasets.cifar10.load_data') +def load_data(): + dirname = 'cifar-10-batches-py-target' + origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' + path = get_file(fname=dirname, origin=origin, extract=True, file_hash='6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce') + num_train_samples = 50000 + x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8') + y_train = np.empty((num_train_samples,), dtype='uint8') + path = os.path.join(path, 'cifar-10-batches-py') + for i in range(1, 6): + fpath = os.path.join(path, 'data_batch_' + str(i)) + (x_train[(i - 1) * 10000:i * 10000, :, :, :], y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath) + fpath = os.path.join(path, 'test_batch') + (x_test, y_test) = load_batch(fpath) + y_train = np.reshape(y_train, (len(y_train), 1)) + y_test = np.reshape(y_test, (len(y_test), 1)) + if backend.image_data_format() == 'channels_last': + x_train = x_train.transpose(0, 2, 3, 1) + x_test = x_test.transpose(0, 2, 3, 1) + x_test = x_test.astype(x_train.dtype) + y_test = y_test.astype(y_train.dtype) + return ((x_train, y_train), (x_test, y_test)) + +# File: keras-master/keras/src/datasets/cifar100.py +"""""" +import os +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.datasets.cifar import load_batch +from keras.src.utils.file_utils import get_file + +@keras_export('keras.datasets.cifar100.load_data') +def load_data(label_mode='fine'): + if label_mode not in ['fine', 'coarse']: + raise ValueError(f'`label_mode` must be one of `"fine"`, `"coarse"`. 
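# --- Editor's note: a cifar10 sketch. Per the loader above, images are uint8 and transposed to
# channels-last when `backend.image_data_format() == 'channels_last'`; labels have shape (n, 1).
(x_train, y_train), _ = keras.datasets.cifar10.load_data()
print(x_train.shape, y_train.shape)  # (50000, 32, 32, 3) (50000, 1)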
Received: label_mode={label_mode}.') + dirname = 'cifar-100-python-target' + origin = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' + path = get_file(fname=dirname, origin=origin, extract=True, file_hash='85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7') + path = os.path.join(path, 'cifar-100-python') + fpath = os.path.join(path, 'train') + (x_train, y_train) = load_batch(fpath, label_key=label_mode + '_labels') + fpath = os.path.join(path, 'test') + (x_test, y_test) = load_batch(fpath, label_key=label_mode + '_labels') + y_train = np.reshape(y_train, (len(y_train), 1)) + y_test = np.reshape(y_test, (len(y_test), 1)) + if backend.image_data_format() == 'channels_last': + x_train = x_train.transpose(0, 2, 3, 1) + x_test = x_test.transpose(0, 2, 3, 1) + return ((x_train, y_train), (x_test, y_test)) + +# File: keras-master/keras/src/datasets/fashion_mnist.py +"""""" +import gzip +import os +import numpy as np +from keras.src.api_export import keras_export +from keras.src.utils.file_utils import get_file + +@keras_export('keras.datasets.fashion_mnist.load_data') +def load_data(): + dirname = os.path.join('datasets', 'fashion-mnist') + base = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' + files = ['train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'] + paths = [] + for fname in files: + paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname)) + with gzip.open(paths[0], 'rb') as lbpath: + y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8) + with gzip.open(paths[1], 'rb') as imgpath: + x_train = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28) + with gzip.open(paths[2], 'rb') as lbpath: + y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8) + with gzip.open(paths[3], 'rb') as imgpath: + x_test = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28) + return ((x_train, y_train), (x_test, y_test)) + +# File: keras-master/keras/src/datasets/imdb.py +"""""" +import json +import numpy as np +from keras.src.api_export import keras_export +from keras.src.utils.file_utils import get_file +from keras.src.utils.python_utils import remove_long_seq + +@keras_export('keras.datasets.imdb.load_data') +def load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3, **kwargs): + origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' + path = get_file(fname=path, origin=origin_folder + 'imdb.npz', file_hash='69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f') + with np.load(path, allow_pickle=True) as f: + (x_train, labels_train) = (f['x_train'], f['y_train']) + (x_test, labels_test) = (f['x_test'], f['y_test']) + rng = np.random.RandomState(seed) + indices = np.arange(len(x_train)) + rng.shuffle(indices) + x_train = x_train[indices] + labels_train = labels_train[indices] + indices = np.arange(len(x_test)) + rng.shuffle(indices) + x_test = x_test[indices] + labels_test = labels_test[indices] + if start_char is not None: + x_train = [[start_char] + [w + index_from for w in x] for x in x_train] + x_test = [[start_char] + [w + index_from for w in x] for x in x_test] + elif index_from: + x_train = [[w + index_from for w in x] for x in x_train] + x_test = [[w + index_from for w in x] for x in x_test] + else: + x_train = [[w for w in x] for x in x_train] + x_test = [[w for w in x] for x in x_test] + if maxlen: + (x_train, 
labels_train) = remove_long_seq(maxlen, x_train, labels_train) + (x_test, labels_test) = remove_long_seq(maxlen, x_test, labels_test) + if not x_train or not x_test: + raise ValueError(f'After filtering for sequences shorter than maxlen={str(maxlen)}, no sequence was kept. Increase maxlen.') + xs = x_train + x_test + labels = np.concatenate([labels_train, labels_test]) + if not num_words: + num_words = max((max(x) for x in xs)) + if oov_char is not None: + xs = [[w if skip_top <= w < num_words else oov_char for w in x] for x in xs] + else: + xs = [[w for w in x if skip_top <= w < num_words] for x in xs] + idx = len(x_train) + (x_train, y_train) = (np.array(xs[:idx], dtype='object'), labels[:idx]) + (x_test, y_test) = (np.array(xs[idx:], dtype='object'), labels[idx:]) + return ((x_train, y_train), (x_test, y_test)) + +@keras_export('keras.datasets.imdb.get_word_index') +def get_word_index(path='imdb_word_index.json'): + origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' + path = get_file(fname=path, origin=origin_folder + 'imdb_word_index.json', file_hash='bfafd718b763782e994055a2d397834f') + with open(path) as f: + return json.load(f) + +# File: keras-master/keras/src/datasets/mnist.py +"""""" +import numpy as np +from keras.src.api_export import keras_export +from keras.src.utils.file_utils import get_file + +@keras_export('keras.datasets.mnist.load_data') +def load_data(path='mnist.npz'): + origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' + path = get_file(fname=path, origin=origin_folder + 'mnist.npz', file_hash='731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1') + with np.load(path, allow_pickle=True) as f: + (x_train, y_train) = (f['x_train'], f['y_train']) + (x_test, y_test) = (f['x_test'], f['y_test']) + return ((x_train, y_train), (x_test, y_test)) + +# File: keras-master/keras/src/datasets/reuters.py +"""""" +import json +import numpy as np +from keras.src.api_export import keras_export +from keras.src.utils.file_utils import get_file +from keras.src.utils.python_utils import remove_long_seq + +@keras_export('keras.datasets.reuters.load_data') +def load_data(path='reuters.npz', num_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2, index_from=3): + origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' + path = get_file(fname=path, origin=origin_folder + 'reuters.npz', file_hash='d6586e694ee56d7a4e65172e12b3e987c03096cb01eab99753921ef915959916') + with np.load(path, allow_pickle=True) as f: + (xs, labels) = (f['x'], f['y']) + rng = np.random.RandomState(seed) + indices = np.arange(len(xs)) + rng.shuffle(indices) + xs = xs[indices] + labels = labels[indices] + if start_char is not None: + xs = [[start_char] + [w + index_from for w in x] for x in xs] + elif index_from: + xs = [[w + index_from for w in x] for x in xs] + if maxlen: + (xs, labels) = remove_long_seq(maxlen, xs, labels) + if not num_words: + num_words = max((max(x) for x in xs)) + if oov_char is not None: + xs = [[w if skip_top <= w < num_words else oov_char for w in x] for x in xs] + else: + xs = [[w for w in x if skip_top <= w < num_words] for x in xs] + idx = int(len(xs) * (1 - test_split)) + (x_train, y_train) = (np.array(xs[:idx], dtype='object'), np.array(labels[:idx])) + (x_test, y_test) = (np.array(xs[idx:], dtype='object'), np.array(labels[idx:])) + return ((x_train, y_train), (x_test, y_test)) + +@keras_export('keras.datasets.reuters.get_word_index') +def 
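# --- Editor's note: decoding an IMDB review. With the defaults above, word indices are shifted
# by `index_from=3`, and 1/2 are reserved for `start_char`/`oov_char`, hence the `?` fallback.
(x_train, y_train), _ = keras.datasets.imdb.load_data(num_words=10000)
word_index = keras.datasets.imdb.get_word_index()
inverted = {i + 3: w for (w, i) in word_index.items()}
print(' '.join(inverted.get(i, '?') for i in x_train[0]))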
get_word_index(path='reuters_word_index.json'): + origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' + path = get_file(path, origin=origin_folder + 'reuters_word_index.json', file_hash='4d44cc38712099c9e383dc6e5f11a921') + with open(path) as f: + return json.load(f) + +@keras_export('keras.datasets.reuters.get_label_names') +def get_label_names(): + return ('cocoa', 'grain', 'veg-oil', 'earn', 'acq', 'wheat', 'copper', 'housing', 'money-supply', 'coffee', 'sugar', 'trade', 'reserves', 'ship', 'cotton', 'carcass', 'crude', 'nat-gas', 'cpi', 'money-fx', 'interest', 'gnp', 'meal-feed', 'alum', 'oilseed', 'gold', 'tin', 'strategic-metal', 'livestock', 'retail', 'ipi', 'iron-steel', 'rubber', 'heat', 'jobs', 'lei', 'bop', 'zinc', 'orange', 'pet-chem', 'dlr', 'gas', 'silver', 'wpi', 'hog', 'lead') + +# File: keras-master/keras/src/distribution/__init__.py +from keras.src.distribution.distribution_lib import DataParallel +from keras.src.distribution.distribution_lib import DeviceMesh +from keras.src.distribution.distribution_lib import Distribution +from keras.src.distribution.distribution_lib import LayoutMap +from keras.src.distribution.distribution_lib import ModelParallel +from keras.src.distribution.distribution_lib import TensorLayout +from keras.src.distribution.distribution_lib import distribute_tensor +from keras.src.distribution.distribution_lib import distribution +from keras.src.distribution.distribution_lib import initialize +from keras.src.distribution.distribution_lib import list_devices +from keras.src.distribution.distribution_lib import set_distribution + +# File: keras-master/keras/src/distribution/distribution_lib.py +"""""" +import collections +import contextlib +import os +import re +import warnings +import numpy as np +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import distribution_lib +from keras.src.backend.common import global_state +DEFAULT_BATCH_DIM_NAME = 'batch' +GLOBAL_ATTRIBUTE_NAME = 'distribution' + +@keras_export('keras.distribution.list_devices') +def list_devices(device_type=None): + return distribution_lib.list_devices(device_type) + +@keras_export('keras.distribution.initialize') +def initialize(job_addresses=None, num_processes=None, process_id=None): + if job_addresses is None and 'KERAS_DISTRIBUTION_JOB_ADDRESSES' in os.environ: + job_addresses = os.environ['KERAS_DISTRIBUTION_JOB_ADDRESSES'] + if num_processes is None and 'KERAS_DISTRIBUTION_NUM_PROCESSES' in os.environ: + num_processes = int(os.environ['KERAS_DISTRIBUTION_NUM_PROCESSES']) + if process_id is None and 'KERAS_DISTRIBUTION_PROCESS_ID' in os.environ: + process_id = int(os.environ['KERAS_DISTRIBUTION_PROCESS_ID']) + distribution_lib.initialize(job_addresses, num_processes, process_id) + +@keras_export('keras.distribution.DeviceMesh') +class DeviceMesh: + + def __init__(self, shape, axis_names, devices=None): + if not shape or not axis_names: + raise ValueError(f'Shape and axis_names cannot be empty. Received: shape={shape}, axis_names={axis_names}') + if len(shape) != len(axis_names): + raise ValueError(f'Shape and axis_names should have same size. Received: shape={shape}, axis_names={axis_names}') + if devices is None: + devices = list_devices() + devices = np.array(devices) + if np.prod(shape) != np.prod(devices.shape): + raise ValueError(f'Shape does not match the number of devices. 
Received: shape={shape}; devices.shape={devices.shape}') + self._shape = shape + self._axis_names = axis_names + self._devices = np.reshape(devices, shape) + + @property + def shape(self): + return self._shape + + @property + def axis_names(self): + return self._axis_names + + @property + def devices(self): + return self._devices + + def __repr__(self): + return f'<{self.__class__.__name__} shape={self.shape}, axis_names={self.axis_names}>' + + def __str__(self): + return self.__repr__() + +@keras_export('keras.distribution.TensorLayout') +class TensorLayout: + + def __init__(self, axes, device_mesh=None): + self._axes = tuple(axes) + self._device_mesh = device_mesh + self._validate_axes() + + @property + def axes(self): + return self._axes + + @property + def device_mesh(self): + return self._device_mesh + + @device_mesh.setter + def device_mesh(self, device_mesh): + if self._device_mesh is not None: + raise ValueError(f'Cannot override device mesh value. Existing value is {self._device_mesh}') + self._device_mesh = device_mesh + self._validate_axes() + + def _validate_axes(self): + if self._device_mesh: + valid_axis_names = set(self._device_mesh.axis_names) + axis_names = set(self._axes) - set([None]) + if axis_names - valid_axis_names: + raise ValueError(f'Invalid axis names for Layout. Valid axis names: {valid_axis_names}, Got {axis_names}') + + def __repr__(self): + return f'<{self.__class__.__name__} axes={self.axes}, device_mesh={self.device_mesh}>' + + def __str__(self): + return self.__repr__() + +class Distribution: + + def __init__(self, device_mesh): + self._device_mesh = device_mesh + + def get_data_layout(self, data_shape): + raise NotImplementedError() + + def get_variable_layout(self, variable): + raise NotImplementedError() + + def get_tensor_layout(self, path): + raise NotImplementedError() + + @contextlib.contextmanager + def scope(self): + original_scope = distribution() + set_distribution(self) + try: + yield + finally: + set_distribution(original_scope) + + @property + def device_mesh(self): + return self._device_mesh + + def distribute_dataset(self, dataset): + raise NotImplementedError() + + def __repr__(self): + return f'<{self.__class__.__name__} device_mesh={self.device_mesh}>' + + def __str__(self): + return self.__repr__() + +@keras_export('keras.distribution.DataParallel') +class DataParallel(Distribution): + + def __init__(self, device_mesh=None, devices=None, auto_shard_dataset=True): + if device_mesh: + self._initialize_with_device_mesh(device_mesh) + elif devices: + self._initialize_mesh_from_devices(devices) + else: + self._initialize_mesh_from_list_devices() + self._batch_dim_name = self.device_mesh.axis_names[0] + self._num_process = distribution_lib.num_processes() + self._process_id = distribution_lib.process_id() + self._is_multi_process = self._num_process > 1 + self._auto_shard_dataset = auto_shard_dataset + + def _initialize_with_device_mesh(self, device_mesh): + if not isinstance(device_mesh, DeviceMesh): + raise ValueError(f'Expect `mesh` to be an instance of `DeviceMesh`. Received: mesh={device_mesh} (of type {type(device_mesh)})') + super().__init__(device_mesh) + if self.device_mesh.devices.ndim != 1: + warnings.warn('Expect the input mesh to be 1D, but received mesh.devices.ndim=%d. 
The first axis will be used for data-parallel sharding.', device_mesh.devices.ndim) + + def _initialize_mesh_from_devices(self, devices): + devices = np.array(devices) + device_mesh = DeviceMesh(shape=devices.shape, axis_names=[DEFAULT_BATCH_DIM_NAME], devices=devices) + super().__init__(device_mesh) + + def _initialize_mesh_from_list_devices(self): + devices = np.array(list_devices()) + device_mesh = DeviceMesh(shape=devices.shape, axis_names=[DEFAULT_BATCH_DIM_NAME], devices=devices) + super().__init__(device_mesh) + + def get_data_layout(self, data_shape): + data_shard_spec = [None] * len(data_shape) + data_shard_spec[0] = self._batch_dim_name + return TensorLayout(data_shard_spec, self.device_mesh) + + def get_variable_layout(self, variable): + variable_shard_spec = [None] * len(variable.shape) + return TensorLayout(variable_shard_spec, self.device_mesh) + + def get_tensor_layout(self, path): + return None + + def distribute_dataset(self, dataset): + from tensorflow.python.data.experimental.ops import distribute as tf_data_distribute + from keras.src.utils.module_utils import tensorflow as tf + if not isinstance(dataset, tf.data.Dataset): + raise ValueError(f'Only `tf.data.Dataset` is supported for sharding, got {type(dataset)}') + if not self._is_multi_process or not self._auto_shard_dataset: + return dataset + batch_size = tf_data_distribute.compute_batch_size(dataset) + if batch_size.numpy() < 0: + raise ValueError('The batch size of the input dataset is unknown. Please config the batch size for the input dataset, e.g via `dataset.batch(batch_size)`') + per_worker_batch_size = tf_data_distribute.batch_sizes_for_worker(global_batch_size=batch_size, num_workers=self._num_process, num_replicas_per_worker=1, worker_index=self._process_id) + distributed_dataset = dataset.rebatch(per_worker_batch_size) + distributed_dataset = tf_data_distribute._AutoShardDataset(distributed_dataset, num_workers=self._num_process, index=self._process_id, num_replicas=self._num_process) + return distributed_dataset.prefetch(tf.data.AUTOTUNE) + +@keras_export('keras.distribution.ModelParallel') +class ModelParallel(Distribution): + + def __init__(self, *, layout_map=None, batch_dim_name=None, **kwargs): + kwargs.pop('device_mesh', None) + if layout_map is None: + raise ValueError('You must specify a layout_map argument.') + if not isinstance(layout_map, LayoutMap): + raise ValueError(f'Argument `layout_map` must be a `LayoutMap` instance. 
Received: layout_map={layout_map}') + device_mesh = layout_map.device_mesh + super().__init__(device_mesh) + self._layout_map = layout_map + self._batch_dim_name = batch_dim_name or self.device_mesh.axis_names[0] + self._num_process = distribution_lib.num_processes() + self._process_id = distribution_lib.process_id() + self._is_multi_process = self._num_process > 1 + + def get_data_layout(self, data_shape): + data_shard_spec = [None] * len(data_shape) + data_shard_spec[0] = self._batch_dim_name + return TensorLayout(data_shard_spec, self.device_mesh) + + def get_variable_layout(self, variable): + variable_layout = self._layout_map[variable.path] + if variable_layout is not None: + return variable_layout + variable_shard_spec = [None] * len(variable.shape) + return TensorLayout(variable_shard_spec, self.device_mesh) + + def get_tensor_layout(self, path): + return self._layout_map[path] + + def distribute_dataset(self, dataset): + from tensorflow.python.data.experimental.ops import distribute as tf_data_distribute + from keras.src.utils.module_utils import tensorflow as tf + if not isinstance(dataset, tf.data.Dataset): + raise ValueError(f'Only `tf.data.Dataset` is supported for sharding, got {type(dataset)}') + if not self._is_multi_process: + return dataset + global_batch_size = tf_data_distribute.compute_batch_size(dataset) + if global_batch_size.numpy() < 0: + raise ValueError('The batch size of the input dataset is unknown. Please config the batch size for the input dataset, e.g via `dataset.batch(batch_size)`') + mesh_batch_dim_index = self.device_mesh.axis_names.index(self._batch_dim_name) + num_model_replicas = self.device_mesh.shape[mesh_batch_dim_index] + if num_model_replicas == 1: + return dataset.prefetch(tf.data.AUTOTUNE) + num_model_replicas_per_process = num_model_replicas / self._num_process + if num_model_replicas_per_process >= 1: + if global_batch_size % self._num_process != 0: + raise ValueError(f'Global batch size must be divisible by the number of processes. `global_batch_size`={global_batch_size} and `num_process`={self._num_process}') + per_process_batch_size = global_batch_size // self._num_process + distributed_dataset = dataset.rebatch(per_process_batch_size) + distributed_dataset = distributed_dataset.shard(num_shards=self._num_process, index=self._process_id) + return distributed_dataset.prefetch(tf.data.AUTOTUNE) + else: + if global_batch_size % num_model_replicas != 0: + raise ValueError(f'Global batch size must be divisible by the number of replicas. 
`global_batch_size`={global_batch_size} and `num_model_replicas`={num_model_replicas}') + per_process_batch_size = global_batch_size // num_model_replicas + distributed_dataset = dataset.rebatch(per_process_batch_size) + processes_per_replica = self._num_process // num_model_replicas + data_shard_id = self._process_id % processes_per_replica + distributed_dataset = distributed_dataset.shard(num_shards=num_model_replicas, index=data_shard_id) + return distributed_dataset.prefetch(tf.data.AUTOTUNE) + +@keras_export('keras.distribution.LayoutMap') +class LayoutMap(collections.abc.MutableMapping): + + def __init__(self, device_mesh): + self._layout_map = collections.OrderedDict() + self._device_mesh = device_mesh + + def __getitem__(self, key): + if key in self._layout_map: + return self._layout_map[key] + matching_keys = [] + for k in self._layout_map: + if re.search(k, key): + matching_keys.append(k) + if len(matching_keys) > 1: + raise ValueError(f"Path '{key}' matches multiple layout specification keys: {matching_keys}. Please make sure each tensor/variable path only matches at most one layout specification key in the LayoutMap.") + elif len(matching_keys) == 1: + return self._layout_map[matching_keys[0]] + return None + + def __setitem__(self, key, layout): + if key in self._layout_map: + raise ValueError(f'{key} already exist in the LayoutMap with value {self._layout_map[key]}. Please make sure to not use duplicated keys.') + if isinstance(layout, tuple): + layout = TensorLayout(axes=layout, device_mesh=None) + if not isinstance(layout, TensorLayout): + raise ValueError(f'{layout} should be a TensorLayout type, got {type(layout)}') + self._maybe_populate_device_mesh(layout) + self._layout_map[key] = layout + + def __delitem__(self, key): + return self._layout_map.pop(key) + + def __len__(self): + return len(self._layout_map) + + def __iter__(self): + return iter(self._layout_map) + + @property + def device_mesh(self): + return self._device_mesh + + def _maybe_populate_device_mesh(self, layout): + if layout.device_mesh is None and self.device_mesh is not None: + layout.device_mesh = self.device_mesh +LayoutMap.get.__doc__ = LayoutMap.__getitem__.__doc__ + +@keras_export('keras.distribution.distribute_tensor') +def distribute_tensor(tensor, layout): + if isinstance(tensor, KerasTensor): + return tensor + return distribution_lib.distribute_tensor(tensor, layout) + +@keras_export('keras.distribution.distribution') +def distribution(): + return global_state.get_global_attribute(GLOBAL_ATTRIBUTE_NAME) + +@keras_export('keras.distribution.set_distribution') +def set_distribution(value): + global_state.set_global_attribute(GLOBAL_ATTRIBUTE_NAME, value) + +# File: keras-master/keras/src/dtype_policies/__init__.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.dtype_policies import dtype_policy +from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES +from keras.src.dtype_policies.dtype_policy import DTypePolicy +from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy +from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy +from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy +from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap +ALL_OBJECTS = {DTypePolicy, FloatDTypePolicy, QuantizedDTypePolicy, QuantizedFloat8DTypePolicy, DTypePolicyMap} +ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS} + +@keras_export('keras.dtype_policies.serialize') +def 
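# --- Usage sketch (illustrative addition, not part of the Keras sources). ---
# `LayoutMap` keys are treated as regexes and matched against variable paths
# with `re.search`; tuple values are coerced to `TensorLayout` in
# `__setitem__`. The variable paths below are hypothetical and depend on the
# actual model; again assuming 8 devices for the (2, 4) mesh.
import keras

devices = keras.distribution.list_devices()
mesh = keras.distribution.DeviceMesh(
    shape=(2, 4), axis_names=('batch', 'model'), devices=devices
)
layout_map = keras.distribution.LayoutMap(mesh)
layout_map['.*dense.*kernel'] = (None, 'model')  # shard last dim over 'model'
layout_map['.*dense.*bias'] = ('model',)
distribution = keras.distribution.ModelParallel(layout_map=layout_map)
with distribution.scope():
    # Variables created here are laid out per the map; unmatched paths fall
    # back to a fully replicated layout (see get_variable_layout above).
    model = keras.Sequential([keras.Input(shape=(16,)), keras.layers.Dense(4)])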
serialize(dtype_policy): + from keras.src.saving import serialization_lib + return serialization_lib.serialize_keras_object(dtype_policy) + +@keras_export('keras.dtype_policies.deserialize') +def deserialize(config, custom_objects=None): + from keras.src.saving import serialization_lib + return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects) + +@keras_export('keras.dtype_policies.get') +def get(identifier): + from keras.src.dtype_policies.dtype_policy import _get_quantized_dtype_policy_by_str + if identifier is None: + return dtype_policy.dtype_policy() + if isinstance(identifier, DTypePolicy): + return identifier + if isinstance(identifier, dict): + return deserialize(identifier) + if isinstance(identifier, str): + if identifier.startswith(QUANTIZATION_MODES): + return _get_quantized_dtype_policy_by_str(identifier) + else: + return DTypePolicy(identifier) + try: + return DTypePolicy(backend.standardize_dtype(identifier)) + except: + raise ValueError(f'Cannot interpret `dtype` argument. Expected a string or an instance of DTypePolicy. Received: dtype={identifier}') + +# File: keras-master/keras/src/dtype_policies/dtype_policy.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state +QUANTIZATION_MODES = ('int8', 'float8') + +@keras_export(['keras.DTypePolicy', 'keras.dtype_policies.DTypePolicy', 'keras.mixed_precision.DTypePolicy', 'keras.mixed_precision.Policy']) +class DTypePolicy: + + def __init__(self, name=None): + if name is None: + name = dtype_policy().name + self._name = name + (self._compute_dtype, self._variable_dtype) = self._parse_name(name) + self._quantization_mode = None + + def _parse_name(self, name): + if not isinstance(name, str): + raise TypeError(f"'name' must be a string, such as 'mixed_float16'. Received: name={name} (of type {type(name)})") + if name == 'mixed_float16': + return ('float16', 'float32') + elif name == 'mixed_bfloat16': + return ('bfloat16', 'float32') + try: + dtype = backend.standardize_dtype(name) + return (dtype, dtype) + except ValueError: + raise ValueError(f"Cannot convert '{name}' to a mixed precision DTypePolicy. 
Valid policies include 'mixed_float16', 'mixed_bfloat16', and the name of any float dtype such as 'float32'.") + + @property + def variable_dtype(self): + return self._variable_dtype + + @property + def compute_dtype(self): + return self._compute_dtype + + @property + def name(self): + return self._name + + @property + def quantization_mode(self): + return self._quantization_mode + + def convert_input(self, x, autocast, dtype): + dtype = backend.standardize_dtype(dtype) + if backend.is_tensor(x): + if self._should_cast(x, autocast, dtype): + x = backend.cast(x, dtype=dtype) + return x + elif backend.is_keras_tensor(x): + if self._should_cast(x, autocast, dtype): + x = ops.cast(x, dtype=dtype) + return x + elif hasattr(x, '__array__'): + try: + x = backend.convert_to_tensor(x) + except TypeError: + x = backend.convert_to_tensor(x, dtype=dtype) + if self._should_cast(x, autocast, dtype): + x = backend.cast(x, dtype=dtype) + return x + return x + + def get_config(self): + return {'name': self.name} + + @classmethod + def from_config(cls, config): + return cls(**config) + + def __repr__(self): + class_name = self.__class__.__name__ + if class_name == 'FloatDTypePolicy': + class_name = 'DTypePolicy' + return f'<{class_name} "{self._name}">' + + def __eq__(self, other): + if self.__class__ in (DTypePolicy, FloatDTypePolicy): + if type(other) not in (DTypePolicy, FloatDTypePolicy): + return False + elif type(other) is not self.__class__: + return False + return self._name == other._name + + def _should_cast(self, x, autocast, dtype): + x_dtype = backend.standardize_dtype(x.dtype) + if autocast and backend.is_float_dtype(x_dtype) and (x_dtype != dtype): + return True + else: + return False + +@keras_export(['keras.FloatDTypePolicy', 'keras.dtype_policies.FloatDTypePolicy']) +class FloatDTypePolicy(DTypePolicy): + pass + +@keras_export('keras.dtype_policies.QuantizedDTypePolicy') +class QuantizedDTypePolicy(DTypePolicy): + + def __init__(self, mode, source_name=None): + if source_name is None: + source_name = dtype_policy().name + name = f'{mode}_from_{source_name}' + (self._compute_dtype, self._variable_dtype) = self._parse_name(source_name) + self._check_quantization_mode(mode, self._compute_dtype) + self._name = name + self._source_name = source_name + self._quantization_mode = mode + + def __eq__(self, other): + if super().__eq__(other) is False: + return False + return self._quantization_mode == other._quantization_mode and self._source_name == other._source_name + + def get_config(self): + return {'mode': self._quantization_mode, 'source_name': self._source_name} + + def _check_quantization_mode(self, mode, compute_dtype): + if mode not in QUANTIZATION_MODES: + raise ValueError(f'Invalid quantization mode. Expected one of {QUANTIZATION_MODES}. Received: mode={mode}') + if compute_dtype == 'float16' and mode == 'int8': + raise ValueError(f"Quantization mode='{mode}' doesn't work well with compute_dtype='float16'.") + +@keras_export('keras.dtype_policies.QuantizedFloat8DTypePolicy') +class QuantizedFloat8DTypePolicy(QuantizedDTypePolicy): + default_amax_history_length = 1024 + + def __init__(self, mode, source_name=None, amax_history_length=1024): + super().__init__(mode=mode, source_name=source_name) + if not isinstance(amax_history_length, int): + raise TypeError(f'`amax_history_length` must be an integer. 
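# --- Usage sketch (illustrative addition, not part of the Keras sources). ---
# Per `_parse_name` above, 'mixed_float16' computes in float16 while keeping
# variables in float32; a plain dtype name uses the same dtype for both.
import keras

policy = keras.dtype_policies.DTypePolicy('mixed_float16')
print(policy.compute_dtype, policy.variable_dtype)  # float16 float32
print(keras.dtype_policies.DTypePolicy('float32').variable_dtype)  # float32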
Received: amax_history_length={amax_history_length}') + self._amax_history_length = amax_history_length + + @property + def amax_history_length(self): + return self._amax_history_length + + def __eq__(self, other): + if super().__eq__(other) is False: + return False + return self._amax_history_length == other._amax_history_length + + def get_config(self): + config = super().get_config() + config.update({'amax_history_length': self.amax_history_length}) + return config + +@keras_export(['keras.config.set_dtype_policy', 'keras.mixed_precision.set_dtype_policy', 'keras.mixed_precision.set_global_policy']) +def set_dtype_policy(policy): + if not isinstance(policy, DTypePolicy): + if isinstance(policy, str): + if policy.startswith(QUANTIZATION_MODES): + policy = _get_quantized_dtype_policy_by_str(policy) + else: + policy = DTypePolicy(policy) + else: + raise ValueError(f"Invalid `policy` argument. Expected the string name of a policy (such as 'mixed_float16') or a `DTypePolicy` instance. Received: policy={policy} (of type {type(policy)})") + global_state.set_global_attribute('dtype_policy', policy) + +@keras_export(['keras.config.dtype_policy', 'keras.mixed_precision.dtype_policy', 'keras.mixed_precision.global_policy']) +def dtype_policy(): + policy = global_state.get_global_attribute('dtype_policy', None) + if policy is None: + policy = DTypePolicy(backend.floatx()) + set_dtype_policy(policy) + return policy + +def _get_quantized_dtype_policy_by_str(policy): + if not isinstance(policy, str): + raise TypeError(f'`policy` must be a string. Received: policy={policy}') + if not policy.startswith(QUANTIZATION_MODES): + raise ValueError('`policy` is incompatible with the current supported quantization.') + split_name = policy.split('_from_') + if len(split_name) != 2: + raise ValueError(f'Cannot convert `policy` into a valid pair (`mode`, `source_name`) to instantiate `QuantizedDTypePolicy`. Received: policy={policy}') + (mode, source_name) = split_name + if policy.startswith('int8'): + return QuantizedDTypePolicy(mode, source_name) + elif policy.startswith('float8'): + return QuantizedFloat8DTypePolicy(mode, source_name) + else: + raise NotImplementedError + +# File: keras-master/keras/src/dtype_policies/dtype_policy_map.py +import re +from collections.abc import MutableMapping +from keras.src import dtype_policies +from keras.src.api_export import keras_export +from keras.src.dtype_policies import DTypePolicy + +@keras_export(['keras.dtype_policies.DTypePolicyMap']) +class DTypePolicyMap(DTypePolicy, MutableMapping): + + def __init__(self, default_policy=None, policy_map=None): + if isinstance(default_policy, DTypePolicyMap): + raise ValueError('`default_policy` cannot be a `DTypePolicyMap`.') + if policy_map is not None and (not isinstance(policy_map, dict)): + raise TypeError(f'If specified, `policy_map` must be a dict. 
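# --- Usage sketch (illustrative addition, not part of the Keras sources). ---
# Quantized policy names follow the '<mode>_from_<source_name>' pattern
# parsed by `_get_quantized_dtype_policy_by_str` above:
import keras

policy = keras.dtype_policies.get('int8_from_mixed_bfloat16')
print(type(policy).__name__)  # QuantizedDTypePolicy
print(policy.quantization_mode, policy.compute_dtype, policy.variable_dtype)
# int8 bfloat16 float32
keras.config.set_dtype_policy('mixed_bfloat16')  # set the global default
print(keras.config.dtype_policy())  # <DTypePolicy "mixed_bfloat16">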
Received: policy_map={policy_map} of type {type(policy_map)}') + self._default_policy_arg = default_policy + self._default_policy = dtype_policies.get(default_policy) + self._policy_map = policy_map or dict() + + @property + def name(self): + return 'map_' + self.default_policy._name + + @property + def default_policy(self): + return dtype_policies.get(self._default_policy) + + @property + def variable_dtype(self): + return self.default_policy.variable_dtype + + @property + def compute_dtype(self): + return self.default_policy.compute_dtype + + @property + def quantization_mode(self): + return self.default_policy.quantization_mode + + def __getitem__(self, key): + if key in self._policy_map: + return self._policy_map[key] + matching_keys = [] + for k in self._policy_map: + if re.search(k, key): + matching_keys.append(k) + if len(matching_keys) > 1: + raise ValueError(f"Path '{key}' matches multiple dtype policy specification keys: {matching_keys}. Please make sure each path only matches at most one dtype policy specification key in the DTypePolicyMap.") + elif len(matching_keys) == 1: + return self._policy_map[matching_keys[0]] + return self.default_policy + + def __setitem__(self, key, policy): + if key in self._policy_map: + raise ValueError(f'{key} already exists in the DTypePolicyMap with value {self._policy_map[key]}. Please make sure to not use duplicated keys.') + try: + policy = dtype_policies.get(policy) + except Exception: + raise ValueError(f'Cannot interpret the assigned value by `keras.dtype_policies.get`. Received: {policy} of type {type(policy)}') + self._policy_map[key] = policy + + def __delitem__(self, key): + return self._policy_map.pop(key) + + def __contains__(self, key): + return key in self._policy_map + + def get_config(self): + from keras.src.saving import serialization_lib + policy_map = self._policy_map + if self._default_policy_arg is None: + for policy in policy_map.values(): + if isinstance(policy, dtype_policies.QuantizedDTypePolicy): + policy._name = None + policy._source_name = None + elif isinstance(policy, dtype_policies.DTypePolicy): + policy._name = None + return {'default_policy': self._default_policy_arg, 'policy_map': serialization_lib.serialize_keras_object(policy_map)} + + @classmethod + def from_config(cls, config, custom_objects=None): + from keras.src.saving import serialization_lib + config = config.copy() + config['policy_map'] = serialization_lib.deserialize_keras_object(config['policy_map'], custom_objects=custom_objects) + return cls(**config) + + def __len__(self): + return len(self._policy_map) + + def __iter__(self): + return iter(self._policy_map) + + def __repr__(self): + default_policy = self._default_policy.name if self._default_policy is not None else None + mapping = [] + for (k, v) in self._policy_map.items(): + mapping.append((k, v.name)) + return f'<{self.__class__.__name__} default_policy={default_policy}, mapping={mapping}>' + +# File: keras-master/keras/src/export/export_lib.py +"""""" +import inspect +import itertools +import string +from absl import logging +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.layers import Layer +from keras.src.models import Functional +from keras.src.models import Sequential +from keras.src.utils import io_utils +from keras.src.utils.module_utils import tensorflow as tf + +@keras_export('keras.export.ExportArchive') +class ExportArchive: + + def __init__(self): + self._endpoint_names = [] + self._endpoint_signatures = {} + 
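# --- Usage sketch (illustrative addition, not part of the Keras sources). ---
# `DTypePolicyMap` above resolves path regexes to per-layer policies and
# falls back to `default_policy` (the global policy when None; float32
# assumed here) on no match. String values are coerced by
# `dtype_policies.get` in `__setitem__`.
import keras

policy_map = keras.dtype_policies.DTypePolicyMap()
policy_map['dense_1.*'] = 'int8_from_float32'
print(policy_map['dense_1/kernel'].quantization_mode)  # int8
print(policy_map['conv_0/kernel'].name)  # float32 (default policy)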
self.tensorflow_version = tf.__version__ + self._tf_trackable = tf.__internal__.tracking.AutoTrackable() + self._tf_trackable.variables = [] + self._tf_trackable.trainable_variables = [] + self._tf_trackable.non_trainable_variables = [] + if backend.backend() == 'jax': + self._backend_variables = [] + self._backend_trainable_variables = [] + self._backend_non_trainable_variables = [] + if backend.backend() not in ('tensorflow', 'jax'): + raise NotImplementedError('The export API is only compatible with JAX and TF backends.') + + @property + def variables(self): + return self._tf_trackable.variables + + @property + def trainable_variables(self): + return self._tf_trackable.trainable_variables + + @property + def non_trainable_variables(self): + return self._tf_trackable.non_trainable_variables + + def track(self, resource): + if backend.backend() == 'tensorflow' and (not isinstance(resource, tf.__internal__.tracking.Trackable)): + raise ValueError(f"Invalid resource type. Expected an instance of a TensorFlow `Trackable` (such as a Keras `Layer` or `Model`). Received instead an object of type '{type(resource)}'. Object received: {resource}") + if backend.backend() == 'jax' and (not isinstance(resource, backend.jax.layer.JaxLayer)): + raise ValueError(f"Invalid resource type. Expected an instance of a JAX-based Keras `Layer` or `Model`. Received instead an object of type '{type(resource)}'. Object received: {resource}") + if isinstance(resource, Layer): + if not resource.built: + raise ValueError('The layer provided has not yet been built. It must be built before export.') + if not hasattr(self, '_tracked'): + self._tracked = [] + self._tracked.append(resource) + if isinstance(resource, Layer): + if backend.backend() == 'jax': + trainable_variables = tree.flatten(resource.trainable_variables) + non_trainable_variables = tree.flatten(resource.non_trainable_variables) + self._backend_trainable_variables += trainable_variables + self._backend_non_trainable_variables += non_trainable_variables + self._backend_variables = self._backend_trainable_variables + self._backend_non_trainable_variables + self._tf_trackable.trainable_variables += [tf.Variable(v) for v in trainable_variables] + self._tf_trackable.non_trainable_variables += [tf.Variable(v) for v in non_trainable_variables] + self._tf_trackable.variables = self._tf_trackable.trainable_variables + self._tf_trackable.non_trainable_variables + else: + self._tf_trackable.variables += resource.variables + self._tf_trackable.trainable_variables += resource.trainable_variables + self._tf_trackable.non_trainable_variables += resource.non_trainable_variables + + def add_endpoint(self, name, fn, input_signature=None, jax2tf_kwargs=None): + if name in self._endpoint_names: + raise ValueError(f"Endpoint name '{name}' is already taken.") + if jax2tf_kwargs and backend.backend() != 'jax': + raise ValueError(f"'jax2tf_kwargs' is only supported with the jax backend. 
Current backend: {backend.backend()}") + if input_signature: + if backend.backend() == 'tensorflow': + decorated_fn = tf.function(fn, input_signature=input_signature, autograph=False) + else: + + def stateless_fn(variables, *args, **kwargs): + state_mapping = zip(self._backend_variables, variables) + with StatelessScope(state_mapping=state_mapping) as scope: + output = fn(*args, **kwargs) + non_trainable_variables = [] + for var in self._backend_non_trainable_variables: + new_value = scope.get_current_value(var) + non_trainable_variables.append(new_value) + return (output, non_trainable_variables) + jax2tf_stateless_fn = self._convert_jax2tf_function(stateless_fn, input_signature, jax2tf_kwargs=jax2tf_kwargs) + + def stateful_fn(*args, **kwargs): + (output, non_trainable_variables) = jax2tf_stateless_fn(list(self._tf_trackable.variables), *args, **kwargs) + for (var, new_value) in zip(self._tf_trackable.non_trainable_variables, non_trainable_variables): + var.assign(new_value) + return output + fn_signature = inspect.signature(fn) + fn_parameters = list(fn_signature.parameters.values()) + stateful_fn.__signature__ = inspect.Signature(parameters=fn_parameters[0:len(input_signature)], return_annotation=fn_signature.return_annotation) + decorated_fn = tf.function(stateful_fn, input_signature=input_signature, autograph=False) + self._endpoint_signatures[name] = input_signature + elif isinstance(fn, tf.types.experimental.GenericFunction): + if not fn._list_all_concrete_functions(): + raise ValueError(f"The provided tf.function '{fn}' has never been called. To specify the expected shape and dtype of the function's arguments, you must either provide a function that has been called at least once, or alternatively pass an `input_signature` argument in `add_endpoint()`.") + decorated_fn = fn + else: + raise ValueError("If the `fn` argument provided is not a `tf.function`, you must provide an `input_signature` argument to specify the shape and dtype of the function arguments. Example:\n\nexport_archive.add_endpoint(\n name='call',\n fn=model.call,\n input_signature=[\n tf.TensorSpec(\n shape=(None, 224, 224, 3),\n dtype=tf.float32,\n )\n ],\n)") + setattr(self._tf_trackable, name, decorated_fn) + self._endpoint_names.append(name) + return decorated_fn + + def add_variable_collection(self, name, variables): + if not isinstance(variables, (list, tuple, set)): + raise ValueError(f"Expected `variables` to be a list/tuple/set. Received instead object of type '{type(variables)}'.") + if not all((isinstance(v, (tf.Variable, backend.Variable)) for v in variables)): + raise ValueError(f'Expected all elements in `variables` to be `tf.Variable` instances. Found instead the following types: {list(set((type(v) for v in variables)))}') + if backend.backend() == 'jax': + variables = tree.flatten(tree.map_structure(tf.Variable, variables)) + setattr(self._tf_trackable, name, list(variables)) + + def write_out(self, filepath, options=None, verbose=True): + if not self._endpoint_names: + raise ValueError('No endpoints have been set yet. 
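# --- Usage sketch (illustrative addition, not part of the Keras sources). ---
# Typical `ExportArchive` flow on the TensorFlow backend: track a built
# model, register an endpoint with an explicit `input_signature`, then write
# the SavedModel. '/tmp/my_export' and the shapes are placeholder choices.
import keras
import tensorflow as tf

model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(2)])
archive = keras.export.ExportArchive()
archive.track(model)
archive.add_endpoint(
    name='serve',
    fn=model.call,
    input_signature=[tf.TensorSpec(shape=(None, 4), dtype=tf.float32)],
)
archive.write_out('/tmp/my_export')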
Call add_endpoint().') + if backend.backend() == 'tensorflow': + self._filter_and_track_resources() + signatures = {} + for name in self._endpoint_names: + signatures[name] = self._get_concrete_fn(name) + if 'serving_default' not in self._endpoint_names: + signatures['serving_default'] = self._get_concrete_fn(self._endpoint_names[0]) + tf.saved_model.save(self._tf_trackable, filepath, options=options, signatures=signatures) + endpoints = '\n\n'.join((_print_signature(getattr(self._tf_trackable, name), name, verbose=verbose) for name in self._endpoint_names)) + io_utils.print_msg(f"Saved artifact at '{filepath}'. The following endpoints are available:\n\n{endpoints}") + + def _get_concrete_fn(self, endpoint): + if endpoint in self._endpoint_signatures: + return getattr(self._tf_trackable, endpoint) + else: + traces = getattr(self._tf_trackable, endpoint)._trackable_children('saved_model') + return list(traces.values())[0] + + def _get_variables_used_by_endpoints(self): + fns = [self._get_concrete_fn(name) for name in self._endpoint_names] + return _list_variables_used_by_fns(fns) + + def _filter_and_track_resources(self): + fns = [self._get_concrete_fn(name) for name in self._endpoint_names] + (tvs, ntvs) = _list_variables_used_by_fns(fns) + self._tf_trackable._all_variables = list(tvs + ntvs) + self._tf_trackable._misc_assets = [] + from keras.src.layers import IntegerLookup + from keras.src.layers import StringLookup + from keras.src.layers import TextVectorization + if hasattr(self, '_tracked'): + for root in self._tracked: + descendants = tf.train.TrackableView(root).descendants() + for trackable in descendants: + if isinstance(trackable, (IntegerLookup, StringLookup, TextVectorization)): + self._tf_trackable._misc_assets.append(trackable) + + def _convert_jax2tf_function(self, fn, input_signature, jax2tf_kwargs=None): + from jax.experimental import jax2tf + if jax2tf_kwargs is None: + jax2tf_kwargs = {} + if 'native_serialization' not in jax2tf_kwargs: + jax2tf_kwargs['native_serialization'] = self._check_device_compatible() + variables_shapes = self._to_polymorphic_shape(self._backend_variables, allow_none=False) + if 'polymorphic_shapes' in jax2tf_kwargs: + input_shapes = jax2tf_kwargs['polymorphic_shapes'] + else: + input_shapes = self._to_polymorphic_shape(input_signature) + jax2tf_kwargs['polymorphic_shapes'] = [variables_shapes] + input_shapes + return jax2tf.convert(fn, **jax2tf_kwargs) + + def _to_polymorphic_shape(self, struct, allow_none=True): + if allow_none: + dim_names = itertools.chain(string.ascii_lowercase, itertools.starmap(lambda a, b: a + b, itertools.product(string.ascii_lowercase, repeat=2))) + + def convert_shape(x): + poly_shape = [] + for (index, dim) in enumerate(list(x.shape)): + if dim is not None: + poly_shape.append(str(dim)) + elif not allow_none: + raise ValueError(f'Illegal None dimension in {x} with shape {x.shape}') + elif index == 0: + poly_shape.append('batch') + else: + poly_shape.append(next(dim_names)) + return '(' + ', '.join(poly_shape) + ')' + return tree.map_structure(convert_shape, struct) + + def _check_device_compatible(self): + from jax import default_backend as jax_device + if jax_device() == 'gpu' and len(tf.config.list_physical_devices('GPU')) == 0: + logging.warning('JAX backend is using GPU for export, but installed TF package cannot access GPU, so reloading the model with the TF runtime in the same environment will not work. 
To use JAX-native serialization for high-performance export and serving, please install `tensorflow-gpu` and ensure CUDA version compatibility between your JAX and TF installations.') + return False + else: + return True + +def export_model(model, filepath, verbose=True): + export_archive = ExportArchive() + export_archive.track(model) + if isinstance(model, (Functional, Sequential)): + input_signature = tree.map_structure(_make_tensor_spec, model.inputs) + if isinstance(input_signature, list) and len(input_signature) > 1: + input_signature = [input_signature] + export_archive.add_endpoint('serve', model.__call__, input_signature) + else: + input_signature = _get_input_signature(model) + if not input_signature or not model._called: + raise ValueError('The model provided has never been called. It must be called at least once before export.') + export_archive.add_endpoint('serve', model.__call__, input_signature) + export_archive.write_out(filepath, verbose=verbose) + +def _get_input_signature(model): + shapes_dict = getattr(model, '_build_shapes_dict', None) + if not shapes_dict: + return None + + def make_tensor_spec(structure): + if isinstance(structure, dict): + return {k: make_tensor_spec(v) for (k, v) in structure.items()} + elif isinstance(structure, tuple): + if all((isinstance(d, (int, type(None))) for d in structure)): + return tf.TensorSpec(shape=(None,) + structure[1:], dtype=model.input_dtype) + return tuple((make_tensor_spec(v) for v in structure)) + elif isinstance(structure, list): + if all((isinstance(d, (int, type(None))) for d in structure)): + return tf.TensorSpec(shape=[None] + structure[1:], dtype=model.input_dtype) + return [make_tensor_spec(v) for v in structure] + else: + raise ValueError(f'Unsupported type {type(structure)} for {structure}') + return [make_tensor_spec(value) for value in shapes_dict.values()] + +@keras_export('keras.layers.TFSMLayer') +class TFSMLayer(Layer): + + def __init__(self, filepath, call_endpoint='serve', call_training_endpoint=None, trainable=True, name=None, dtype=None): + if backend.backend() != 'tensorflow': + raise NotImplementedError('The TFSMLayer is currently only supported with the TensorFlow backend.') + super().__init__(trainable=trainable, name=name, dtype=dtype) + self._reloaded_obj = tf.saved_model.load(filepath) + self.filepath = filepath + self.call_endpoint = call_endpoint + self.call_training_endpoint = call_training_endpoint + if hasattr(self._reloaded_obj, call_endpoint): + self.call_endpoint_fn = getattr(self._reloaded_obj, call_endpoint) + elif call_endpoint in self._reloaded_obj.signatures: + self.call_endpoint_fn = self._reloaded_obj.signatures[call_endpoint] + else: + raise ValueError(f"The endpoint '{call_endpoint}' is neither an attribute of the reloaded SavedModel, nor an entry in the `signatures` field of the reloaded SavedModel. Select another endpoint via the `call_endpoint` argument. Available endpoints for this SavedModel: {list(self._reloaded_obj.signatures.keys())}") + if call_training_endpoint: + if hasattr(self._reloaded_obj, call_training_endpoint): + self.call_training_endpoint_fn = getattr(self._reloaded_obj, call_training_endpoint) + elif call_training_endpoint in self._reloaded_obj.signatures: + self.call_training_endpoint_fn = self._reloaded_obj.signatures[call_training_endpoint] + else: + raise ValueError(f"The endpoint '{call_training_endpoint}' is neither an attribute of the reloaded SavedModel, nor an entry in the `signatures` field of the reloaded SavedModel. 
Available endpoints for this SavedModel: {list(self._reloaded_obj.signatures.keys())}") + all_fns = [self.call_endpoint_fn] + if call_training_endpoint: + all_fns.append(self.call_training_endpoint_fn) + (tvs, ntvs) = _list_variables_used_by_fns(all_fns) + for v in tvs: + self._add_existing_weight(v) + for v in ntvs: + self._add_existing_weight(v) + self.built = True + + def _add_existing_weight(self, weight): + self._track_variable(weight) + + def call(self, inputs, training=False, **kwargs): + if training: + if self.call_training_endpoint: + return self.call_training_endpoint_fn(inputs, **kwargs) + return self.call_endpoint_fn(inputs, **kwargs) + + def get_config(self): + base_config = super().get_config() + config = {'filepath': self.filepath, 'call_endpoint': self.call_endpoint, 'call_training_endpoint': self.call_training_endpoint} + return {**base_config, **config} + +def _make_tensor_spec(x): + shape = (None,) + x.shape[1:] + return tf.TensorSpec(shape, dtype=x.dtype, name=x.name) + +def _print_signature(fn, name, verbose=True): + concrete_fn = fn._list_all_concrete_functions()[0] + pprinted_signature = concrete_fn.pretty_printed_signature(verbose=verbose) + lines = pprinted_signature.split('\n') + lines = [f"* Endpoint '{name}'"] + lines[1:] + endpoint = '\n'.join(lines) + return endpoint + +def _list_variables_used_by_fns(fns): + trainable_variables = [] + non_trainable_variables = [] + trainable_variables_ids = set() + non_trainable_variables_ids = set() + for fn in fns: + if hasattr(fn, 'concrete_functions'): + concrete_functions = fn.concrete_functions + elif hasattr(fn, 'get_concrete_function'): + concrete_functions = [fn.get_concrete_function()] + else: + concrete_functions = [fn] + for concrete_fn in concrete_functions: + for v in concrete_fn.trainable_variables: + if id(v) not in trainable_variables_ids: + trainable_variables.append(v) + trainable_variables_ids.add(id(v)) + for v in concrete_fn.variables: + if id(v) not in trainable_variables_ids and id(v) not in non_trainable_variables_ids: + non_trainable_variables.append(v) + non_trainable_variables_ids.add(id(v)) + return (trainable_variables, non_trainable_variables) + +# File: keras-master/keras/src/initializers/__init__.py +import inspect +from keras.src.api_export import keras_export +from keras.src.initializers.constant_initializers import Constant +from keras.src.initializers.constant_initializers import Identity +from keras.src.initializers.constant_initializers import Ones +from keras.src.initializers.constant_initializers import Zeros +from keras.src.initializers.initializer import Initializer +from keras.src.initializers.random_initializers import GlorotNormal +from keras.src.initializers.random_initializers import GlorotUniform +from keras.src.initializers.random_initializers import HeNormal +from keras.src.initializers.random_initializers import HeUniform +from keras.src.initializers.random_initializers import LecunNormal +from keras.src.initializers.random_initializers import LecunUniform +from keras.src.initializers.random_initializers import OrthogonalInitializer +from keras.src.initializers.random_initializers import RandomNormal +from keras.src.initializers.random_initializers import RandomUniform +from keras.src.initializers.random_initializers import TruncatedNormal +from keras.src.initializers.random_initializers import VarianceScaling +from keras.src.saving import serialization_lib +from keras.src.utils.naming import to_snake_case +ALL_OBJECTS = {Initializer, Constant, Identity, Ones, Zeros, 
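# --- Usage sketch (illustrative addition, not part of the Keras sources). ---
# Round trip: `model.export()` uses `export_model()` above; `TFSMLayer`
# then reloads the artifact as an inference-only layer. '/tmp/my_export'
# is a placeholder path.
import numpy as np
import keras

model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(2)])
model.export('/tmp/my_export')
reloaded = keras.layers.TFSMLayer('/tmp/my_export', call_endpoint='serve')
print(reloaded(np.zeros((1, 4), dtype='float32')).shape)  # (1, 2)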
GlorotNormal, GlorotUniform, HeNormal, HeUniform, LecunNormal, LecunUniform, RandomNormal, TruncatedNormal, RandomUniform, VarianceScaling, OrthogonalInitializer} +ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS} +ALL_OBJECTS_DICT.update({to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}) +ALL_OBJECTS_DICT.update({'uniform': RandomUniform, 'normal': RandomNormal, 'orthogonal': OrthogonalInitializer, 'Orthogonal': OrthogonalInitializer, 'one': Ones, 'zero': Zeros}) + +@keras_export('keras.initializers.serialize') +def serialize(initializer): + return serialization_lib.serialize_keras_object(initializer) + +@keras_export('keras.initializers.deserialize') +def deserialize(config, custom_objects=None): + return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects) + +@keras_export('keras.initializers.get') +def get(identifier): + if identifier is None: + return None + if isinstance(identifier, dict): + obj = deserialize(identifier) + elif isinstance(identifier, str): + config = {'class_name': str(identifier), 'config': {}} + obj = deserialize(config) + else: + obj = identifier + if callable(obj): + if inspect.isclass(obj): + obj = obj() + return obj + else: + raise ValueError(f'Could not interpret initializer identifier: {identifier}') + +# File: keras-master/keras/src/initializers/constant_initializers.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend import standardize_dtype +from keras.src.initializers.initializer import Initializer +from keras.src.saving import serialization_lib + +@keras_export(['keras.initializers.Constant', 'keras.initializers.constant']) +class Constant(Initializer): + + def __init__(self, value=0.0): + self.value = value + + def __call__(self, shape, dtype=None): + dtype = standardize_dtype(dtype) + return ops.cast(self.value, dtype=dtype) * ops.ones(shape=shape, dtype=dtype) + + def get_config(self): + return {'value': serialization_lib.serialize_keras_object(self.value)} + + @classmethod + def from_config(cls, config): + value = serialization_lib.deserialize_keras_object(config['value']) + return cls(value) + +@keras_export(['keras.initializers.Zeros', 'keras.initializers.zeros']) +class Zeros(Initializer): + + def __call__(self, shape, dtype=None): + dtype = standardize_dtype(dtype) + return ops.zeros(shape, dtype=dtype) + +@keras_export(['keras.initializers.Ones', 'keras.initializers.ones']) +class Ones(Initializer): + + def __call__(self, shape, dtype=None): + dtype = standardize_dtype(dtype) + return ops.ones(shape, dtype=dtype) + +@keras_export(['keras.initializers.IdentityInitializer', 'keras.initializers.Identity', 'keras.initializers.identity']) +class Identity(Initializer): + + def __init__(self, gain=1.0): + self.gain = gain + + def __call__(self, shape, dtype=None): + if len(shape) != 2: + raise ValueError(f'Identity matrix initializer can only be used for 2D matrices. 
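# --- Usage sketch (illustrative addition, not part of the Keras sources). ---
# `get()` resolves classes, snake_case names, and the extra aliases
# registered above ('uniform', 'normal', 'one', 'zero', ...):
import keras

init = keras.initializers.get('glorot_uniform')
print(type(init).__name__)  # GlorotUniform
values = init(shape=(3, 3))
print(values.shape)  # (3, 3)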
Received: shape={shape} of rank {len(shape)}.') + dtype = standardize_dtype(dtype) + return self.gain * ops.eye(*shape, dtype=dtype) + +# File: keras-master/keras/src/initializers/initializer.py +from keras.src.api_export import keras_export + +@keras_export(['keras.Initializer', 'keras.initializers.Initializer']) +class Initializer: + + def __call__(self, shape, dtype=None): + raise NotImplementedError('Initializer subclasses must implement the `__call__()` method.') + + def get_config(self): + return {} + + @classmethod + def from_config(cls, config): + return cls(**config) + + def clone(self): + return self.__class__.from_config(self.get_config()) + +# File: keras-master/keras/src/initializers/random_initializers.py +import math +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend import random +from keras.src.initializers.initializer import Initializer +from keras.src.saving import serialization_lib + +class RandomInitializer(Initializer): + + def __init__(self, seed=None): + self._init_seed = seed + if seed is None: + seed = random.make_default_seed() + elif isinstance(seed, dict): + seed = serialization_lib.deserialize_keras_object(seed) + elif not isinstance(seed, (int, random.SeedGenerator)): + raise ValueError(f'`seed` argument should be an instance of `keras.random.SeedGenerator()` or an integer. Received: seed={seed}') + self.seed = seed + + def get_config(self): + seed_config = serialization_lib.serialize_keras_object(self._init_seed) + return {'seed': seed_config} + +@keras_export(['keras.initializers.RandomNormal', 'keras.initializers.random_normal']) +class RandomNormal(RandomInitializer): + + def __init__(self, mean=0.0, stddev=0.05, seed=None): + self.mean = mean + self.stddev = stddev + super().__init__(seed=seed) + + def __call__(self, shape, dtype=None): + return random.normal(shape=shape, mean=self.mean, stddev=self.stddev, seed=self.seed, dtype=dtype) + + def get_config(self): + base_config = super().get_config() + config = {'mean': self.mean, 'stddev': self.stddev} + return {**base_config, **config} + +@keras_export(['keras.initializers.TruncatedNormal', 'keras.initializers.truncated_normal']) +class TruncatedNormal(RandomInitializer): + + def __init__(self, mean=0.0, stddev=0.05, seed=None): + self.mean = mean + self.stddev = stddev + super().__init__(seed=seed) + + def __call__(self, shape, dtype=None): + return random.truncated_normal(shape=shape, mean=self.mean, stddev=self.stddev, seed=self.seed, dtype=dtype) + + def get_config(self): + base_config = super().get_config() + config = {'mean': self.mean, 'stddev': self.stddev} + return {**base_config, **config} + +@keras_export(['keras.initializers.RandomUniform', 'keras.initializers.random_uniform']) +class RandomUniform(RandomInitializer): + + def __init__(self, minval=-0.05, maxval=0.05, seed=None): + self.minval = minval + self.maxval = maxval + super().__init__(seed=seed) + + def __call__(self, shape, dtype=None): + return random.uniform(shape=shape, minval=self.minval, maxval=self.maxval, seed=self.seed, dtype=dtype) + + def get_config(self): + base_config = super().get_config() + config = {'minval': self.minval, 'maxval': self.maxval} + return {**base_config, **config} + +@keras_export(['keras.initializers.VarianceScaling', 'keras.initializers.variance_scaling']) +class VarianceScaling(RandomInitializer): + + def __init__(self, scale=1.0, mode='fan_in', distribution='truncated_normal', seed=None): + if scale <= 0.0: + raise ValueError(f'Argument `scale` must be 
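# --- Usage sketch (illustrative addition, not part of the Keras sources). ---
# A hedged sketch of `RandomInitializer` seeding: an int seed is stateless
# (identical draws on every call), while a `keras.random.SeedGenerator`
# advances between calls.
import keras

init = keras.initializers.RandomNormal(stddev=0.05, seed=1337)
a = init(shape=(2, 2))
b = init(shape=(2, 2))  # same values as `a`
gen = keras.random.SeedGenerator(1337)
init2 = keras.initializers.RandomNormal(stddev=0.05, seed=gen)
c = init2(shape=(2, 2))
d = init2(shape=(2, 2))  # different values from `c`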
positive float. Received: scale={scale}') + allowed_modes = {'fan_in', 'fan_out', 'fan_avg'} + if mode not in allowed_modes: + raise ValueError(f'Invalid `mode` argument: {mode}. Please use one of {allowed_modes}') + distribution = distribution.lower() + if distribution == 'normal': + distribution = 'truncated_normal' + allowed_distributions = {'uniform', 'truncated_normal', 'untruncated_normal'} + if distribution not in allowed_distributions: + raise ValueError(f'Invalid `distribution` argument: {distribution}.Please use one of {allowed_distributions}') + self.scale = scale + self.mode = mode + self.distribution = distribution + super().__init__(seed=seed) + + def __call__(self, shape, dtype=None): + scale = self.scale + (fan_in, fan_out) = compute_fans(shape) + if self.mode == 'fan_in': + scale /= max(1.0, fan_in) + elif self.mode == 'fan_out': + scale /= max(1.0, fan_out) + else: + scale /= max(1.0, (fan_in + fan_out) / 2.0) + if self.distribution == 'truncated_normal': + stddev = math.sqrt(scale) / 0.8796256610342398 + return random.truncated_normal(shape, mean=0.0, stddev=stddev, dtype=dtype, seed=self.seed) + elif self.distribution == 'untruncated_normal': + stddev = math.sqrt(scale) + return random.normal(shape, mean=0.0, stddev=stddev, dtype=dtype, seed=self.seed) + else: + limit = math.sqrt(3.0 * scale) + return random.uniform(shape, minval=-limit, maxval=limit, dtype=dtype, seed=self.seed) + + def get_config(self): + base_config = super().get_config() + config = {'scale': self.scale, 'mode': self.mode, 'distribution': self.distribution} + return {**base_config, **config} + +@keras_export(['keras.initializers.GlorotUniform', 'keras.initializers.glorot_uniform']) +class GlorotUniform(VarianceScaling): + + def __init__(self, seed=None): + super().__init__(scale=1.0, mode='fan_avg', distribution='uniform', seed=seed) + + def get_config(self): + return {'seed': serialization_lib.serialize_keras_object(self._init_seed)} + +@keras_export(['keras.initializers.GlorotNormal', 'keras.initializers.glorot_normal']) +class GlorotNormal(VarianceScaling): + + def __init__(self, seed=None): + super().__init__(scale=1.0, mode='fan_avg', distribution='truncated_normal', seed=seed) + + def get_config(self): + return {'seed': serialization_lib.serialize_keras_object(self._init_seed)} + +@keras_export(['keras.initializers.LecunNormal', 'keras.initializers.lecun_normal']) +class LecunNormal(VarianceScaling): + + def __init__(self, seed=None): + super().__init__(scale=1.0, mode='fan_in', distribution='truncated_normal', seed=seed) + + def get_config(self): + return {'seed': serialization_lib.serialize_keras_object(self._init_seed)} + +@keras_export(['keras.initializers.LecunUniform', 'keras.initializers.lecun_uniform']) +class LecunUniform(VarianceScaling): + + def __init__(self, seed=None): + super().__init__(scale=1.0, mode='fan_in', distribution='uniform', seed=seed) + + def get_config(self): + return {'seed': serialization_lib.serialize_keras_object(self._init_seed)} + +@keras_export(['keras.initializers.HeNormal', 'keras.initializers.he_normal']) +class HeNormal(VarianceScaling): + + def __init__(self, seed=None): + super().__init__(scale=2.0, mode='fan_in', distribution='truncated_normal', seed=seed) + + def get_config(self): + return {'seed': serialization_lib.serialize_keras_object(self._init_seed)} + +@keras_export(['keras.initializers.HeUniform', 'keras.initializers.he_uniform']) +class HeUniform(VarianceScaling): + + def __init__(self, seed=None): + super().__init__(scale=2.0, mode='fan_in', 
distribution='uniform', seed=seed) + + def get_config(self): + return {'seed': serialization_lib.serialize_keras_object(self._init_seed)} + +def compute_fans(shape): + shape = tuple(shape) + if len(shape) < 1: + fan_in = fan_out = 1 + elif len(shape) == 1: + fan_in = fan_out = shape[0] + elif len(shape) == 2: + fan_in = shape[0] + fan_out = shape[1] + else: + receptive_field_size = 1 + for dim in shape[:-2]: + receptive_field_size *= dim + fan_in = shape[-2] * receptive_field_size + fan_out = shape[-1] * receptive_field_size + return (int(fan_in), int(fan_out)) + +@keras_export(['keras.initializers.OrthogonalInitializer', 'keras.initializers.Orthogonal', 'keras.initializers.orthogonal']) +class OrthogonalInitializer(RandomInitializer): + + def __init__(self, gain=1.0, seed=None): + self.gain = gain + super().__init__(seed=seed) + + def __call__(self, shape, dtype=None): + if len(shape) < 2: + raise ValueError(f'The tensor to initialize must be at least two-dimensional. Received: shape={shape} of rank {len(shape)}.') + num_rows = 1 + for dim in shape[:-1]: + num_rows *= dim + num_cols = shape[-1] + flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows)) + a = random.normal(flat_shape, seed=self.seed, dtype=dtype) + (q, r) = ops.qr(a) + d = ops.diag(r) + q *= ops.sign(d) + if num_rows < num_cols: + q = ops.transpose(q) + return self.gain * ops.reshape(q, shape) + + def get_config(self): + base_config = super().get_config() + config = {'gain': self.gain} + return {**base_config, **config} + +# File: keras-master/keras/src/layers/__init__.py +from keras.src.api_export import keras_export +from keras.src.layers.activations.activation import Activation +from keras.src.layers.activations.elu import ELU +from keras.src.layers.activations.leaky_relu import LeakyReLU +from keras.src.layers.activations.prelu import PReLU +from keras.src.layers.activations.relu import ReLU +from keras.src.layers.activations.softmax import Softmax +from keras.src.layers.attention.additive_attention import AdditiveAttention +from keras.src.layers.attention.attention import Attention +from keras.src.layers.attention.grouped_query_attention import GroupedQueryAttention +from keras.src.layers.attention.multi_head_attention import MultiHeadAttention +from keras.src.layers.convolutional.conv1d import Conv1D +from keras.src.layers.convolutional.conv1d_transpose import Conv1DTranspose +from keras.src.layers.convolutional.conv2d import Conv2D +from keras.src.layers.convolutional.conv2d_transpose import Conv2DTranspose +from keras.src.layers.convolutional.conv3d import Conv3D +from keras.src.layers.convolutional.conv3d_transpose import Conv3DTranspose +from keras.src.layers.convolutional.depthwise_conv1d import DepthwiseConv1D +from keras.src.layers.convolutional.depthwise_conv2d import DepthwiseConv2D +from keras.src.layers.convolutional.separable_conv1d import SeparableConv1D +from keras.src.layers.convolutional.separable_conv2d import SeparableConv2D +from keras.src.layers.core.dense import Dense +from keras.src.layers.core.einsum_dense import EinsumDense +from keras.src.layers.core.embedding import Embedding +from keras.src.layers.core.identity import Identity +from keras.src.layers.core.input_layer import Input +from keras.src.layers.core.input_layer import InputLayer +from keras.src.layers.core.lambda_layer import Lambda +from keras.src.layers.core.masking import Masking +from keras.src.layers.core.wrapper import Wrapper +from keras.src.layers.layer import Layer +from keras.src.layers.merging.add import Add +from 
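# --- Numeric check (illustrative addition, not part of the Keras sources). ---
# For a Conv2D kernel of shape (3, 3, 64, 128), `compute_fans` above gives
# receptive_field_size = 3 * 3 = 9, fan_in = 64 * 9 = 576 and
# fan_out = 128 * 9 = 1152. HeUniform (scale=2.0, mode='fan_in',
# distribution='uniform') then samples U(-limit, limit) with
# limit = sqrt(3 * 2.0 / 576) ~= 0.102. Note `compute_fans` is a private
# helper, hence the keras.src import.
import math
from keras.src.initializers.random_initializers import compute_fans

fan_in, fan_out = compute_fans((3, 3, 64, 128))
print(fan_in, fan_out)  # 576 1152
print(math.sqrt(3 * 2.0 / fan_in))  # ~0.10206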
keras.src.layers.merging.add import add +from keras.src.layers.merging.average import Average +from keras.src.layers.merging.average import average +from keras.src.layers.merging.concatenate import Concatenate +from keras.src.layers.merging.concatenate import concatenate +from keras.src.layers.merging.dot import Dot +from keras.src.layers.merging.dot import dot +from keras.src.layers.merging.maximum import Maximum +from keras.src.layers.merging.maximum import maximum +from keras.src.layers.merging.minimum import Minimum +from keras.src.layers.merging.minimum import minimum +from keras.src.layers.merging.multiply import Multiply +from keras.src.layers.merging.multiply import multiply +from keras.src.layers.merging.subtract import Subtract +from keras.src.layers.merging.subtract import subtract +from keras.src.layers.normalization.batch_normalization import BatchNormalization +from keras.src.layers.normalization.group_normalization import GroupNormalization +from keras.src.layers.normalization.layer_normalization import LayerNormalization +from keras.src.layers.normalization.spectral_normalization import SpectralNormalization +from keras.src.layers.normalization.unit_normalization import UnitNormalization +from keras.src.layers.pooling.average_pooling1d import AveragePooling1D +from keras.src.layers.pooling.average_pooling2d import AveragePooling2D +from keras.src.layers.pooling.average_pooling3d import AveragePooling3D +from keras.src.layers.pooling.global_average_pooling1d import GlobalAveragePooling1D +from keras.src.layers.pooling.global_average_pooling2d import GlobalAveragePooling2D +from keras.src.layers.pooling.global_average_pooling3d import GlobalAveragePooling3D +from keras.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D +from keras.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D +from keras.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D +from keras.src.layers.pooling.max_pooling1d import MaxPooling1D +from keras.src.layers.pooling.max_pooling2d import MaxPooling2D +from keras.src.layers.pooling.max_pooling3d import MaxPooling3D +from keras.src.layers.preprocessing.category_encoding import CategoryEncoding +from keras.src.layers.preprocessing.discretization import Discretization +from keras.src.layers.preprocessing.hashed_crossing import HashedCrossing +from keras.src.layers.preprocessing.hashing import Hashing +from keras.src.layers.preprocessing.image_preprocessing.auto_contrast import AutoContrast +from keras.src.layers.preprocessing.image_preprocessing.center_crop import CenterCrop +from keras.src.layers.preprocessing.image_preprocessing.random_brightness import RandomBrightness +from keras.src.layers.preprocessing.image_preprocessing.random_contrast import RandomContrast +from keras.src.layers.preprocessing.image_preprocessing.random_crop import RandomCrop +from keras.src.layers.preprocessing.image_preprocessing.random_flip import RandomFlip +from keras.src.layers.preprocessing.image_preprocessing.random_rotation import RandomRotation +from keras.src.layers.preprocessing.image_preprocessing.random_translation import RandomTranslation +from keras.src.layers.preprocessing.image_preprocessing.random_zoom import RandomZoom +from keras.src.layers.preprocessing.image_preprocessing.resizing import Resizing +from keras.src.layers.preprocessing.image_preprocessing.solarization import Solarization +from keras.src.layers.preprocessing.index_lookup import IndexLookup +from keras.src.layers.preprocessing.integer_lookup import 
IntegerLookup +from keras.src.layers.preprocessing.mel_spectrogram import MelSpectrogram +from keras.src.layers.preprocessing.normalization import Normalization +from keras.src.layers.preprocessing.rescaling import Rescaling +from keras.src.layers.preprocessing.string_lookup import StringLookup +from keras.src.layers.preprocessing.text_vectorization import TextVectorization +from keras.src.layers.regularization.activity_regularization import ActivityRegularization +from keras.src.layers.regularization.alpha_dropout import AlphaDropout +from keras.src.layers.regularization.dropout import Dropout +from keras.src.layers.regularization.gaussian_dropout import GaussianDropout +from keras.src.layers.regularization.gaussian_noise import GaussianNoise +from keras.src.layers.regularization.spatial_dropout import SpatialDropout1D +from keras.src.layers.regularization.spatial_dropout import SpatialDropout2D +from keras.src.layers.regularization.spatial_dropout import SpatialDropout3D +from keras.src.layers.reshaping.cropping1d import Cropping1D +from keras.src.layers.reshaping.cropping2d import Cropping2D +from keras.src.layers.reshaping.cropping3d import Cropping3D +from keras.src.layers.reshaping.flatten import Flatten +from keras.src.layers.reshaping.permute import Permute +from keras.src.layers.reshaping.repeat_vector import RepeatVector +from keras.src.layers.reshaping.reshape import Reshape +from keras.src.layers.reshaping.up_sampling1d import UpSampling1D +from keras.src.layers.reshaping.up_sampling2d import UpSampling2D +from keras.src.layers.reshaping.up_sampling3d import UpSampling3D +from keras.src.layers.reshaping.zero_padding1d import ZeroPadding1D +from keras.src.layers.reshaping.zero_padding2d import ZeroPadding2D +from keras.src.layers.reshaping.zero_padding3d import ZeroPadding3D +from keras.src.layers.rnn.bidirectional import Bidirectional +from keras.src.layers.rnn.conv_lstm1d import ConvLSTM1D +from keras.src.layers.rnn.conv_lstm2d import ConvLSTM2D +from keras.src.layers.rnn.conv_lstm3d import ConvLSTM3D +from keras.src.layers.rnn.gru import GRU +from keras.src.layers.rnn.gru import GRUCell +from keras.src.layers.rnn.lstm import LSTM +from keras.src.layers.rnn.lstm import LSTMCell +from keras.src.layers.rnn.rnn import RNN +from keras.src.layers.rnn.simple_rnn import SimpleRNN +from keras.src.layers.rnn.simple_rnn import SimpleRNNCell +from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells +from keras.src.layers.rnn.time_distributed import TimeDistributed +from keras.src.saving import serialization_lib + +@keras_export('keras.layers.serialize') +def serialize(layer): + return serialization_lib.serialize_keras_object(layer) + +@keras_export('keras.layers.deserialize') +def deserialize(config, custom_objects=None): + obj = serialization_lib.deserialize_keras_object(config, custom_objects=custom_objects) + if not isinstance(obj, Layer): + raise ValueError(f'`keras.layers.deserialize` was passed a `config` object that is not a `keras.layers.Layer`. 
Received: {config}') + return obj + +# File: keras-master/keras/src/layers/activations/activation.py +from keras.src import activations +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.Activation') +class Activation(Layer): + + def __init__(self, activation, **kwargs): + super().__init__(**kwargs) + self.supports_masking = True + self.activation = activations.get(activation) + self.built = True + + def call(self, inputs): + return self.activation(inputs) + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + config = {'activation': activations.serialize(self.activation)} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/activations/elu.py +from keras.src import activations +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.ELU') +class ELU(Layer): + + def __init__(self, alpha=1.0, **kwargs): + super().__init__(**kwargs) + self.alpha = alpha + self.supports_masking = True + self.built = True + + def call(self, inputs): + return activations.elu(inputs, alpha=self.alpha) + + def compute_output_shape(self, input_shape): + return input_shape + +# File: keras-master/keras/src/layers/activations/leaky_relu.py +import warnings +from keras.src import activations +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.LeakyReLU') +class LeakyReLU(Layer): + + def __init__(self, negative_slope=0.3, **kwargs): + if 'alpha' in kwargs: + negative_slope = kwargs.pop('alpha') + warnings.warn('Argument `alpha` is deprecated. Use `negative_slope` instead.') + super().__init__(**kwargs) + if negative_slope is None or negative_slope < 0: + raise ValueError(f'The negative_slope value of a Leaky ReLU layer cannot be None or negative value. Expected a float. 
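# --- Usage sketch (illustrative addition, not part of the Keras sources). ---
# `keras.layers.serialize`/`deserialize` round-trip a layer through its
# config dict:
import keras

layer = keras.layers.Activation('relu', name='act')
config = keras.layers.serialize(layer)
print(config['class_name'])  # Activation
clone = keras.layers.deserialize(config)
print(clone.name)  # act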
Received: negative_slope={negative_slope}') + self.negative_slope = negative_slope + self.supports_masking = True + self.built = True + + def call(self, inputs): + return activations.leaky_relu(inputs, negative_slope=self.negative_slope) + + def get_config(self): + config = super().get_config() + config.update({'negative_slope': self.negative_slope}) + return config + + def compute_output_shape(self, input_shape): + return input_shape + +# File: keras-master/keras/src/layers/activations/prelu.py +from keras.src import activations +from keras.src import constraints +from keras.src import initializers +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.PReLU') +class PReLU(Layer): + + def __init__(self, alpha_initializer='Zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None, **kwargs): + super().__init__(**kwargs) + self.supports_masking = True + self.alpha_initializer = initializers.get(alpha_initializer) + self.alpha_regularizer = regularizers.get(alpha_regularizer) + self.alpha_constraint = constraints.get(alpha_constraint) + if shared_axes is None: + self.shared_axes = None + elif not isinstance(shared_axes, (list, tuple)): + self.shared_axes = [shared_axes] + else: + self.shared_axes = list(shared_axes) + + def build(self, input_shape): + param_shape = list(input_shape[1:]) + if self.shared_axes is not None: + for i in self.shared_axes: + param_shape[i - 1] = 1 + self.alpha = self.add_weight(shape=param_shape, name='alpha', initializer=self.alpha_initializer, regularizer=self.alpha_regularizer, constraint=self.alpha_constraint) + axes = {} + if self.shared_axes: + for i in range(1, len(input_shape)): + if i not in self.shared_axes: + axes[i] = input_shape[i] + self.input_spec = InputSpec(ndim=len(input_shape), axes=axes) + self.built = True + + def call(self, inputs): + pos = activations.relu(inputs) + neg = -self.alpha * activations.relu(-inputs) + return pos + neg + + def get_config(self): + config = super().get_config() + config.update({'alpha_initializer': initializers.serialize(self.alpha_initializer), 'alpha_regularizer': regularizers.serialize(self.alpha_regularizer), 'alpha_constraint': constraints.serialize(self.alpha_constraint), 'shared_axes': self.shared_axes}) + return config + + def compute_output_shape(self, input_shape): + return input_shape + +# File: keras-master/keras/src/layers/activations/relu.py +from keras.src import activations +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.ReLU') +class ReLU(Layer): + + def __init__(self, max_value=None, negative_slope=0.0, threshold=0.0, **kwargs): + super().__init__(**kwargs) + if max_value is not None and max_value < 0.0: + raise ValueError(f'max_value of a ReLU layer cannot be a negative value. Received: max_value={max_value}') + if negative_slope is None or negative_slope < 0.0: + raise ValueError(f'negative_slope of a ReLU layer cannot be a negative value. Received: negative_slope={negative_slope}') + if threshold is None or threshold < 0.0: + raise ValueError(f'threshold of a ReLU layer cannot be a negative value. 
# File: keras-master/keras/src/layers/activations/prelu.py
from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer

@keras_export('keras.layers.PReLU')
class PReLU(Layer):

    def __init__(self, alpha_initializer='Zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.alpha_initializer = initializers.get(alpha_initializer)
        self.alpha_regularizer = regularizers.get(alpha_regularizer)
        self.alpha_constraint = constraints.get(alpha_constraint)
        if shared_axes is None:
            self.shared_axes = None
        elif not isinstance(shared_axes, (list, tuple)):
            self.shared_axes = [shared_axes]
        else:
            self.shared_axes = list(shared_axes)

    def build(self, input_shape):
        param_shape = list(input_shape[1:])
        if self.shared_axes is not None:
            for i in self.shared_axes:
                param_shape[i - 1] = 1
        self.alpha = self.add_weight(shape=param_shape, name='alpha', initializer=self.alpha_initializer, regularizer=self.alpha_regularizer, constraint=self.alpha_constraint)
        axes = {}
        if self.shared_axes:
            for i in range(1, len(input_shape)):
                if i not in self.shared_axes:
                    axes[i] = input_shape[i]
        self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
        self.built = True

    def call(self, inputs):
        pos = activations.relu(inputs)
        neg = -self.alpha * activations.relu(-inputs)
        return pos + neg

    def get_config(self):
        config = super().get_config()
        config.update({'alpha_initializer': initializers.serialize(self.alpha_initializer), 'alpha_regularizer': regularizers.serialize(self.alpha_regularizer), 'alpha_constraint': constraints.serialize(self.alpha_constraint), 'shared_axes': self.shared_axes})
        return config

    def compute_output_shape(self, input_shape):
        return input_shape

# File: keras-master/keras/src/layers/activations/relu.py
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer

@keras_export('keras.layers.ReLU')
class ReLU(Layer):

    def __init__(self, max_value=None, negative_slope=0.0, threshold=0.0, **kwargs):
        super().__init__(**kwargs)
        if max_value is not None and max_value < 0.0:
            raise ValueError(f'max_value of a ReLU layer cannot be a negative value. Received: max_value={max_value}')
        if negative_slope is None or negative_slope < 0.0:
            raise ValueError(f'negative_slope of a ReLU layer cannot be a negative value. Received: negative_slope={negative_slope}')
        if threshold is None or threshold < 0.0:
            raise ValueError(f'threshold of a ReLU layer cannot be a negative value. Received: threshold={threshold}')
        self.max_value = max_value
        self.negative_slope = negative_slope
        self.threshold = threshold
        self.supports_masking = True
        self.built = True

    def call(self, inputs):
        return activations.relu(inputs, negative_slope=self.negative_slope, max_value=self.max_value, threshold=self.threshold)

    def get_config(self):
        config = super().get_config()
        config.update({'max_value': self.max_value, 'negative_slope': self.negative_slope, 'threshold': self.threshold})
        return config

    def compute_output_shape(self, input_shape):
        return input_shape

# File: keras-master/keras/src/layers/activations/softmax.py
from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer

def _large_negative_number(dtype):
    if backend.standardize_dtype(dtype) == 'float16':
        return -30000.0
    return -1000000000.0

@keras_export('keras.layers.Softmax')
class Softmax(Layer):

    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
        self.supports_masking = True
        self.built = True

    def call(self, inputs, mask=None):
        if mask is not None:
            adder = (1.0 - backend.cast(mask, inputs.dtype)) * _large_negative_number(inputs.dtype)
            inputs += adder
        if isinstance(self.axis, (tuple, list)):
            if len(self.axis) > 1:
                return backend.numpy.exp(inputs - backend.math.logsumexp(inputs, axis=self.axis, keepdims=True))
            else:
                return activations.softmax(inputs, axis=self.axis[0])
        return activations.softmax(inputs, axis=self.axis)

    def get_config(self):
        config = super().get_config()
        config.update({'axis': self.axis})
        return config

    def compute_output_shape(self, input_shape):
        return input_shape
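# --- Editor's note: added sketch (not upstream code) of the masking path in
# `Softmax.call` above: masked positions receive a large negative additive
# offset before the softmax, so they end up with near-zero probability.
# Assumes any Keras 3 backend.
import numpy as np
import keras

scores = np.array([[1.0, 2.0, 3.0]], dtype='float32')
mask = np.array([[True, True, False]])  # hide the last position
probs = keras.layers.Softmax(axis=-1)(scores, mask=mask)
# probs[0, 2] is ~0; probs[0, 0] and probs[0, 1] renormalize to sum to ~1.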
# File: keras-master/keras/src/layers/attention/additive_attention.py
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.attention.attention import Attention

@keras_export('keras.layers.AdditiveAttention')
class AdditiveAttention(Attention):

    def __init__(self, use_scale=True, dropout=0.0, **kwargs):
        super().__init__(use_scale=use_scale, dropout=dropout, **kwargs)

    def build(self, input_shape):
        self._validate_inputs(input_shape)
        dim = input_shape[0][-1]
        self.scale = None
        if self.use_scale:
            self.scale = self.add_weight(name='scale', shape=[dim], initializer='glorot_uniform', dtype=self.dtype, trainable=True)
        self.built = True

    def _calculate_scores(self, query, key):
        q_reshaped = ops.expand_dims(query, axis=-2)
        k_reshaped = ops.expand_dims(key, axis=-3)
        scale = self.scale if self.use_scale else 1.0
        return ops.sum(scale * ops.tanh(q_reshaped + k_reshaped), axis=-1)

    def get_config(self):
        base_config = super().get_config()
        del base_config['score_mode']
        return base_config

# File: keras-master/keras/src/layers/attention/attention.py
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer

@keras_export('keras.layers.Attention')
class Attention(Layer):

    def __init__(self, use_scale=False, score_mode='dot', dropout=0.0, seed=None, **kwargs):
        super().__init__(**kwargs)
        self.use_scale = use_scale
        self.score_mode = score_mode
        self.dropout = dropout
        if self.dropout > 0:
            self.seed_generator = backend.random.SeedGenerator(seed=seed)
        if self.score_mode not in ['dot', 'concat']:
            raise ValueError(f"Invalid value for argument score_mode. Expected one of {{'dot', 'concat'}}. Received: score_mode={score_mode}")

    def build(self, input_shape):
        self._validate_inputs(input_shape)
        self.scale = None
        self.concat_score_weight = None
        if self.use_scale:
            self.scale = self.add_weight(name='scale', shape=(), initializer='ones', dtype=self.dtype, trainable=True)
        if self.score_mode == 'concat':
            self.concat_score_weight = self.add_weight(name='concat_score_weight', shape=(), initializer='ones', dtype=self.dtype, trainable=True)
        self.built = True

    def _calculate_scores(self, query, key):
        if self.score_mode == 'dot':
            scores = ops.matmul(query, ops.transpose(key, axes=[0, 2, 1]))
            if self.scale is not None:
                scores *= self.scale
        elif self.score_mode == 'concat':
            q_reshaped = ops.expand_dims(query, axis=-2)
            k_reshaped = ops.expand_dims(key, axis=-3)
            if self.scale is not None:
                scores = self.concat_score_weight * ops.sum(ops.tanh(self.scale * (q_reshaped + k_reshaped)), axis=-1)
            else:
                scores = self.concat_score_weight * ops.sum(ops.tanh(q_reshaped + k_reshaped), axis=-1)
        return scores

    def _apply_scores(self, scores, value, scores_mask=None, training=False):
        if scores_mask is not None:
            padding_mask = ops.logical_not(scores_mask)
            max_value = 65504.0 if scores.dtype == 'float16' else 1000000000.0
            scores -= max_value * ops.cast(padding_mask, dtype=scores.dtype)
        weights = ops.softmax(scores, axis=-1)
        if training and self.dropout > 0:
            weights = backend.random.dropout(weights, self.dropout, seed=self.seed_generator)
        return (ops.matmul(weights, value), weights)

    def _calculate_score_mask(self, scores, v_mask, use_causal_mask):
        if use_causal_mask:
            score_shape = ops.shape(scores)
            mask_shape = (1, score_shape[-2], score_shape[-1])
            ones_mask = ops.ones(shape=mask_shape, dtype='int32')
            row_index = ops.cumsum(ones_mask, axis=-2)
            col_index = ops.cumsum(ones_mask, axis=-1)
            causal_mask = ops.greater_equal(row_index, col_index)
            if v_mask is not None:
                v_mask = ops.expand_dims(v_mask, axis=-2)
                return ops.logical_and(v_mask, causal_mask)
            return causal_mask
        else:
            return v_mask

    def call(self, inputs, mask=None, training=False, return_attention_scores=False, use_causal_mask=False):
        self._validate_inputs(inputs=inputs, mask=mask)
        q = inputs[0]
        v = inputs[1]
        k = inputs[2] if len(inputs) > 2 else v
        q_mask = mask[0] if mask else None
        v_mask = mask[1] if mask else None
        scores = self._calculate_scores(query=q, key=k)
        scores_mask = self._calculate_score_mask(scores, v_mask, use_causal_mask)
        (result, attention_scores) = self._apply_scores(scores=scores, value=v, scores_mask=scores_mask, training=training)
        if q_mask is not None:
            q_mask = ops.expand_dims(q_mask, axis=-1)
            result *= ops.cast(q_mask, dtype=result.dtype)
        if return_attention_scores:
            return (result, attention_scores)
        return result

    def compute_mask(self, inputs, mask=None):
        self._validate_inputs(inputs=inputs, mask=mask)
        if mask is None or mask[0] is None:
            return None
        return ops.convert_to_tensor(mask[0])

    def compute_output_shape(self, input_shape):
        return (*input_shape[0][:-1], input_shape[1][-1])
    def _validate_inputs(self, inputs, mask=None):
        class_name = self.__class__.__name__
        if not isinstance(inputs, list):
            raise ValueError(f'{class_name} layer must be called on a list of inputs, namely [query, value] or [query, value, key]. Received: inputs={inputs}.')
        if len(inputs) < 2 or len(inputs) > 3:
            raise ValueError(f'{class_name} layer accepts inputs list of length 2 or 3, namely [query, value] or [query, value, key]. Received length: {len(inputs)}.')
        if mask is not None:
            if not isinstance(mask, list):
                raise ValueError(f'{class_name} layer mask must be a list, namely [query_mask, value_mask]. Received: mask={mask}.')
            if len(mask) < 2 or len(mask) > 3:
                raise ValueError(f'{class_name} layer accepts mask list of length 2 or 3. Received: inputs={inputs}, mask={mask}.')

    def get_config(self):
        base_config = super().get_config()
        config = {'use_scale': self.use_scale, 'score_mode': self.score_mode, 'dropout': self.dropout}
        return {**base_config, **config}
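# --- Editor's note: added sketch (not upstream code) of the call convention for
# `Attention` above: inputs are a list [query, value] or [query, value, key],
# and `return_attention_scores=True` also returns the softmaxed score matrix.
import numpy as np
import keras

query = np.random.random((2, 4, 8)).astype('float32')  # (batch, query_steps, dim)
value = np.random.random((2, 6, 8)).astype('float32')  # (batch, value_steps, dim)
layer = keras.layers.Attention(use_scale=True)
out, scores = layer([query, value], return_attention_scores=True)
# out: (2, 4, 8) weighted values; scores: (2, 4, 6) attention distribution.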
# File: keras-master/keras/src/layers/attention/grouped_query_attention.py
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.activations.softmax import Softmax
from keras.src.layers.core.einsum_dense import EinsumDense
from keras.src.layers.layer import Layer
from keras.src.layers.regularization.dropout import Dropout

@keras_export('keras.layers.GroupQueryAttention')
class GroupedQueryAttention(Layer):

    def __init__(self, head_dim, num_query_heads, num_key_value_heads, dropout=0.0, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.head_dim = head_dim
        self.num_query_heads = num_query_heads
        self.num_key_value_heads = num_key_value_heads
        if num_query_heads % num_key_value_heads != 0:
            raise ValueError('`num_query_heads` must be divisible by `num_key_value_heads`.')
        self.num_repeats = num_query_heads // num_key_value_heads
        self.dropout = dropout
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

    def build(self, query_shape, value_shape, key_shape=None):
        key_shape = value_shape if key_shape is None else key_shape
        self.feature_dim = query_shape[-1]
        self._query_dense = EinsumDense('bqm,muh->bquh', output_shape=(None, self.num_query_heads, self.head_dim), bias_axes='uh' if self.use_bias else None, name='query', **self._get_common_kwargs_for_sublayer())
        self._query_dense.build(query_shape)
        self._key_dense = EinsumDense('bkm,mvh->bkvh', output_shape=(None, self.num_key_value_heads, self.head_dim), bias_axes='vh' if self.use_bias else None, name='key', **self._get_common_kwargs_for_sublayer())
        self._key_dense.build(key_shape)
        self._value_dense = EinsumDense('bkm,mvh->bkvh', output_shape=(None, self.num_key_value_heads, self.head_dim), bias_axes='vh' if self.use_bias else None, name='value', **self._get_common_kwargs_for_sublayer())
        self._value_dense.build(value_shape)
        self._softmax = Softmax(axis=-1, dtype=self.dtype_policy)
        self._dropout_layer = Dropout(rate=self.dropout, dtype=self.dtype_policy)
        self._dot_product_equation = 'bquh,bkuh->buqk'
        self._combine_equation = 'buqk,bkuh->bquh'
        self._output_dense = EinsumDense('bquh,uhm->bqm', output_shape=(None, self.feature_dim), bias_axes='m' if self.use_bias else None, name='attention_output', **self._get_common_kwargs_for_sublayer())
        self._output_dense.build((None, None, self.num_query_heads, self.head_dim))
        self.built = True

    def _get_common_kwargs_for_sublayer(self):
        common_kwargs = dict(kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer, activity_regularizer=self.activity_regularizer, kernel_constraint=self.kernel_constraint, bias_constraint=self.bias_constraint, dtype=self.dtype_policy)
        kernel_initializer = self.kernel_initializer.__class__.from_config(self.kernel_initializer.get_config())
        bias_initializer = self.bias_initializer.__class__.from_config(self.bias_initializer.get_config())
        common_kwargs['kernel_initializer'] = kernel_initializer
        common_kwargs['bias_initializer'] = bias_initializer
        return common_kwargs

    def call(self, query, value, key=None, query_mask=None, value_mask=None, key_mask=None, attention_mask=None, return_attention_scores=False, training=None, use_causal_mask=False):
        if key is None:
            key = value
        attention_mask = self._compute_attention_mask(query, value, query_mask=query_mask, value_mask=value_mask, key_mask=key_mask, attention_mask=attention_mask, use_causal_mask=use_causal_mask)
        query = self._query_dense(query)
        key = self._key_dense(key)
        value = self._value_dense(value)
        key = ops.repeat(key, self.num_repeats, axis=2)
        value = ops.repeat(value, self.num_repeats, axis=2)
        (output, scores) = self._compute_attention(query, key, value, attention_mask=attention_mask, training=training)
        output = self._output_dense(output)
        if return_attention_scores:
            return (output, scores)
        return output

    def _compute_attention_mask(self, query, value, query_mask=None, value_mask=None, key_mask=None, attention_mask=None, use_causal_mask=False):
        auto_mask = None
        if query_mask is not None:
            query_mask = ops.cast(query_mask, 'bool')
            auto_mask = ops.expand_dims(query_mask, -1)
        if value_mask is not None:
            value_mask = ops.cast(value_mask, 'bool')
            mask = ops.expand_dims(value_mask, -2)
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if key_mask is not None:
            key_mask = ops.cast(key_mask, 'bool')
            mask = ops.expand_dims(key_mask, -2)
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if use_causal_mask:
            mask = self._compute_causal_mask(query, value)
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if auto_mask is not None:
            attention_mask = auto_mask if attention_mask is None else ops.cast(attention_mask, bool) & auto_mask
        return attention_mask

    def _compute_causal_mask(self, query, value=None):
        q_seq_length = ops.shape(query)[1]
        v_seq_length = q_seq_length if value is None else ops.shape(value)[1]
        ones_mask = ops.ones((1, q_seq_length, v_seq_length), dtype='int32')
        row_index = ops.cumsum(ones_mask, axis=-2)
        col_index = ops.cumsum(ones_mask, axis=-1)
        return ops.greater_equal(row_index, col_index)
    def _compute_attention(self, query, key, value, attention_mask=None, training=None):
        query = ops.multiply(query, 1.0 / ops.sqrt(ops.cast(self.head_dim, query.dtype)))
        scores = ops.einsum(self._dot_product_equation, query, key)
        scores = self._masked_softmax(scores, attention_mask=attention_mask)
        scores_dropout = self._dropout_layer(scores, training=training)
        output = ops.einsum(self._combine_equation, scores_dropout, value)
        return (output, scores)

    def _masked_softmax(self, scores, attention_mask=None):
        if attention_mask is not None:
            mask_expansion_axis = -1 * 2 - 1
            for _ in range(len(scores.shape) - len(attention_mask.shape)):
                attention_mask = ops.expand_dims(attention_mask, axis=mask_expansion_axis)
        return self._softmax(scores, mask=attention_mask)

    def compute_output_shape(self, query_shape, value_shape, key_shape=None):
        if key_shape is None:
            key_shape = value_shape
        if query_shape[-1] != value_shape[-1]:
            raise ValueError(f'The last dimension of `query_shape` and `value_shape` must be equal, but are {query_shape[-1]}, {value_shape[-1]}. Received: query_shape={query_shape}, value_shape={value_shape}')
        if value_shape[1:-1] != key_shape[1:-1]:
            raise ValueError(f'All dimensions of `value` and `key`, except the last one, must be equal. Received: value_shape={value_shape} and key_shape={key_shape}')
        return query_shape

    def get_config(self):
        config = {'head_dim': self.head_dim, 'num_query_heads': self.num_query_heads, 'num_key_value_heads': self.num_key_value_heads, 'use_bias': self.use_bias, 'dropout': self.dropout, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint)}
        base_config = super().get_config()
        return {**base_config, **config}
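# --- Editor's note: added sketch (not upstream code) for the layer above. Note
# that the exported name is `GroupQueryAttention` (see the keras_export
# decorator), even though the class is GroupedQueryAttention. Each group of
# num_query_heads // num_key_value_heads query heads shares one key/value head;
# `call` repeats the key/value projections along the head axis to match.
import numpy as np
import keras

x = np.random.random((2, 10, 16)).astype('float32')
gqa = keras.layers.GroupQueryAttention(head_dim=8, num_query_heads=8, num_key_value_heads=2)
y = gqa(query=x, value=x)  # self-attention; output shape (2, 10, 16)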
# File: keras-master/keras/src/layers/attention/multi_head_attention.py
import collections
import math
import string
import numpy as np
from keras.src import backend
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.activations.softmax import Softmax
from keras.src.layers.core.einsum_dense import EinsumDense
from keras.src.layers.layer import Layer
from keras.src.layers.regularization.dropout import Dropout

@keras_export('keras.layers.MultiHeadAttention')
class MultiHeadAttention(Layer):

    def __init__(self, num_heads, key_dim, value_dim=None, dropout=0.0, use_bias=True, output_shape=None, attention_axes=None, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, seed=None, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
        self._num_heads = num_heads
        self._key_dim = key_dim
        self._inverse_sqrt_key_dim = None
        self._value_dim = value_dim if value_dim else key_dim
        self._dropout = dropout
        self._use_bias = use_bias
        self._output_shape = output_shape
        self._kernel_initializer = initializers.get(kernel_initializer)
        self._bias_initializer = initializers.get(bias_initializer)
        self._kernel_regularizer = regularizers.get(kernel_regularizer)
        self._bias_regularizer = regularizers.get(bias_regularizer)
        self._activity_regularizer = regularizers.get(activity_regularizer)
        self._kernel_constraint = constraints.get(kernel_constraint)
        self._bias_constraint = constraints.get(bias_constraint)
        if isinstance(attention_axes, int):
            attention_axes = (attention_axes,)
        elif attention_axes and (not isinstance(attention_axes, (list, tuple))):
            raise ValueError(f'`attention_axes` must be an int, list, or tuple. Received: attention_axes={attention_axes}')
        self._attention_axes = attention_axes
        self.seed = seed

    @property
    def num_heads(self):
        return self._num_heads

    @property
    def key_dim(self):
        return self._key_dim

    @property
    def value_dim(self):
        return self._value_dim

    @property
    def dropout(self):
        return self._dropout

    @property
    def use_bias(self):
        return self._use_bias

    @property
    def output_shape(self):
        return self._output_shape

    @property
    def attention_axes(self):
        return self._attention_axes

    def get_config(self):
        base_config = super().get_config()
        config = {'num_heads': self._num_heads, 'key_dim': self._key_dim, 'value_dim': self._value_dim, 'dropout': self._dropout, 'use_bias': self._use_bias, 'output_shape': self._output_shape, 'attention_axes': self._attention_axes, 'kernel_initializer': initializers.serialize(self._kernel_initializer), 'bias_initializer': initializers.serialize(self._bias_initializer), 'kernel_regularizer': regularizers.serialize(self._kernel_regularizer), 'bias_regularizer': regularizers.serialize(self._bias_regularizer), 'activity_regularizer': regularizers.serialize(self._activity_regularizer), 'kernel_constraint': constraints.serialize(self._kernel_constraint), 'bias_constraint': constraints.serialize(self._bias_constraint), 'seed': self.seed}
        return {**base_config, **config}
    def build(self, query_shape, value_shape, key_shape=None):
        key_shape = value_shape if key_shape is None else key_shape
        if query_shape[-1] != value_shape[-1]:
            raise ValueError(f'The last dimension of `query_shape` and `value_shape` must be equal, but are {query_shape[-1]}, {value_shape[-1]}. Received: query_shape={query_shape}, value_shape={value_shape}')
        if value_shape[1:-1] != key_shape[1:-1]:
            raise ValueError(f'All dimensions of `value` and `key`, except the last one, must be equal. Received: value_shape={value_shape} and key_shape={key_shape}')
        query_rank = len(query_shape)
        value_rank = len(value_shape)
        key_rank = len(key_shape)
        (einsum_equation, bias_axes, output_rank) = _build_proj_equation(query_rank - 1, bound_dims=1, output_dims=2)
        self._query_dense = EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, [self._num_heads, self._key_dim]), bias_axes=bias_axes if self._use_bias else None, name='query', **self._get_common_kwargs_for_sublayer())
        self._query_dense.build(query_shape)
        (einsum_equation, bias_axes, output_rank) = _build_proj_equation(key_rank - 1, bound_dims=1, output_dims=2)
        self._key_dense = EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, [self._num_heads, self._key_dim]), bias_axes=bias_axes if self._use_bias else None, name='key', **self._get_common_kwargs_for_sublayer())
        self._key_dense.build(key_shape)
        (einsum_equation, bias_axes, output_rank) = _build_proj_equation(value_rank - 1, bound_dims=1, output_dims=2)
        self._value_dense = EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, [self._num_heads, self._value_dim]), bias_axes=bias_axes if self._use_bias else None, name='value', **self._get_common_kwargs_for_sublayer())
        self._value_dense.build(value_shape)
        self._build_attention(output_rank)
        self._output_dense = self._make_output_dense(query_shape, self._get_common_kwargs_for_sublayer(), 'attention_output')
        output_dense_input_shape = list(self._query_dense.compute_output_shape(query_shape))
        output_dense_input_shape[-1] = self._value_dim
        self._output_dense.build(tuple(output_dense_input_shape))
        self.built = True

    @property
    def query_dense(self):
        return self._query_dense

    @property
    def key_dense(self):
        return self._key_dense

    @property
    def value_dense(self):
        return self._value_dense

    @property
    def output_dense(self):
        return self._output_dense

    def _get_common_kwargs_for_sublayer(self):
        common_kwargs = dict(kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, dtype=self.dtype_policy)
        kernel_initializer = self._kernel_initializer.__class__.from_config(self._kernel_initializer.get_config())
        bias_initializer = self._bias_initializer.__class__.from_config(self._bias_initializer.get_config())
        common_kwargs['kernel_initializer'] = kernel_initializer
        common_kwargs['bias_initializer'] = bias_initializer
        return common_kwargs

    def _make_output_dense(self, query_shape, common_kwargs, name=None):
        query_rank = len(query_shape)
        if self._output_shape:
            if not isinstance(self._output_shape, collections.abc.Sized):
                output_shape = [self._output_shape]
            else:
                output_shape = self._output_shape
        else:
            output_shape = [query_shape[-1]]
        (einsum_equation, bias_axes, output_rank) = _build_proj_equation(query_rank - 1, bound_dims=2, output_dims=len(output_shape))
        return EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, output_shape), bias_axes=bias_axes if self._use_bias else None, name=name, **common_kwargs)
    def _build_attention(self, rank):
        if self._attention_axes is None:
            self._attention_axes = tuple(range(1, rank - 2))
        else:
            self._attention_axes = tuple(self._attention_axes)
        (self._dot_product_equation, self._combine_equation, attn_scores_rank) = _build_attention_equation(rank, attn_axes=self._attention_axes)
        norm_axes = tuple(range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
        self._softmax = Softmax(axis=norm_axes, dtype=self.dtype_policy)
        self._dropout_layer = Dropout(rate=self._dropout, dtype=self.dtype_policy, seed=self.seed)
        self._inverse_sqrt_key_dim = 1.0 / math.sqrt(float(self._key_dim))

    def _masked_softmax(self, attention_scores, attention_mask=None):
        if attention_mask is not None:
            mask_expansion_axis = -len(self._attention_axes) * 2 - 1
            for _ in range(len(attention_scores.shape) - len(attention_mask.shape)):
                attention_mask = ops.expand_dims(attention_mask, axis=mask_expansion_axis)
        return self._softmax(attention_scores, mask=attention_mask)

    def _compute_attention(self, query, key, value, attention_mask=None, training=None):
        query = ops.multiply(query, ops.cast(self._inverse_sqrt_key_dim, query.dtype))
        attention_scores = ops.einsum(self._dot_product_equation, key, query)
        attention_scores = self._masked_softmax(attention_scores, attention_mask)
        if self.dropout:
            final_attn_scores = self._dropout_layer(attention_scores, training=training)
        else:
            final_attn_scores = attention_scores
        attention_output = ops.einsum(self._combine_equation, final_attn_scores, value)
        return (attention_output, attention_scores)

    def call(self, query, value, key=None, query_mask=None, value_mask=None, key_mask=None, attention_mask=None, return_attention_scores=False, training=None, use_causal_mask=False):
        if key is None:
            key = value
        attention_mask = self._compute_attention_mask(query, value, query_mask=query_mask, value_mask=value_mask, key_mask=key_mask, attention_mask=attention_mask, use_causal_mask=use_causal_mask)
        query = self._query_dense(query)
        key = self._key_dense(key)
        value = self._value_dense(value)
        (attention_output, attention_scores) = self._compute_attention(query, key, value, attention_mask, training)
        attention_output = self._output_dense(attention_output)
        if return_attention_scores:
            return (attention_output, attention_scores)
        return attention_output

    def _compute_attention_mask(self, query, value, query_mask=None, value_mask=None, key_mask=None, attention_mask=None, use_causal_mask=False):
        auto_mask = None
        if query_mask is not None:
            query_mask = ops.cast(query_mask, 'bool')
            auto_mask = ops.expand_dims(query_mask, -1)
        if value_mask is not None:
            value_mask = ops.cast(value_mask, 'bool')
            mask = ops.expand_dims(value_mask, -2)
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if key_mask is not None:
            key_mask = ops.cast(key_mask, 'bool')
            mask = ops.expand_dims(key_mask, -2)
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if use_causal_mask:
            mask = self._compute_causal_mask(query, value)
            auto_mask = mask if auto_mask is None else auto_mask & mask
        if auto_mask is not None:
            attention_mask = auto_mask if attention_mask is None else ops.cast(attention_mask, bool) & auto_mask
        return attention_mask

    def _compute_causal_mask(self, query, value=None):
        q_seq_length = ops.shape(query)[1]
        v_seq_length = q_seq_length if value is None else ops.shape(value)[1]
        ones_mask = ops.ones((1, q_seq_length, v_seq_length), dtype='int32')
        row_index = ops.cumsum(ones_mask, axis=-2)
        col_index = ops.cumsum(ones_mask, axis=-1)
        return ops.greater_equal(row_index, col_index)

    def compute_output_shape(self, query_shape, value_shape, key_shape=None):
        if key_shape is None:
            key_shape = value_shape
        if query_shape[-1] != value_shape[-1]:
            raise ValueError(f'The last dimension of `query_shape` and `value_shape` must be equal, but are {query_shape[-1]}, {value_shape[-1]}. Received: query_shape={query_shape}, value_shape={value_shape}')
        if value_shape[1:-1] != key_shape[1:-1]:
            raise ValueError(f'All dimensions of `value` and `key`, except the last one, must be equal. Received: value_shape={value_shape} and key_shape={key_shape}')
        if self._output_shape:
            return query_shape[:-1] + self._output_shape
        return query_shape

    def compute_output_spec(self, query, value, key=None, query_mask=None, value_mask=None, key_mask=None, attention_mask=None, return_attention_scores=False, training=None, use_causal_mask=False):
        if key is not None:
            key_shape = key.shape
        else:
            key_shape = None
        output_shape = self.compute_output_shape(query.shape, value.shape, key_shape)
        output_spec = backend.KerasTensor(output_shape, dtype=self.compute_dtype)
        if return_attention_scores:
            length = query.shape[1]
            attention_shape = (query.shape[0], self.num_heads, length, length)
            return (output_spec, backend.KerasTensor(attention_shape, dtype=self.compute_dtype))
        return output_spec

def _index_to_einsum_variable(i):
    return string.ascii_lowercase[i]

def _build_attention_equation(rank, attn_axes):
    target_notation = ''
    for i in range(rank):
        target_notation += _index_to_einsum_variable(i)
    batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
    letter_offset = rank
    source_notation = ''
    for i in range(rank):
        if i in batch_dims or i == rank - 1:
            source_notation += target_notation[i]
        else:
            source_notation += _index_to_einsum_variable(letter_offset)
            letter_offset += 1
    product_notation = ''.join([target_notation[i] for i in batch_dims] + [target_notation[i] for i in attn_axes] + [source_notation[i] for i in attn_axes])
    dot_product_equation = '%s,%s->%s' % (source_notation, target_notation, product_notation)
    attn_scores_rank = len(product_notation)
    combine_equation = '%s,%s->%s' % (product_notation, source_notation, target_notation)
    return (dot_product_equation, combine_equation, attn_scores_rank)

def _build_proj_equation(free_dims, bound_dims, output_dims):
    input_str = ''
    kernel_str = ''
    output_str = ''
    bias_axes = ''
    letter_offset = 0
    for i in range(free_dims):
        char = _index_to_einsum_variable(i + letter_offset)
        input_str += char
        output_str += char
    letter_offset += free_dims
    for i in range(bound_dims):
        char = _index_to_einsum_variable(i + letter_offset)
        input_str += char
        kernel_str += char
    letter_offset += bound_dims
    for i in range(output_dims):
        char = _index_to_einsum_variable(i + letter_offset)
        kernel_str += char
        output_str += char
        bias_axes += char
    equation = f'{input_str},{kernel_str}->{output_str}'
    return (equation, bias_axes, len(output_str))

def _get_output_shape(output_rank, known_last_dims):
    return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
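# --- Editor's note: added sketch (not upstream code) of `MultiHeadAttention`
# above, including the causal path used for autoregressive decoding. With
# `use_causal_mask=True`, position t can only attend to positions <= t, so the
# attention-score matrix is lower-triangular after the masked softmax.
import numpy as np
import keras

seq = np.random.random((2, 5, 32)).astype('float32')
mha = keras.layers.MultiHeadAttention(num_heads=4, key_dim=8)
out, scores = mha(query=seq, value=seq, use_causal_mask=True, return_attention_scores=True)
# out: (2, 5, 32); scores: (2, 4, 5, 5) with ~zero weight above the diagonal.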
# File: keras-master/keras/src/layers/convolutional/base_conv.py
""""""
from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.backend import standardize_data_format
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.ops.operation_utils import compute_conv_output_shape
from keras.src.utils.argument_validation import standardize_padding
from keras.src.utils.argument_validation import standardize_tuple

class BaseConv(Layer):

    def __init__(self, rank, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, groups=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, lora_rank=None, **kwargs):
        super().__init__(activity_regularizer=activity_regularizer, **kwargs)
        self.rank = rank
        self.filters = filters
        self.groups = groups
        self.kernel_size = standardize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = standardize_tuple(strides, rank, 'strides')
        self.dilation_rate = standardize_tuple(dilation_rate, rank, 'dilation_rate')
        self.padding = standardize_padding(padding, allow_causal=rank == 1)
        self.data_format = standardize_data_format(data_format)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.lora_rank = lora_rank
        self.lora_enabled = False
        self.input_spec = InputSpec(min_ndim=self.rank + 2)
        if self.filters is not None and self.filters <= 0:
            raise ValueError(f'Invalid value for argument `filters`. Expected a strictly positive value. Received filters={self.filters}.')
        if self.groups <= 0:
            raise ValueError(f'The number of groups must be a positive integer. Received: groups={self.groups}.')
        if self.filters is not None and self.filters % self.groups != 0:
            raise ValueError(f'The number of filters must be evenly divisible by the number of groups. Received: groups={self.groups}, filters={self.filters}.')
        if not all(self.kernel_size):
            raise ValueError(f'The argument `kernel_size` cannot contain 0. Received kernel_size={self.kernel_size}.')
        if not all(self.strides):
            raise ValueError(f'The argument `strides` cannot contain 0. Received strides={self.strides}')
        if max(self.strides) > 1 and max(self.dilation_rate) > 1:
            raise ValueError(f'`strides > 1` not supported in conjunction with `dilation_rate > 1`. Received: strides={self.strides} and dilation_rate={self.dilation_rate}')

    def build(self, input_shape):
        if self.data_format == 'channels_last':
            channel_axis = -1
            input_channel = input_shape[-1]
        else:
            channel_axis = 1
            input_channel = input_shape[1]
        self.input_spec = InputSpec(min_ndim=self.rank + 2, axes={channel_axis: input_channel})
        if input_channel % self.groups != 0:
            raise ValueError(f'The number of input channels must be evenly divisible by the number of groups. 
Received groups={self.groups}, but the input has {input_channel} channels (full input shape is {input_shape}).') + kernel_shape = self.kernel_size + (input_channel // self.groups, self.filters) + self.compute_output_shape(input_shape) + self._kernel = self.add_weight(name='kernel', shape=kernel_shape, initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True, dtype=self.dtype) + if self.use_bias: + self.bias = self.add_weight(name='bias', shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype) + else: + self.bias = None + self.built = True + if self.lora_rank: + self.enable_lora(self.lora_rank) + + @property + def kernel(self): + if not self.built: + raise AttributeError('You must build the layer before accessing `kernel`.') + if self.lora_enabled: + return self._kernel + ops.matmul(self.lora_kernel_a, self.lora_kernel_b) + return self._kernel + + def convolution_op(self, inputs, kernel): + return ops.conv(inputs, kernel, strides=list(self.strides), padding=self.padding, dilation_rate=self.dilation_rate, data_format=self.data_format) + + def call(self, inputs): + outputs = self.convolution_op(inputs, self.kernel) + if self.use_bias: + if self.data_format == 'channels_last': + bias_shape = (1,) * (self.rank + 1) + (self.filters,) + else: + bias_shape = (1, self.filters) + (1,) * self.rank + bias = ops.reshape(self.bias, bias_shape) + outputs += bias + if self.activation is not None: + return self.activation(outputs) + return outputs + + def compute_output_shape(self, input_shape): + return compute_conv_output_shape(input_shape, self.filters, self.kernel_size, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate) + + def enable_lora(self, rank, a_initializer='he_uniform', b_initializer='zeros'): + if self.kernel_constraint: + raise ValueError('Lora is incompatible with kernel constraints. In order to enable lora on this layer, remove the `kernel_constraint` argument.') + if not self.built: + raise ValueError("Cannot enable lora on a layer that isn't yet built.") + if self.lora_enabled: + raise ValueError('lora is already enabled. 
This can only be done once per layer.') + self._tracker.unlock() + self.lora_kernel_a = self.add_weight(name='lora_kernel_a', shape=self._kernel.shape[:-1] + (rank,), initializer=initializers.get(a_initializer), regularizer=self.kernel_regularizer) + self.lora_kernel_b = self.add_weight(name='lora_kernel_b', shape=(rank, self.filters), initializer=initializers.get(b_initializer), regularizer=self.kernel_regularizer) + self._kernel.trainable = False + self._tracker.lock() + self.lora_enabled = True + self.lora_rank = rank + + def save_own_variables(self, store): + if not self.built: + return + target_variables = [self.kernel] + if self.use_bias: + target_variables.append(self.bias) + for (i, variable) in enumerate(target_variables): + store[str(i)] = variable + + def load_own_variables(self, store): + if not self.lora_enabled: + self._check_load_own_variables(store) + if not self.built: + return + target_variables = [self._kernel] + if self.use_bias: + target_variables.append(self.bias) + for (i, variable) in enumerate(target_variables): + variable.assign(store[str(i)]) + if self.lora_enabled: + self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape)) + self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape)) + + def get_config(self): + config = super().get_config() + config.update({'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'groups': self.groups, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint)}) + if self.lora_rank: + config['lora_rank'] = self.lora_rank + return config + + def _check_load_own_variables(self, store): + all_vars = self._trainable_variables + self._non_trainable_variables + if len(store.keys()) != len(all_vars): + if len(all_vars) == 0 and (not self.built): + raise ValueError(f"Layer '{self.name}' was never built and thus it doesn't have any variables. However the weights file lists {len(store.keys())} variables for this layer.\nIn most cases, this error indicates that either:\n\n1. The layer is owned by a parent layer that implements a `build()` method, but calling the parent's `build()` method did NOT create the state of the child layer '{self.name}'. A `build()` method must create ALL state for the layer, including the state of any children layers.\n\n2. You need to implement the `def build_from_config(self, config)` method on layer '{self.name}', to specify how to rebuild it during loading. In this case, you might also want to implement the method that generates the build config at saving time, `def get_build_config(self)`. The method `build_from_config()` is meant to create the state of the layer (i.e. its variables) upon deserialization.") + raise ValueError(f"Layer '{self.name}' expected {len(all_vars)} variables, but received {len(store.keys())} variables during loading. 
Expected: {[v.name for v in all_vars]}") + +# File: keras-master/keras/src/layers/convolutional/base_conv_transpose.py +"""""" +from keras.src import activations +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.backend import standardize_data_format +from keras.src.backend.common.backend_utils import compute_conv_transpose_output_shape +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils.argument_validation import standardize_padding +from keras.src.utils.argument_validation import standardize_tuple + +class BaseConvTranspose(Layer): + + def __init__(self, rank, filters, kernel_size, strides=1, padding='valid', output_padding=None, data_format=None, dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): + super().__init__(trainable=trainable, name=name, activity_regularizer=activity_regularizer, **kwargs) + self.rank = rank + self.filters = filters + self.kernel_size = standardize_tuple(kernel_size, rank, 'kernel_size') + self.strides = standardize_tuple(strides, rank, 'strides') + self.dilation_rate = standardize_tuple(dilation_rate, rank, 'dilation_rate') + self.padding = standardize_padding(padding) + if output_padding is None: + self.output_padding = None + else: + self.output_padding = standardize_tuple(output_padding, rank, 'output_padding') + self.data_format = standardize_data_format(data_format) + self.activation = activations.get(activation) + self.use_bias = use_bias + self.kernel_initializer = initializers.get(kernel_initializer) + self.bias_initializer = initializers.get(bias_initializer) + self.kernel_regularizer = regularizers.get(kernel_regularizer) + self.bias_regularizer = regularizers.get(bias_regularizer) + self.kernel_constraint = constraints.get(kernel_constraint) + self.bias_constraint = constraints.get(bias_constraint) + self.input_spec = InputSpec(min_ndim=self.rank + 2) + self.data_format = self.data_format + if self.filters is not None and self.filters <= 0: + raise ValueError(f'Invalid value for argument `filters`. Expected a strictly positive value. Received filters={self.filters}.') + if not all(self.kernel_size): + raise ValueError(f'The argument `kernel_size` cannot contain 0. Received kernel_size={self.kernel_size}.') + if not all(self.strides): + raise ValueError(f'The argument `strides` cannot contains 0. Received strides={self.strides}.') + if max(self.strides) > 1 and max(self.dilation_rate) > 1: + raise ValueError(f'`strides > 1` not supported in conjunction with `dilation_rate > 1`. 
Received: strides={self.strides} and dilation_rate={self.dilation_rate}') + + def build(self, input_shape): + if self.data_format == 'channels_last': + channel_axis = -1 + input_channel = input_shape[-1] + else: + channel_axis = 1 + input_channel = input_shape[1] + self.input_spec = InputSpec(min_ndim=self.rank + 2, axes={channel_axis: input_channel}) + kernel_shape = self.kernel_size + (self.filters, input_channel) + self.kernel = self.add_weight(name='kernel', shape=kernel_shape, initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True, dtype=self.dtype) + if self.use_bias: + self.bias = self.add_weight(name='bias', shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype) + else: + self.bias = None + self.built = True + + def call(self, inputs): + outputs = ops.conv_transpose(inputs, self.kernel, strides=list(self.strides), padding=self.padding, output_padding=self.output_padding, dilation_rate=self.dilation_rate, data_format=self.data_format) + if self.use_bias: + if self.data_format == 'channels_last': + bias_shape = (1,) * (self.rank + 1) + (self.filters,) + else: + bias_shape = (1, self.filters) + (1,) * self.rank + bias = ops.reshape(self.bias, bias_shape) + outputs += bias + if self.activation is not None: + return self.activation(outputs) + return outputs + + def compute_output_shape(self, input_shape): + return compute_conv_transpose_output_shape(input_shape, self.kernel_size, self.filters, strides=self.strides, padding=self.padding, output_padding=self.output_padding, data_format=self.data_format, dilation_rate=self.dilation_rate) + + def get_config(self): + config = super().get_config() + config.update({'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint)}) + return config + +# File: keras-master/keras/src/layers/convolutional/base_depthwise_conv.py +"""""" +from keras.src import activations +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.backend import standardize_data_format +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.ops.operation_utils import compute_conv_output_shape +from keras.src.utils.argument_validation import standardize_padding +from keras.src.utils.argument_validation import standardize_tuple + +class BaseDepthwiseConv(Layer): + + def __init__(self, rank, depth_multiplier, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, 
bias_constraint=None, trainable=True, name=None, **kwargs): + super().__init__(trainable=trainable, name=name, activity_regularizer=regularizers.get(activity_regularizer), **kwargs) + self.rank = rank + self.depth_multiplier = depth_multiplier + self.kernel_size = standardize_tuple(kernel_size, rank, 'kernel_size') + self.strides = standardize_tuple(strides, rank, 'strides') + self.dilation_rate = standardize_tuple(dilation_rate, rank, 'dilation_rate') + self.padding = standardize_padding(padding) + self.data_format = standardize_data_format(data_format) + self.activation = activations.get(activation) + self.use_bias = use_bias + self.depthwise_initializer = initializers.get(depthwise_initializer) + self.bias_initializer = initializers.get(bias_initializer) + self.depthwise_regularizer = regularizers.get(depthwise_regularizer) + self.bias_regularizer = regularizers.get(bias_regularizer) + self.depthwise_constraint = constraints.get(depthwise_constraint) + self.bias_constraint = constraints.get(bias_constraint) + self.input_spec = InputSpec(min_ndim=self.rank + 2) + self.data_format = self.data_format + if self.depth_multiplier is not None and self.depth_multiplier <= 0: + raise ValueError(f'Invalid value for argument `depth_multiplier`. Expected a strictly positive value. Received depth_multiplier={self.depth_multiplier}.') + if not all(self.kernel_size): + raise ValueError(f'The argument `kernel_size` cannot contain 0. Received kernel_size={self.kernel_size}.') + if not all(self.strides): + raise ValueError(f'The argument `strides` cannot contains 0. Received strides={self.strides}') + if max(self.strides) > 1 and max(self.dilation_rate) > 1: + raise ValueError(f'`strides > 1` not supported in conjunction with `dilation_rate > 1`. Received: strides={self.strides} and dilation_rate={self.dilation_rate}') + + def build(self, input_shape): + if self.data_format == 'channels_last': + channel_axis = -1 + input_channel = input_shape[-1] + else: + channel_axis = 1 + input_channel = input_shape[1] + self.input_spec = InputSpec(min_ndim=self.rank + 2, axes={channel_axis: input_channel}) + depthwise_shape = self.kernel_size + (input_channel, self.depth_multiplier) + self.kernel = self.add_weight(name='kernel', shape=depthwise_shape, initializer=self.depthwise_initializer, regularizer=self.depthwise_regularizer, constraint=self.depthwise_constraint, trainable=True, dtype=self.dtype) + if self.use_bias: + self.bias = self.add_weight(name='bias', shape=(self.depth_multiplier * input_channel,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype) + else: + self.bias = None + self.built = True + + def _get_input_channel(self, input_shape): + if self.data_format == 'channels_last': + input_channel = input_shape[-1] + else: + input_channel = input_shape[1] + return input_channel + + def call(self, inputs): + input_channel = self._get_input_channel(inputs.shape) + outputs = ops.depthwise_conv(inputs, self.kernel, strides=self.strides, padding=self.padding, dilation_rate=self.dilation_rate, data_format=self.data_format) + if self.use_bias: + if self.data_format == 'channels_last': + bias_shape = (1,) * (self.rank + 1) + (self.depth_multiplier * input_channel,) + else: + bias_shape = (1, self.depth_multiplier * input_channel) + (1,) * self.rank + bias = ops.reshape(self.bias, bias_shape) + outputs += bias + if self.activation is not None: + return self.activation(outputs) + return outputs + + def compute_output_shape(self, 
input_shape): + input_channel = self._get_input_channel(input_shape) + return compute_conv_output_shape(input_shape, self.depth_multiplier * input_channel, self.kernel_size, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate) + + def get_config(self): + config = super().get_config() + config.update({'depth_multiplier': self.depth_multiplier, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'depthwise_initializer': initializers.serialize(self.depthwise_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'depthwise_regularizer': regularizers.serialize(self.depthwise_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'depthwise_constraint': constraints.serialize(self.depthwise_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint)}) + return config + +# File: keras-master/keras/src/layers/convolutional/base_separable_conv.py +"""""" +from keras.src import activations +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.backend import standardize_data_format +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.ops.operation_utils import compute_conv_output_shape +from keras.src.utils.argument_validation import standardize_padding +from keras.src.utils.argument_validation import standardize_tuple + +class BaseSeparableConv(Layer): + + def __init__(self, rank, depth_multiplier, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs): + super().__init__(trainable=trainable, name=name, activity_regularizer=regularizers.get(activity_regularizer), **kwargs) + self.rank = rank + self.depth_multiplier = depth_multiplier + self.filters = filters + self.kernel_size = standardize_tuple(kernel_size, rank, 'kernel_size') + self.strides = standardize_tuple(strides, rank, 'strides') + self.dilation_rate = standardize_tuple(dilation_rate, rank, 'dilation_rate') + self.padding = standardize_padding(padding) + self.data_format = standardize_data_format(data_format) + self.activation = activations.get(activation) + self.use_bias = use_bias + self.depthwise_initializer = initializers.get(depthwise_initializer) + self.pointwise_initializer = initializers.get(pointwise_initializer) + self.bias_initializer = initializers.get(bias_initializer) + self.depthwise_regularizer = regularizers.get(depthwise_regularizer) + self.pointwise_regularizer = regularizers.get(pointwise_regularizer) + self.bias_regularizer = regularizers.get(bias_regularizer) + self.depthwise_constraint = constraints.get(depthwise_constraint) + self.pointwise_constraint = constraints.get(pointwise_constraint) + self.bias_constraint = constraints.get(bias_constraint) + self.data_format = self.data_format + self.input_spec = 
InputSpec(min_ndim=self.rank + 2) + if self.depth_multiplier is not None and self.depth_multiplier <= 0: + raise ValueError(f'Invalid value for argument `depth_multiplier`. Expected a strictly positive value. Received depth_multiplier={self.depth_multiplier}.') + if self.filters is not None and self.filters <= 0: + raise ValueError(f'Invalid value for argument `filters`. Expected a strictly positive value. Received filters={self.filters}.') + if not all(self.kernel_size): + raise ValueError(f'The argument `kernel_size` cannot contain 0. Received: kernel_size={self.kernel_size}.') + if not all(self.strides): + raise ValueError(f'The argument `strides` cannot contains 0(s). Received: strides={self.strides}') + if max(self.strides) > 1 and max(self.dilation_rate) > 1: + raise ValueError(f'`strides > 1` not supported in conjunction with `dilation_rate > 1`. Received: strides={self.strides} and dilation_rate={self.dilation_rate}') + + def build(self, input_shape): + if self.data_format == 'channels_last': + channel_axis = -1 + input_channel = input_shape[-1] + else: + channel_axis = 1 + input_channel = input_shape[1] + self.input_spec = InputSpec(min_ndim=self.rank + 2, axes={channel_axis: input_channel}) + depthwise_kernel_shape = self.kernel_size + (input_channel, self.depth_multiplier) + pointwise_kernel_shape = (1,) * self.rank + (self.depth_multiplier * input_channel, self.filters) + self.depthwise_kernel = self.add_weight(name='depthwise_kernel', shape=depthwise_kernel_shape, initializer=self.depthwise_initializer, regularizer=self.depthwise_regularizer, constraint=self.depthwise_constraint, trainable=True, dtype=self.dtype) + self.pointwise_kernel = self.add_weight(name='pointwise_kernel', shape=pointwise_kernel_shape, initializer=self.pointwise_initializer, regularizer=self.pointwise_regularizer, constraint=self.pointwise_constraint, trainable=True, dtype=self.dtype) + if self.use_bias: + self.bias = self.add_weight(name='bias', shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype) + else: + self.bias = None + self.built = True + + def call(self, inputs): + outputs = ops.separable_conv(inputs, self.depthwise_kernel, self.pointwise_kernel, strides=self.strides, padding=self.padding, dilation_rate=self.dilation_rate, data_format=self.data_format) + if self.use_bias: + if self.data_format == 'channels_last': + bias_shape = (1,) * (self.rank + 1) + (self.filters,) + else: + bias_shape = (1, self.filters) + (1,) * self.rank + bias = ops.reshape(self.bias, bias_shape) + outputs += bias + if self.activation is not None: + return self.activation(outputs) + return outputs + + def compute_output_shape(self, input_shape): + return compute_conv_output_shape(input_shape, self.filters, self.kernel_size, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate) + + def get_config(self): + config = super().get_config() + config.update({'depth_multiplier': self.depth_multiplier, 'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'depthwise_initializer': initializers.serialize(self.depthwise_initializer), 'pointwise_initializer': initializers.serialize(self.pointwise_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 
'depthwise_regularizer': regularizers.serialize(self.depthwise_regularizer), 'pointwise_regularizer': regularizers.serialize(self.pointwise_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'depthwise_constraint': constraints.serialize(self.depthwise_constraint), 'pointwise_constraint': constraints.serialize(self.pointwise_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint)})
        return config

# File: keras-master/keras/src/layers/convolutional/conv1d.py
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_conv import BaseConv

@keras_export(['keras.layers.Conv1D', 'keras.layers.Convolution1D'])
class Conv1D(BaseConv):

    def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, groups=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
        super().__init__(rank=1, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, groups=groups, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs)

    def _compute_causal_padding(self):
        left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
        if self.data_format == 'channels_last':
            causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
        else:
            causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
        return causal_padding

    def call(self, inputs):
        padding = self.padding
        if self.padding == 'causal':
            inputs = ops.pad(inputs, self._compute_causal_padding())
            padding = 'valid'
        outputs = ops.conv(inputs, self.kernel, strides=list(self.strides), padding=padding, dilation_rate=self.dilation_rate, data_format=self.data_format)
        if self.use_bias:
            if self.data_format == 'channels_last':
                bias_shape = (1,) * (self.rank + 1) + (self.filters,)
            else:
                bias_shape = (1, self.filters) + (1,) * self.rank
            bias = ops.reshape(self.bias, bias_shape)
            outputs += bias
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
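# --- Editor's note: added sketch (not upstream code) of the causal branch in
# `Conv1D.call` above. With padding='causal', the input is left-padded by
# dilation_rate * (kernel_size - 1), so output step t depends only on input
# steps <= t and the sequence length is preserved.
import numpy as np
import keras

x = np.random.random((1, 10, 4)).astype('float32')  # (batch, steps, channels)
conv = keras.layers.Conv1D(filters=8, kernel_size=3, padding='causal')
y = conv(x)  # shape (1, 10, 8)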
+# File: keras-master/keras/src/layers/convolutional/conv1d_transpose.py
+from keras.src.api_export import keras_export
+from keras.src.layers.convolutional.base_conv_transpose import BaseConvTranspose
+
+@keras_export(['keras.layers.Conv1DTranspose', 'keras.layers.Convolution1DTranspose'])
+class Conv1DTranspose(BaseConvTranspose):
+
+    def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
+        super().__init__(rank=1, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs)
+
+# File: keras-master/keras/src/layers/convolutional/conv2d.py
+from keras.src.api_export import keras_export
+from keras.src.layers.convolutional.base_conv import BaseConv
+
+@keras_export(['keras.layers.Conv2D', 'keras.layers.Convolution2D'])
+class Conv2D(BaseConv):
+
+    def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), groups=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
+        super().__init__(rank=2, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, groups=groups, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs)
+
+# File: keras-master/keras/src/layers/convolutional/conv2d_transpose.py
+from keras.src.api_export import keras_export
+from keras.src.layers.convolutional.base_conv_transpose import BaseConvTranspose
+
+@keras_export(['keras.layers.Conv2DTranspose', 'keras.layers.Convolution2DTranspose'])
+class Conv2DTranspose(BaseConvTranspose):
+
+    def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
+        super().__init__(rank=2, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs)
+
+# File: keras-master/keras/src/layers/convolutional/conv3d.py
+from keras.src.api_export import keras_export
+from keras.src.layers.convolutional.base_conv import BaseConv
+
+@keras_export(['keras.layers.Conv3D', 'keras.layers.Convolution3D'])
+class Conv3D(BaseConv):
+
+    def __init__(self, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1), groups=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
+        super().__init__(rank=3, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, groups=groups, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs)
+
+# File: 
keras-master/keras/src/layers/convolutional/conv3d_transpose.py +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_conv_transpose import BaseConvTranspose + +@keras_export(['keras.layers.Conv3DTranspose', 'keras.layers.Convolution3DTranspose']) +class Conv3DTranspose(BaseConvTranspose): + + def __init__(self, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): + super().__init__(rank=3, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs) + +# File: keras-master/keras/src/layers/convolutional/depthwise_conv1d.py +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_depthwise_conv import BaseDepthwiseConv + +@keras_export('keras.layers.DepthwiseConv1D') +class DepthwiseConv1D(BaseDepthwiseConv): + + def __init__(self, kernel_size, strides=1, padding='valid', depth_multiplier=1, data_format=None, dilation_rate=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, bias_constraint=None, **kwargs): + super().__init__(rank=1, depth_multiplier=depth_multiplier, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, bias_constraint=bias_constraint, **kwargs) + +# File: keras-master/keras/src/layers/convolutional/depthwise_conv2d.py +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_depthwise_conv import BaseDepthwiseConv + +@keras_export('keras.layers.DepthwiseConv2D') +class DepthwiseConv2D(BaseDepthwiseConv): + + def __init__(self, kernel_size, strides=(1, 1), padding='valid', depth_multiplier=1, data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, depthwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, bias_constraint=None, **kwargs): + super().__init__(rank=2, depth_multiplier=depth_multiplier, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, bias_constraint=bias_constraint, **kwargs) + +# File: keras-master/keras/src/layers/convolutional/separable_conv1d.py +from 
keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_separable_conv import BaseSeparableConv + +@keras_export(['keras.layers.SeparableConv1D', 'keras.layers.SeparableConvolution1D']) +class SeparableConv1D(BaseSeparableConv): + + def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, **kwargs): + super().__init__(rank=1, depth_multiplier=depth_multiplier, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, pointwise_constraint=pointwise_constraint, bias_constraint=bias_constraint, **kwargs) + +# File: keras-master/keras/src/layers/convolutional/separable_conv2d.py +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_separable_conv import BaseSeparableConv + +@keras_export(['keras.layers.SeparableConv2D', 'keras.layers.SeparableConvolution2D']) +class SeparableConv2D(BaseSeparableConv): + + def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, **kwargs): + super().__init__(rank=2, depth_multiplier=depth_multiplier, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, pointwise_constraint=pointwise_constraint, bias_constraint=bias_constraint, **kwargs) + +# File: keras-master/keras/src/layers/core/dense.py +import ml_dtypes +from keras.src import activations +from keras.src import constraints +from keras.src import dtype_policies +from keras.src import initializers +from keras.src import ops +from keras.src import quantizers +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.Dense') +class Dense(Layer): + + def __init__(self, units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, 
kernel_constraint=None, bias_constraint=None, lora_rank=None, **kwargs): + super().__init__(activity_regularizer=activity_regularizer, **kwargs) + self.units = units + self.activation = activations.get(activation) + self.use_bias = use_bias + self.kernel_initializer = initializers.get(kernel_initializer) + self.bias_initializer = initializers.get(bias_initializer) + self.kernel_regularizer = regularizers.get(kernel_regularizer) + self.bias_regularizer = regularizers.get(bias_regularizer) + self.kernel_constraint = constraints.get(kernel_constraint) + self.bias_constraint = constraints.get(bias_constraint) + self.lora_rank = lora_rank + self.lora_enabled = False + self.input_spec = InputSpec(min_ndim=2) + self.supports_masking = True + + def build(self, input_shape): + input_dim = input_shape[-1] + if self.quantization_mode: + self.quantized_build(input_shape, mode=self.quantization_mode) + if self.quantization_mode != 'int8': + self._kernel = self.add_weight(name='kernel', shape=(input_dim, self.units), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) + if self.use_bias: + self.bias = self.add_weight(name='bias', shape=(self.units,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) + else: + self.bias = None + self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim}) + self.built = True + if self.lora_rank: + self.enable_lora(self.lora_rank) + + @property + def kernel(self): + if not self.built: + raise AttributeError('You must build the layer before accessing `kernel`.') + if self.lora_enabled: + return self._kernel + ops.matmul(self.lora_kernel_a, self.lora_kernel_b) + return self._kernel + + def call(self, inputs, training=None): + x = ops.matmul(inputs, self.kernel) + if self.bias is not None: + x = ops.add(x, self.bias) + if self.activation is not None: + x = self.activation(x) + return x + + def compute_output_shape(self, input_shape): + output_shape = list(input_shape) + output_shape[-1] = self.units + return tuple(output_shape) + + def enable_lora(self, rank, a_initializer='he_uniform', b_initializer='zeros'): + if self.kernel_constraint: + raise ValueError('Lora is incompatible with kernel constraints. In order to enable lora on this layer, remove the `kernel_constraint` argument.') + if not self.built: + raise ValueError("Cannot enable lora on a layer that isn't yet built.") + if self.lora_enabled: + raise ValueError('lora is already enabled. 
This can only be done once per layer.') + self._tracker.unlock() + self.lora_kernel_a = self.add_weight(name='lora_kernel_a', shape=(self.kernel.shape[0], rank), initializer=initializers.get(a_initializer), regularizer=self.kernel_regularizer) + self.lora_kernel_b = self.add_weight(name='lora_kernel_b', shape=(rank, self.kernel.shape[1]), initializer=initializers.get(b_initializer), regularizer=self.kernel_regularizer) + self._kernel.trainable = False + self._tracker.lock() + self.lora_enabled = True + self.lora_rank = rank + + def save_own_variables(self, store): + if not self.built: + return + (kernel_value, kernel_scale) = self._get_kernel_with_merged_lora() + target_variables = [kernel_value] + if self.use_bias: + target_variables.append(self.bias) + if self.quantization_mode is not None: + if self.quantization_mode == 'int8': + target_variables.append(kernel_scale) + elif self.quantization_mode == 'float8': + target_variables.append(self.inputs_scale) + target_variables.append(self.inputs_amax_history) + target_variables.append(self.kernel_scale) + target_variables.append(self.kernel_amax_history) + target_variables.append(self.outputs_grad_scale) + target_variables.append(self.outputs_grad_amax_history) + else: + raise self._quantization_mode_error(self.quantization_mode) + for (i, variable) in enumerate(target_variables): + store[str(i)] = variable + + def load_own_variables(self, store): + if not self.lora_enabled: + self._check_load_own_variables(store) + if not self.built: + return + target_variables = [self._kernel] + if self.use_bias: + target_variables.append(self.bias) + if self.quantization_mode is not None: + if self.quantization_mode == 'int8': + target_variables.append(self.kernel_scale) + elif self.quantization_mode == 'float8': + target_variables.append(self.inputs_scale) + target_variables.append(self.inputs_amax_history) + target_variables.append(self.kernel_scale) + target_variables.append(self.kernel_amax_history) + target_variables.append(self.outputs_grad_scale) + target_variables.append(self.outputs_grad_amax_history) + else: + raise self._quantization_mode_error(self.quantization_mode) + for (i, variable) in enumerate(target_variables): + variable.assign(store[str(i)]) + if self.lora_enabled: + self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape)) + self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape)) + + def get_config(self): + base_config = super().get_config() + config = {'units': self.units, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint)} + if self.lora_rank: + config['lora_rank'] = self.lora_rank + return {**base_config, **config} + + def _check_load_own_variables(self, store): + all_vars = self._trainable_variables + self._non_trainable_variables + if len(store.keys()) != len(all_vars): + if len(all_vars) == 0 and (not self.built): + raise ValueError(f"Layer '{self.name}' was never built and thus it doesn't have any variables. However the weights file lists {len(store.keys())} variables for this layer.\nIn most cases, this error indicates that either:\n\n1. 
The layer is owned by a parent layer that implements a `build()` method, but calling the parent's `build()` method did NOT create the state of the child layer '{self.name}'. A `build()` method must create ALL state for the layer, including the state of any children layers.\n\n2. You need to implement the `def build_from_config(self, config)` method on layer '{self.name}', to specify how to rebuild it during loading. In this case, you might also want to implement the method that generates the build config at saving time, `def get_build_config(self)`. The method `build_from_config()` is meant to create the state of the layer (i.e. its variables) upon deserialization.") + raise ValueError(f"Layer '{self.name}' expected {len(all_vars)} variables, but received {len(store.keys())} variables during loading. Expected: {[v.name for v in all_vars]}") + + def quantized_build(self, input_shape, mode): + if mode == 'int8': + input_dim = input_shape[-1] + kernel_shape = (input_dim, self.units) + self._int8_build(kernel_shape) + elif mode == 'float8': + self._float8_build() + else: + raise self._quantization_mode_error(mode) + + def _int8_build(self, kernel_shape, kernel_initializer='zeros', kernel_scale_initializer='ones'): + self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) + self._kernel = self.add_weight(name='kernel', shape=kernel_shape, initializer=kernel_initializer, dtype='int8', trainable=False) + self.kernel_scale = self.add_weight(name='kernel_scale', shape=(self.units,), initializer=kernel_scale_initializer, trainable=False) + self._is_quantized = True + + def _float8_build(self): + from keras.src.dtype_policies import QuantizedFloat8DTypePolicy + amax_history_length = getattr(self.dtype_policy, 'amax_history_length', QuantizedFloat8DTypePolicy.default_amax_history_length) + scale_kwargs = {'shape': (), 'initializer': 'ones', 'dtype': 'float32', 'trainable': True, 'autocast': False} + amax_history_kwargs = {'shape': (amax_history_length,), 'initializer': 'zeros', 'dtype': 'float32', 'trainable': True, 'autocast': False} + self.inputs_scale = self.add_weight(name='inputs_scale', **scale_kwargs) + self.inputs_amax_history = self.add_weight(name='inputs_amax_history', **amax_history_kwargs) + self.kernel_scale = self.add_weight(name='kernel_scale', **scale_kwargs) + self.kernel_amax_history = self.add_weight(name='kernel_amax_history', **amax_history_kwargs) + self.outputs_grad_scale = self.add_weight(name='outputs_grad_scale', **scale_kwargs) + self.outputs_grad_amax_history = self.add_weight(name='outputs_grad_amax_history', **amax_history_kwargs) + self.inputs_scale.overwrite_with_gradient = True + self.inputs_amax_history.overwrite_with_gradient = True + self.kernel_scale.overwrite_with_gradient = True + self.kernel_amax_history.overwrite_with_gradient = True + self.outputs_grad_scale.overwrite_with_gradient = True + self.outputs_grad_amax_history.overwrite_with_gradient = True + self._is_quantized = True + + def _int8_call(self, inputs, training=None): + + @ops.custom_gradient + def matmul_with_inputs_gradient(inputs, kernel, kernel_scale): + + def grad_fn(*args, upstream=None): + if upstream is None: + (upstream,) = args + float_kernel = ops.divide(ops.cast(kernel, dtype=self.compute_dtype), kernel_scale) + inputs_grad = ops.matmul(upstream, ops.transpose(float_kernel)) + return (inputs_grad, None, None) + (inputs, inputs_scale) = self.inputs_quantizer(inputs) + x = ops.matmul(inputs, kernel) + x = ops.cast(x, self.compute_dtype) + x = ops.divide(x, ops.multiply(inputs_scale, 
kernel_scale)) + return (x, grad_fn) + x = matmul_with_inputs_gradient(inputs, ops.convert_to_tensor(self._kernel), ops.convert_to_tensor(self.kernel_scale)) + if self.lora_enabled: + lora_x = ops.matmul(inputs, self.lora_kernel_a) + lora_x = ops.matmul(lora_x, self.lora_kernel_b) + x = ops.add(x, lora_x) + if self.bias is not None: + x = ops.add(x, self.bias) + if self.activation is not None: + x = self.activation(x) + return x + + def _float8_call(self, inputs, training=None): + if self.lora_enabled: + raise NotImplementedError("Currently, `_float8_call` doesn't support LoRA") + + @ops.custom_gradient + def quantized_dequantize_inputs(inputs, scale, amax_history): + if training: + new_scale = quantizers.compute_float8_scale(ops.max(amax_history, axis=0), scale, ops.cast(float(ml_dtypes.finfo('float8_e4m3fn').max), 'float32')) + new_amax_history = quantizers.compute_float8_amax_history(inputs, amax_history) + else: + new_scale = None + new_amax_history = None + qdq_inputs = quantizers.quantize_and_dequantize(inputs, scale, 'float8_e4m3fn', self.compute_dtype) + + def grad(*args, upstream=None, variables=None): + if upstream is None: + (upstream,) = args + return (upstream, new_scale, new_amax_history) + return (qdq_inputs, grad) + + @ops.custom_gradient + def quantized_dequantize_outputs(outputs, scale, amax_history): + + def grad(*args, upstream=None, variables=None): + if upstream is None: + (upstream,) = args + new_scale = quantizers.compute_float8_scale(ops.max(amax_history, axis=0), scale, ops.cast(float(ml_dtypes.finfo('float8_e5m2').max), 'float32')) + qdq_upstream = quantizers.quantize_and_dequantize(upstream, scale, 'float8_e5m2', self.compute_dtype) + new_amax_history = quantizers.compute_float8_amax_history(upstream, amax_history) + return (qdq_upstream, new_scale, new_amax_history) + return (outputs, grad) + x = ops.matmul(quantized_dequantize_inputs(inputs, ops.convert_to_tensor(self.inputs_scale), ops.convert_to_tensor(self.inputs_amax_history)), quantized_dequantize_inputs(ops.convert_to_tensor(self._kernel), ops.convert_to_tensor(self.kernel_scale), ops.convert_to_tensor(self.kernel_amax_history))) + x = quantized_dequantize_outputs(x, ops.convert_to_tensor(self.outputs_grad_scale), ops.convert_to_tensor(self.outputs_grad_amax_history)) + if self.bias is not None: + bias = self.bias + if self.dtype_policy.compute_dtype == 'float32': + bias_bf16 = ops.cast(bias, 'bfloat16') + bias = ops.cast(bias_bf16, bias.dtype) + x = ops.add(x, bias) + if self.activation is not None: + x = self.activation(x) + return x + + def quantize(self, mode, type_check=True): + if type_check and type(self) is not Dense: + raise self._not_implemented_error(self.quantize) + if mode == 'int8': + (kernel_value, kernel_scale) = quantizers.abs_max_quantize(self._kernel, axis=0, to_numpy=True) + kernel_scale = ops.squeeze(kernel_scale, axis=0) + kernel_shape = tuple(self._kernel.shape) + del self._kernel + self._int8_build(kernel_shape, lambda shape, dtype: kernel_value, lambda shape, dtype: kernel_scale) + elif mode == 'float8': + self._float8_build() + else: + raise self._quantization_mode_error(mode) + if self.dtype_policy.quantization_mode is None: + policy = dtype_policies.get(f'{mode}_from_{self.dtype_policy.name}') + self.dtype_policy = policy + + def _get_kernel_with_merged_lora(self): + if self.dtype_policy.quantization_mode is not None: + kernel_value = self._kernel + kernel_scale = self.kernel_scale + if self.lora_enabled: + kernel_value = ops.divide(kernel_value, kernel_scale) + kernel_value = 
ops.add(kernel_value, ops.matmul(self.lora_kernel_a, self.lora_kernel_b))
+                (kernel_value, kernel_scale) = quantizers.abs_max_quantize(kernel_value, axis=0, to_numpy=True)
+                kernel_scale = ops.squeeze(kernel_scale, axis=0)
+            return (kernel_value, kernel_scale)
+        return (self.kernel, None)
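+
+# --- Editor's note: editorial usage sketch, not part of the Keras sources; it
+# assumes a working `keras` install. `enable_lora(rank)` freezes the dense
+# kernel and adds two small trainable factors, so the effective kernel used by
+# `call` becomes W + A @ B with A of shape (input_dim, rank) and B of shape
+# (rank, units); `_get_kernel_with_merged_lora` above folds A @ B back into
+# the kernel (re-quantizing under an int8 policy) when the layer is saved.
+_lora_demo = Dense(units=16)
+_lora_demo.build((None, 8))
+_lora_demo.enable_lora(4)
+assert _lora_demo.lora_kernel_a.shape == (8, 4)
+assert _lora_demo.lora_kernel_b.shape == (4, 16)
+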
+# File: keras-master/keras/src/layers/core/einsum_dense.py
+import re
+import string
+import ml_dtypes
+import numpy as np
+from keras.src import activations
+from keras.src import constraints
+from keras.src import dtype_policies
+from keras.src import initializers
+from keras.src import ops
+from keras.src import quantizers
+from keras.src import regularizers
+from keras.src.api_export import keras_export
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+
+@keras_export('keras.layers.EinsumDense')
+class EinsumDense(Layer):
+
+    def __init__(self, equation, output_shape, activation=None, bias_axes=None, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, kernel_constraint=None, bias_constraint=None, lora_rank=None, **kwargs):
+        super().__init__(**kwargs)
+        self.equation = equation
+        if isinstance(output_shape, int):
+            self.partial_output_shape = (output_shape,)
+        else:
+            self.partial_output_shape = tuple(output_shape)
+        self.bias_axes = bias_axes
+        self.activation = activations.get(activation)
+        self.kernel_initializer = initializers.get(kernel_initializer)
+        self.bias_initializer = initializers.get(bias_initializer)
+        self.kernel_regularizer = regularizers.get(kernel_regularizer)
+        self.bias_regularizer = regularizers.get(bias_regularizer)
+        self.kernel_constraint = constraints.get(kernel_constraint)
+        self.bias_constraint = constraints.get(bias_constraint)
+        self.lora_rank = lora_rank
+        self.lora_enabled = False
+
+    def build(self, input_shape):
+        shape_data = _analyze_einsum_string(self.equation, self.bias_axes, input_shape, self.partial_output_shape)
+        (kernel_shape, bias_shape, full_output_shape) = shape_data
+        self.full_output_shape = tuple(full_output_shape)
+        self.input_spec = InputSpec(ndim=len(input_shape))
+        if self.quantization_mode is not None:
+            self.quantized_build(input_shape, mode=self.quantization_mode)
+        if self.quantization_mode != 'int8':
+            self._kernel = self.add_weight(name='kernel', shape=tuple(kernel_shape), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, dtype=self.dtype, trainable=True)
+        if bias_shape is not None:
+            self.bias = self.add_weight(name='bias', shape=tuple(bias_shape), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, dtype=self.dtype, trainable=True)
+        else:
+            self.bias = None
+        self.built = True
+        if self.lora_rank:
+            self.enable_lora(self.lora_rank)
+
+    @property
+    def kernel(self):
+        if not self.built:
+            raise AttributeError('You must build the layer before accessing `kernel`.')
+        if self.lora_enabled:
+            return self._kernel + ops.matmul(self.lora_kernel_a, self.lora_kernel_b)
+        return self._kernel
+
+    def compute_output_shape(self, _):
+        return self.full_output_shape
+
+    def call(self, inputs, training=None):
+        x = ops.einsum(self.equation, inputs, self.kernel)
+        if self.bias is not None:
+            x += self.bias
+        if self.activation is not None:
+            x = self.activation(x)
+        return x
+
+    def enable_lora(self, rank, a_initializer='he_uniform', b_initializer='zeros'):
+        if self.kernel_constraint:
+            raise ValueError('Lora is incompatible with kernel constraints. In order to enable lora on this layer, remove the `kernel_constraint` argument.')
+        if not self.built:
+            raise ValueError("Cannot enable lora on a layer that isn't yet built.")
+        if self.lora_enabled:
+            raise ValueError('lora is already enabled. This can only be done once per layer.')
+        self._tracker.unlock()
+        self.lora_kernel_a = self.add_weight(name='lora_kernel_a', shape=self.kernel.shape[:-1] + (rank,), initializer=initializers.get(a_initializer), regularizer=self.kernel_regularizer)
+        self.lora_kernel_b = self.add_weight(name='lora_kernel_b', shape=(rank, self.kernel.shape[-1]), initializer=initializers.get(b_initializer), regularizer=self.kernel_regularizer)
+        self._kernel.trainable = False
+        self._tracker.lock()
+        self.lora_enabled = True
+        self.lora_rank = rank
+
+    def save_own_variables(self, store):
+        if not self.built:
+            return
+        (kernel_value, kernel_scale) = self._get_kernel_with_merged_lora()
+        target_variables = [kernel_value]
+        if self.bias is not None:
+            target_variables.append(self.bias)
+        if self.quantization_mode is not None:
+            if self.quantization_mode == 'int8':
+                target_variables.append(kernel_scale)
+            elif self.quantization_mode == 'float8':
+                target_variables.append(self.inputs_scale)
+                target_variables.append(self.inputs_amax_history)
+                target_variables.append(self.kernel_scale)
+                target_variables.append(self.kernel_amax_history)
+                target_variables.append(self.outputs_grad_scale)
+                target_variables.append(self.outputs_grad_amax_history)
+            else:
+                raise self._quantization_mode_error(self.quantization_mode)
+        for (i, variable) in enumerate(target_variables):
+            store[str(i)] = variable
+
+    def load_own_variables(self, store):
+        if not self.lora_enabled:
+            self._check_load_own_variables(store)
+        if not self.built:
+            return
+        target_variables = [self._kernel]
+        if self.bias is not None:
+            target_variables.append(self.bias)
+        if self.quantization_mode is not None:
+            if self.quantization_mode == 'int8':
+                target_variables.append(self.kernel_scale)
+            elif self.quantization_mode == 'float8':
+                target_variables.append(self.inputs_scale)
+                target_variables.append(self.inputs_amax_history)
+                target_variables.append(self.kernel_scale)
+                target_variables.append(self.kernel_amax_history)
+                target_variables.append(self.outputs_grad_scale)
+                target_variables.append(self.outputs_grad_amax_history)
+            else:
+                raise self._quantization_mode_error(self.quantization_mode)
+        for (i, variable) in enumerate(target_variables):
+            variable.assign(store[str(i)])
+        if self.lora_enabled:
+            self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape))
+            self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape))
+
+    def get_config(self):
+        base_config = super().get_config()
+        config = {'output_shape': self.partial_output_shape, 'equation': self.equation, 'activation': activations.serialize(self.activation), 'bias_axes': self.bias_axes, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint)}
+        if self.lora_rank:
+            config['lora_rank'] = self.lora_rank
+        return {**base_config, **config}
+
+    def _check_load_own_variables(self, store):
+        all_vars = 
self._trainable_variables + self._non_trainable_variables + if len(store.keys()) != len(all_vars): + if len(all_vars) == 0 and (not self.built): + raise ValueError(f"Layer '{self.name}' was never built and thus it doesn't have any variables. However the weights file lists {len(store.keys())} variables for this layer.\nIn most cases, this error indicates that either:\n\n1. The layer is owned by a parent layer that implements a `build()` method, but calling the parent's `build()` method did NOT create the state of the child layer '{self.name}'. A `build()` method must create ALL state for the layer, including the state of any children layers.\n\n2. You need to implement the `def build_from_config(self, config)` method on layer '{self.name}', to specify how to rebuild it during loading. In this case, you might also want to implement the method that generates the build config at saving time, `def get_build_config(self)`. The method `build_from_config()` is meant to create the state of the layer (i.e. its variables) upon deserialization.") + raise ValueError(f"Layer '{self.name}' expected {len(all_vars)} variables, but received {len(store.keys())} variables during loading. Expected: {[v.name for v in all_vars]}") + + def quantized_build(self, input_shape, mode): + if mode == 'int8': + shape_data = _analyze_einsum_string(self.equation, self.bias_axes, input_shape, self.partial_output_shape) + (kernel_shape, _, _) = shape_data + self._int8_build(kernel_shape) + elif mode == 'float8': + self._float8_build() + else: + raise self._quantization_mode_error(mode) + + def _int8_build(self, kernel_shape, kernel_initializer='zeros', kernel_scale_initializer='ones'): + (self._input_reduced_axes, self._kernel_reduced_axes, self._input_transpose_axes, self._kernel_transpose_axes, self._input_expand_axes, self._kernel_expand_axes, self._input_squeeze_axes, self._kernel_squeeze_axes, self._custom_gradient_equation, self._kernel_reverse_transpose_axes) = _analyze_quantization_info(self.equation, self.input_spec.ndim) + self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=self._input_reduced_axes) + self._kernel = self.add_weight(name='kernel', shape=kernel_shape, initializer=kernel_initializer, dtype='int8', trainable=False) + kernel_scale_shape = np.array(kernel_shape) + kernel_scale_shape[self._kernel_reduced_axes] = 1 + kernel_scale_shape = kernel_scale_shape[self._kernel_transpose_axes] + kernel_scale_shape = kernel_scale_shape.tolist() + for a in sorted(self._kernel_expand_axes): + kernel_scale_shape.insert(a, 1) + for a in sorted(self._kernel_squeeze_axes, reverse=True): + kernel_scale_shape.pop(a) + self.kernel_scale = self.add_weight(name='kernel_scale', shape=kernel_scale_shape, initializer=kernel_scale_initializer, trainable=False) + self._is_quantized = True + + def _float8_build(self): + from keras.src.dtype_policies import QuantizedFloat8DTypePolicy + amax_history_length = getattr(self.dtype_policy, 'amax_history_length', QuantizedFloat8DTypePolicy.default_amax_history_length) + scale_kwargs = {'shape': (), 'initializer': 'ones', 'dtype': 'float32', 'trainable': True, 'autocast': False} + amax_history_kwargs = {'shape': (amax_history_length,), 'initializer': 'zeros', 'dtype': 'float32', 'trainable': True, 'autocast': False} + self.inputs_scale = self.add_weight(name='inputs_scale', **scale_kwargs) + self.inputs_amax_history = self.add_weight(name='inputs_amax_history', **amax_history_kwargs) + self.kernel_scale = self.add_weight(name='kernel_scale', **scale_kwargs) + self.kernel_amax_history = 
self.add_weight(name='kernel_amax_history', **amax_history_kwargs) + self.outputs_grad_scale = self.add_weight(name='outputs_grad_scale', **scale_kwargs) + self.outputs_grad_amax_history = self.add_weight(name='outputs_grad_amax_history', **amax_history_kwargs) + self.inputs_scale.overwrite_with_gradient = True + self.inputs_amax_history.overwrite_with_gradient = True + self.kernel_scale.overwrite_with_gradient = True + self.kernel_amax_history.overwrite_with_gradient = True + self.outputs_grad_scale.overwrite_with_gradient = True + self.outputs_grad_amax_history.overwrite_with_gradient = True + self._is_quantized = True + + def _int8_call(self, inputs, training=None): + + @ops.custom_gradient + def einsum_with_inputs_gradient(inputs, kernel, kernel_scale): + + def grad_fn(*args, upstream=None): + if upstream is None: + (upstream,) = args + _kernel_scale = kernel_scale + if self._kernel_squeeze_axes: + _kernel_scale = ops.expand_dims(_kernel_scale, axis=self._kernel_squeeze_axes) + if self._kernel_expand_axes: + _kernel_scale = ops.squeeze(_kernel_scale, axis=self._kernel_expand_axes) + _kernel_scale = ops.transpose(_kernel_scale, self._kernel_reverse_transpose_axes) + float_kernel = ops.divide(ops.cast(kernel, dtype=self.compute_dtype), _kernel_scale) + inputs_grad = ops.einsum(self._custom_gradient_equation, upstream, float_kernel) + return (inputs_grad, None, None) + (inputs, inputs_scale) = self.inputs_quantizer(inputs) + x = ops.einsum(self.equation, inputs, kernel) + inputs_scale = ops.transpose(inputs_scale, self._input_transpose_axes) + if self._input_expand_axes: + inputs_scale = ops.expand_dims(inputs_scale, axis=self._input_expand_axes) + if self._input_squeeze_axes: + inputs_scale = ops.squeeze(inputs_scale, axis=self._input_squeeze_axes) + x = ops.cast(x, self.compute_dtype) + x = ops.divide(x, ops.multiply(inputs_scale, kernel_scale)) + return (x, grad_fn) + x = einsum_with_inputs_gradient(inputs, ops.convert_to_tensor(self._kernel), ops.convert_to_tensor(self.kernel_scale)) + if self.lora_enabled: + lora_x = ops.einsum(self.equation, inputs, self.lora_kernel_a) + lora_x = ops.matmul(lora_x, self.lora_kernel_b) + x = ops.add(x, lora_x) + if self.bias is not None: + x += self.bias + if self.activation is not None: + x = self.activation(x) + return x + + def _float8_call(self, inputs, training=None): + if self.lora_enabled: + raise NotImplementedError("Currently, `_float8_call` doesn't support LoRA") + + @ops.custom_gradient + def quantized_dequantize_inputs(inputs, scale, amax_history): + if training: + new_scale = quantizers.compute_float8_scale(ops.max(amax_history, axis=0), scale, ops.cast(float(ml_dtypes.finfo('float8_e4m3fn').max), 'float32')) + new_amax_history = quantizers.compute_float8_amax_history(inputs, amax_history) + else: + new_scale = None + new_amax_history = None + qdq_inputs = quantizers.quantize_and_dequantize(inputs, scale, 'float8_e4m3fn', self.compute_dtype) + + def grad(*args, upstream=None, variables=None): + if upstream is None: + (upstream,) = args + return (upstream, new_scale, new_amax_history) + return (qdq_inputs, grad) + + @ops.custom_gradient + def quantized_dequantize_outputs(outputs, scale, amax_history): + + def grad(*args, upstream=None, variables=None): + if upstream is None: + (upstream,) = args + new_scale = quantizers.compute_float8_scale(ops.max(amax_history, axis=0), scale, ops.cast(float(ml_dtypes.finfo('float8_e5m2').max), 'float32')) + qdq_upstream = quantizers.quantize_and_dequantize(upstream, scale, 'float8_e5m2', 
self.compute_dtype) + new_amax_history = quantizers.compute_float8_amax_history(upstream, amax_history) + return (qdq_upstream, new_scale, new_amax_history) + return (outputs, grad) + x = ops.einsum(self.equation, quantized_dequantize_inputs(inputs, ops.convert_to_tensor(self.inputs_scale), ops.convert_to_tensor(self.inputs_amax_history)), quantized_dequantize_inputs(ops.convert_to_tensor(self._kernel), ops.convert_to_tensor(self.kernel_scale), ops.convert_to_tensor(self.kernel_amax_history))) + x = quantized_dequantize_outputs(x, ops.convert_to_tensor(self.outputs_grad_scale), ops.convert_to_tensor(self.outputs_grad_amax_history)) + if self.bias is not None: + bias = self.bias + if self.dtype_policy.compute_dtype == 'float32': + bias_bf16 = ops.cast(bias, 'bfloat16') + bias = ops.cast(bias_bf16, bias.dtype) + x = ops.add(x, bias) + if self.activation is not None: + x = self.activation(x) + return x + + def quantize(self, mode, type_check=True): + if type_check and type(self) is not EinsumDense: + raise self._not_implemented_error(self.quantize) + if mode == 'int8': + (self._input_reduced_axes, self._kernel_reduced_axes, self._input_transpose_axes, self._kernel_transpose_axes, self._input_expand_axes, self._kernel_expand_axes, self._input_squeeze_axes, self._kernel_squeeze_axes, self._custom_gradient_equation, self._kernel_reverse_transpose_axes) = _analyze_quantization_info(self.equation, self.input_spec.ndim) + (kernel_value, kernel_scale) = quantizers.abs_max_quantize(self._kernel, axis=self._kernel_reduced_axes, to_numpy=True) + kernel_scale = ops.transpose(kernel_scale, self._kernel_transpose_axes) + if self._kernel_expand_axes: + kernel_scale = ops.expand_dims(kernel_scale, axis=self._kernel_expand_axes) + if self._kernel_squeeze_axes: + kernel_scale = ops.squeeze(kernel_scale, axis=self._kernel_squeeze_axes) + kernel_shape = tuple(self._kernel.shape) + del self._kernel + self._int8_build(kernel_shape, lambda shape, dtype: kernel_value, lambda shape, dtype: kernel_scale) + elif mode == 'float8': + self._float8_build() + else: + raise self._quantization_mode_error(mode) + if self.dtype_policy.quantization_mode is None: + policy = dtype_policies.get(f'{mode}_from_{self.dtype_policy.name}') + self.dtype_policy = policy + + def _get_kernel_with_merged_lora(self): + if self.dtype_policy.quantization_mode is not None: + kernel_value = self._kernel + kernel_scale = self.kernel_scale + if self.lora_enabled: + if self._kernel_squeeze_axes: + kernel_scale = ops.expand_dims(kernel_scale, axis=self._kernel_squeeze_axes) + if self._kernel_expand_axes: + kernel_scale = ops.squeeze(kernel_scale, axis=self._kernel_expand_axes) + if self._kernel_transpose_axes: + + def _argsort(seq): + return sorted(range(len(seq)), key=seq.__getitem__) + reverse_transpose = _argsort(self._kernel_transpose_axes) + kernel_scale = ops.transpose(kernel_scale, axes=reverse_transpose) + kernel_value = ops.divide(kernel_value, kernel_scale) + kernel_value = ops.add(kernel_value, ops.matmul(self.lora_kernel_a, self.lora_kernel_b)) + (kernel_value, kernel_scale) = quantizers.abs_max_quantize(kernel_value, axis=self._kernel_reduced_axes, to_numpy=True) + kernel_scale = ops.transpose(kernel_scale, self._kernel_transpose_axes) + if self._kernel_expand_axes: + kernel_scale = ops.expand_dims(kernel_scale, axis=self._kernel_expand_axes) + if self._kernel_squeeze_axes: + kernel_scale = ops.squeeze(kernel_scale, axis=self._kernel_squeeze_axes) + else: + kernel_value = self.kernel + kernel_scale = None + return (kernel_value, 
kernel_scale)
+
+def _analyze_einsum_string(equation, bias_axes, input_shape, output_shape):
+    dot_replaced_string = re.sub('\\.\\.\\.', '0', equation)
+    split_string = re.match('([a-zA-Z]+),([a-zA-Z]+)->([a-zA-Z]+)', dot_replaced_string)
+    if split_string:
+        return _analyze_split_string(split_string, bias_axes, input_shape, output_shape)
+    split_string = re.match('0([a-zA-Z]+),([a-zA-Z]+)->0([a-zA-Z]+)', dot_replaced_string)
+    if split_string:
+        return _analyze_split_string(split_string, bias_axes, input_shape, output_shape, left_elided=True)
+    split_string = re.match('([a-zA-Z]{2,})0,([a-zA-Z]+)->([a-zA-Z]+)0', dot_replaced_string)
+    if split_string:
+        return _analyze_split_string(split_string, bias_axes, input_shape, output_shape)
+    raise ValueError(f"Invalid einsum equation '{equation}'. Equations must be in the form [X],[Y]->[Z], ...[X],[Y]->...[Z], or [X]...,[Y]->[Z]....")
+
+def _analyze_split_string(split_string, bias_axes, input_shape, output_shape, left_elided=False):
+    input_spec = split_string.group(1)
+    weight_spec = split_string.group(2)
+    output_spec = split_string.group(3)
+    elided = len(input_shape) - len(input_spec)
+    if isinstance(output_shape, int):
+        output_shape = [output_shape]
+    else:
+        output_shape = list(output_shape)
+    output_shape.insert(0, input_shape[0])
+    if elided > 0 and left_elided:
+        for i in range(1, elided):
+            output_shape.insert(1, input_shape[i])
+    elif elided > 0 and (not left_elided):
+        for i in range(len(input_shape) - elided, len(input_shape)):
+            output_shape.append(input_shape[i])
+    if left_elided:
+        input_dim_map = {dim: i + elided - len(input_shape) for (i, dim) in enumerate(input_spec)}
+        output_dim_map = {dim: i + elided for (i, dim) in enumerate(output_spec)}
+    else:
+        input_dim_map = {dim: i for (i, dim) in enumerate(input_spec)}
+        output_dim_map = {dim: i for (i, dim) in enumerate(output_spec)}
+    for dim in input_spec:
+        input_shape_at_dim = input_shape[input_dim_map[dim]]
+        if dim in output_dim_map:
+            output_shape_at_dim = output_shape[output_dim_map[dim]]
+            if output_shape_at_dim is not None and output_shape_at_dim != input_shape_at_dim:
+                raise ValueError(f"Input shape and output shape do not match at shared dimension '{dim}'. Input shape is {input_shape_at_dim}, and output shape is {output_shape[output_dim_map[dim]]}.")
+    for dim in output_spec:
+        if dim not in input_spec and dim not in weight_spec:
+            raise ValueError(f"Dimension '{dim}' was specified in the output '{output_spec}' but has no corresponding dim in the input spec '{input_spec}' or weight spec '{weight_spec}'")
+    weight_shape = []
+    for dim in weight_spec:
+        if dim in input_dim_map:
+            weight_shape.append(input_shape[input_dim_map[dim]])
+        elif dim in output_dim_map:
+            weight_shape.append(output_shape[output_dim_map[dim]])
+        else:
+            raise ValueError(f"Weight dimension '{dim}' did not have a match in either the input spec '{input_spec}' or the output spec '{output_spec}'. 
For this layer, the weight must be fully specified.") + if bias_axes is not None: + num_left_elided = elided if left_elided else 0 + idx_map = {char: output_shape[i + num_left_elided] for (i, char) in enumerate(output_spec)} + for char in bias_axes: + if char not in output_spec: + raise ValueError(f"Bias dimension '{char}' was requested, but is not part of the output spec '{output_spec}'") + first_bias_location = min([output_spec.find(char) for char in bias_axes]) + bias_output_spec = output_spec[first_bias_location:] + bias_shape = [idx_map[char] if char in bias_axes else 1 for char in bias_output_spec] + if not left_elided: + for _ in range(elided): + bias_shape.append(1) + else: + bias_shape = None + return (weight_shape, bias_shape, output_shape) + +def _analyze_quantization_info(equation, input_shape): + + def get_specs(equation, input_shape): + possible_labels = string.ascii_letters + dot_replaced_string = re.sub('\\.\\.\\.', '0', equation) + split_string = re.match('([a-zA-Z]+),([a-zA-Z]+)->([a-zA-Z]+)', dot_replaced_string) + if split_string is not None: + input_spec = split_string.group(1) + weight_spec = split_string.group(2) + output_spec = split_string.group(3) + return (input_spec, weight_spec, output_spec) + split_string = re.match('0([a-zA-Z]+),([a-zA-Z]+)->0([a-zA-Z]+)', dot_replaced_string) + if split_string is not None: + input_spec = split_string.group(1) + weight_spec = split_string.group(2) + output_spec = split_string.group(3) + elided = len(input_shape) - len(input_spec) + possible_labels = sorted(set(possible_labels) - set(input_spec) - set(weight_spec) - set(output_spec)) + for i in range(elided): + input_spec = possible_labels[i] + input_spec + output_spec = possible_labels[i] + output_spec + return (input_spec, weight_spec, output_spec) + split_string = re.match('([a-zA-Z]{2,})0,([a-zA-Z]+)->([a-zA-Z]+)0', dot_replaced_string) + if split_string is not None: + input_spec = split_string.group(1) + weight_spec = split_string.group(2) + output_spec = split_string.group(3) + elided = len(input_shape) - len(input_spec) + possible_labels = sorted(set(possible_labels) - set(input_spec) - set(weight_spec) - set(output_spec)) + for i in range(elided): + input_spec = input_spec + possible_labels[i] + output_spec = output_spec + possible_labels[i] + return (input_spec, weight_spec, output_spec) + raise ValueError(f"Invalid einsum equation '{equation}'. 
Equations must be in the form [X],[Y]->[Z], ...[X],[Y]->...[Z], or [X]...,[Y]->[Z]....")
+    (input_spec, weight_spec, output_spec) = get_specs(equation, input_shape)
+    input_reduced_axes = []
+    weight_reduced_axes = []
+    for (i, label) in enumerate(input_spec):
+        index = output_spec.find(label)
+        if index == -1:
+            input_reduced_axes.append(i)
+    for (i, label) in enumerate(weight_spec):
+        index = output_spec.find(label)
+        if index == -1:
+            weight_reduced_axes.append(i)
+    input_expand_axes = []
+    weight_expand_axes = []
+    for (i, label) in enumerate(output_spec):
+        index_input = input_spec.find(label)
+        index_weight = weight_spec.find(label)
+        if index_input == -1:
+            input_expand_axes.append(i)
+        if index_weight == -1:
+            weight_expand_axes.append(i)
+    input_transpose_axes = []
+    weight_transpose_axes = []
+    for (i, label) in enumerate(output_spec):
+        index_input = input_spec.find(label)
+        index_weight = weight_spec.find(label)
+        if index_input != -1:
+            input_transpose_axes.append(index_input)
+        if index_weight != -1:
+            weight_transpose_axes.append(index_weight)
+    input_squeeze_axes = []
+    weight_squeeze_axes = []
+    for ori_index in input_reduced_axes:
+        try:
+            index = input_expand_axes.pop(0)
+        except IndexError:
+            input_squeeze_axes.append(ori_index)
+        input_transpose_axes.insert(index, ori_index)
+    for ori_index in weight_reduced_axes:
+        try:
+            index = weight_expand_axes.pop(0)
+        except IndexError:
+            weight_squeeze_axes.append(ori_index)
+        weight_transpose_axes.insert(index, ori_index)
+    custom_gradient_equation = f'{output_spec},{weight_spec}->{input_spec}'
+    weight_reverse_transpose_axes = [i for (_, i) in sorted(((v, i) for (i, v) in enumerate(weight_transpose_axes)))]
+    return (input_reduced_axes, weight_reduced_axes, input_transpose_axes, weight_transpose_axes, input_expand_axes, weight_expand_axes, input_squeeze_axes, weight_squeeze_axes, custom_gradient_equation, weight_reverse_transpose_axes)
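+
+# --- Editor's note: editorial sketch, not part of the Keras sources. For the
+# classic dense-like equation 'ab,bc->ac' with a 32-feature input and 64
+# output units, the string analysis above resolves the kernel shape to
+# (32, 64), the bias shape to (64,) when bias_axes='c', and the full output
+# shape to (None, 64):
+assert _analyze_einsum_string('ab,bc->ac', 'c', (None, 32), 64) == ([32, 64], [64], [None, 64])
+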
+# File: keras-master/keras/src/layers/core/embedding.py
+import warnings
+from keras.src import backend
+from keras.src import constraints
+from keras.src import dtype_policies
+from keras.src import initializers
+from keras.src import ops
+from keras.src import quantizers
+from keras.src import regularizers
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer
+
+@keras_export('keras.layers.Embedding')
+class Embedding(Layer):
+
+    def __init__(self, input_dim, output_dim, embeddings_initializer='uniform', embeddings_regularizer=None, embeddings_constraint=None, mask_zero=False, weights=None, lora_rank=None, **kwargs):
+        input_length = kwargs.pop('input_length', None)
+        if input_length is not None:
+            warnings.warn('Argument `input_length` is deprecated. Just remove it.')
+        super().__init__(**kwargs)
+        self.input_dim = input_dim
+        self.output_dim = output_dim
+        self.embeddings_initializer = initializers.get(embeddings_initializer)
+        self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
+        self.embeddings_constraint = constraints.get(embeddings_constraint)
+        self.mask_zero = mask_zero
+        self.supports_masking = mask_zero
+        self.autocast = False
+        self.lora_rank = lora_rank
+        self.lora_enabled = False
+        if weights is not None:
+            self.build()
+            if not (isinstance(weights, list) and len(weights) == 1):
+                weights = [weights]
+            self.set_weights(weights)
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        if self.quantization_mode is not None:
+            self.quantized_build(input_shape, mode=self.quantization_mode)
+        if self.quantization_mode != 'int8':
+            self._embeddings = self.add_weight(shape=(self.input_dim, self.output_dim), initializer=self.embeddings_initializer, name='embeddings', regularizer=self.embeddings_regularizer, constraint=self.embeddings_constraint, trainable=True)
+        self.built = True
+        if self.lora_rank:
+            self.enable_lora(self.lora_rank)
+
+    @property
+    def embeddings(self):
+        if self.lora_enabled:
+            return self._embeddings + ops.matmul(self.lora_embeddings_a, self.lora_embeddings_b)
+        return self._embeddings
+
+    def call(self, inputs):
+        if inputs.dtype != 'int32' and inputs.dtype != 'int64':
+            inputs = ops.cast(inputs, 'int32')
+        outputs = ops.take(self.embeddings, inputs, axis=0)
+        return ops.cast(outputs, dtype=self.compute_dtype)
+
+    def compute_mask(self, inputs, mask=None):
+        if not self.mask_zero:
+            return None
+        return ops.not_equal(inputs, 0)
+
+    def compute_output_shape(self, input_shape):
+        return input_shape + (self.output_dim,)
+
+    def enable_lora(self, rank, a_initializer='he_uniform', b_initializer='zeros'):
+        if self.embeddings_constraint:
+            raise ValueError('Lora is incompatible with embedding constraints. In order to enable lora on this layer, remove the `embeddings_constraint` argument.')
+        if not self.built:
+            raise ValueError("Cannot enable lora on a layer that isn't yet built.")
+        if self.lora_enabled:
+            raise ValueError('lora is already enabled. 
This can only be done once per layer.') + self._tracker.unlock() + self.lora_embeddings_a = self.add_weight(name='lora_embeddings_a', shape=(self.embeddings.shape[0], rank), initializer=initializers.get(a_initializer), regularizer=self.embeddings_regularizer) + self.lora_embeddings_b = self.add_weight(name='lora_embeddings_b', shape=(rank, self.embeddings.shape[1]), initializer=initializers.get(b_initializer), regularizer=self.embeddings_regularizer) + self.embeddings.trainable = False + self._tracker.lock() + self.lora_enabled = True + self.lora_rank = rank + + def save_own_variables(self, store): + if not self.built: + return + (embeddings_value, embeddings_scale) = self._get_embeddings_with_merged_lora() + target_variables = [embeddings_value] + if self.quantization_mode is not None: + if self.quantization_mode == 'int8': + target_variables.append(embeddings_scale) + else: + raise self._quantization_mode_error(self.quantization_mode) + for (i, variable) in enumerate(target_variables): + store[str(i)] = variable + + def load_own_variables(self, store): + if not self.lora_enabled: + self._check_load_own_variables(store) + if not self.built: + return + target_variables = [self._embeddings] + if self.quantization_mode is not None: + if self.quantization_mode == 'int8': + target_variables.append(self.embeddings_scale) + else: + raise self._quantization_mode_error(self.quantization_mode) + for (i, variable) in enumerate(target_variables): + variable.assign(store[str(i)]) + if self.lora_enabled: + self.lora_embeddings_a.assign(ops.zeros(self.lora_embeddings_a.shape)) + self.lora_embeddings_b.assign(ops.zeros(self.lora_embeddings_b.shape)) + + def get_config(self): + base_config = super().get_config() + config = {'input_dim': self.input_dim, 'output_dim': self.output_dim, 'embeddings_initializer': initializers.serialize(self.embeddings_initializer), 'embeddings_regularizer': regularizers.serialize(self.embeddings_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'embeddings_constraint': constraints.serialize(self.embeddings_constraint), 'mask_zero': self.mask_zero} + if self.lora_rank: + config['lora_rank'] = self.lora_rank + return {**base_config, **config} + + def _check_load_own_variables(self, store): + all_vars = self._trainable_variables + self._non_trainable_variables + if len(store.keys()) != len(all_vars): + if len(all_vars) == 0 and (not self.built): + raise ValueError(f"Layer '{self.name}' was never built and thus it doesn't have any variables. However the weights file lists {len(store.keys())} variables for this layer.\nIn most cases, this error indicates that either:\n\n1. The layer is owned by a parent layer that implements a `build()` method, but calling the parent's `build()` method did NOT create the state of the child layer '{self.name}'. A `build()` method must create ALL state for the layer, including the state of any children layers.\n\n2. You need to implement the `def build_from_config(self, config)` method on layer '{self.name}', to specify how to rebuild it during loading. In this case, you might also want to implement the method that generates the build config at saving time, `def get_build_config(self)`. The method `build_from_config()` is meant to create the state of the layer (i.e. its variables) upon deserialization.") + raise ValueError(f"Layer '{self.name}' expected {len(all_vars)} variables, but received {len(store.keys())} variables during loading. 
Expected: {[v.name for v in all_vars]}")
+    ''
+
+    def _quantization_mode_error(self, mode):
+        return NotImplementedError(f"Invalid quantization mode. Expected 'int8'. Received: quantization_mode={mode}")
+
+    def quantized_build(self, input_shape, mode):
+        if mode == 'int8':
+            self._int8_build()
+        else:
+            raise self._quantization_mode_error(mode)
+
+    def _int8_build(self, embeddings_initializer='zeros', embeddings_scale_initializer='ones'):
+        self._embeddings = self.add_weight(name='embeddings', shape=(self.input_dim, self.output_dim), initializer=embeddings_initializer, dtype='int8', trainable=False)
+        self.embeddings_scale = self.add_weight(name='embeddings_scale', shape=(self.input_dim,), initializer=embeddings_scale_initializer, trainable=False)
+        self._is_quantized = True
+
+    def quantized_call(self, *args, **kwargs):
+        if self.quantization_mode != 'int8':
+            raise self._quantization_mode_error(self.quantization_mode)
+        return super().quantized_call(*args, **kwargs)
+
+    def _int8_call(self, inputs, training=None):
+        if backend.standardize_dtype(inputs.dtype) not in ('int32', 'int64'):
+            inputs = ops.cast(inputs, 'int32')
+        embeddings_scale = ops.take(self.embeddings_scale, inputs, axis=0)
+        outputs = ops.take(self._embeddings, inputs, axis=0)
+        outputs = ops.divide(ops.cast(outputs, dtype=self.compute_dtype), ops.expand_dims(embeddings_scale, axis=-1))
+        if self.lora_enabled:
+            lora_outputs = ops.take(self.lora_embeddings_a, inputs, axis=0)
+            lora_outputs = ops.matmul(lora_outputs, self.lora_embeddings_b)
+            outputs = ops.add(outputs, lora_outputs)
+        return outputs
+
+    def quantize(self, mode, type_check=True):
+        if type_check and type(self) is not Embedding:
+            raise self._not_implemented_error(self.quantize)
+        if mode == 'int8':
+            (embeddings_value, embeddings_scale) = quantizers.abs_max_quantize(self._embeddings, axis=-1, to_numpy=True)
+            embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
+            del self._embeddings
+            self._int8_build(lambda shape, dtype: embeddings_value, lambda shape, dtype: embeddings_scale)
+        else:
+            raise self._quantization_mode_error(mode)
+        if self.dtype_policy.quantization_mode is None:
+            policy = dtype_policies.get(f'{mode}_from_{self.dtype_policy.name}')
+            self.dtype_policy = policy
+
+    def _get_embeddings_with_merged_lora(self):
+        if self.dtype_policy.quantization_mode is not None:
+            embeddings_value = self._embeddings
+            embeddings_scale = self.embeddings_scale
+            if self.lora_enabled:
+                embeddings_value = ops.divide(embeddings_value, ops.expand_dims(embeddings_scale, axis=-1))
+                embeddings_value = ops.add(embeddings_value, ops.matmul(self.lora_embeddings_a, self.lora_embeddings_b))
+                (embeddings_value, embeddings_scale) = quantizers.abs_max_quantize(embeddings_value, axis=-1, to_numpy=True)
+                embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
+            return (embeddings_value, embeddings_scale)
+        return (self.embeddings, None)
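+
+# --- Editor's note: editorial sketch, not part of the Keras sources; it
+# assumes a working `keras` install. With `mask_zero=True`, index 0 acts as a
+# reserved padding token: `compute_mask` returns `inputs != 0` so downstream
+# mask-aware layers skip those positions. In the int8 path above, each
+# embedding row keeps its own abs-max scale, looked up per token id before
+# dequantizing the gathered rows.
+_emb_demo = Embedding(input_dim=10, output_dim=4, mask_zero=True)
+_emb_demo.build()
+assert _emb_demo.compute_output_shape((2, 5)) == (2, 5, 4)
+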
sparse=x.sparse), inputs) + +# File: keras-master/keras/src/layers/core/input_layer.py +import warnings +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.ops.node import Node + +@keras_export('keras.layers.InputLayer') +class InputLayer(Layer): + + def __init__(self, shape=None, batch_size=None, dtype=None, sparse=None, batch_shape=None, input_tensor=None, optional=False, name=None, **kwargs): + super().__init__(name=name) + if 'input_shape' in kwargs: + warnings.warn('Argument `input_shape` is deprecated. Use `shape` instead.') + shape = kwargs.pop('input_shape') + if 'batch_input_shape' in kwargs: + batch_shape = kwargs.pop('batch_input_shape') + if shape is not None and batch_shape is not None: + raise ValueError('You cannot pass both `shape` and `batch_shape` at the same time.') + if batch_size is not None and batch_shape is not None: + raise ValueError('You cannot pass both `batch_size` and `batch_shape` at the same time.') + if shape is None and batch_shape is None: + raise ValueError('You must pass a `shape` argument.') + if shape is not None: + shape = backend.standardize_shape(shape) + batch_shape = (batch_size,) + shape + self._batch_shape = backend.standardize_shape(batch_shape) + self._dtype = backend.standardize_dtype(dtype) + self.sparse = bool(sparse) + if self.sparse and (not backend.SUPPORTS_SPARSE_TENSORS): + raise ValueError(f'`sparse=True` is not supported with backend: {backend.backend()}') + if input_tensor is not None: + if not isinstance(input_tensor, backend.KerasTensor): + raise ValueError(f'Argument `input_tensor` must be a KerasTensor. Received invalid type: input_tensor={input_tensor} (of type {type(input_tensor)})') + else: + input_tensor = backend.KerasTensor(shape=batch_shape, dtype=dtype, sparse=sparse, name=name) + self._input_tensor = input_tensor + Node(operation=self, call_args=(), call_kwargs={}, outputs=input_tensor) + self.built = True + self.optional = optional + + def call(self): + return + + @property + def batch_shape(self): + return self._batch_shape + + @property + def dtype(self): + return self._dtype + + def get_config(self): + return {'batch_shape': self.batch_shape, 'dtype': self.dtype, 'sparse': self.sparse, 'name': self.name} + +@keras_export(['keras.layers.Input', 'keras.Input']) +def Input(shape=None, batch_size=None, dtype=None, sparse=None, batch_shape=None, name=None, tensor=None, optional=False): + layer = InputLayer(shape=shape, batch_size=batch_size, dtype=dtype, sparse=sparse, batch_shape=batch_shape, name=name, input_tensor=tensor, optional=optional) + return layer.output + +# File: keras-master/keras/src/layers/core/lambda_layer.py +import inspect +import types +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.saving import serialization_lib +from keras.src.utils import python_utils + +@keras_export('keras.layers.Lambda') +class Lambda(Layer): + + def __init__(self, function, output_shape=None, mask=None, arguments=None, **kwargs): + super().__init__(**kwargs) + self.arguments = arguments or {} + self.function = function + if mask is not None: + self.supports_masking = True + else: + self.supports_masking = False + self.mask = mask + self._output_shape = output_shape + self._already_warned = False + function_args = inspect.getfullargspec(function).args + self._fn_expects_training_arg = 'training' in function_args + 
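+ # Introspect the wrapped function's signature once at construction; call() forwards the optional `training`/`mask` keyword arguments only when the function declares them.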
self._fn_expects_mask_arg = 'mask' in function_args + + def compute_output_shape(self, input_shape): + if self._output_shape is None: + try: + inputs = tree.map_shape_structure(lambda x: backend.KerasTensor(x, dtype=self.compute_dtype), input_shape) + output_spec = backend.compute_output_spec(self.call, inputs) + return tree.map_structure(lambda x: x.shape, output_spec) + except: + raise NotImplementedError("We could not automatically infer the shape of the Lambda's output. Please specify the `output_shape` argument for this Lambda layer.") + if callable(self._output_shape): + return self._output_shape(input_shape) + batch_size = tree.flatten(input_shape)[0] + + def _add_batch(shape): + return (batch_size,) + shape + return tree.map_shape_structure(_add_batch, self._output_shape) + + def call(self, inputs, mask=None, training=None): + kwargs = {k: v for (k, v) in self.arguments.items()} + if self._fn_expects_mask_arg: + kwargs['mask'] = mask + if self._fn_expects_training_arg: + kwargs['training'] = training + return self.function(inputs, **kwargs) + + def compute_mask(self, inputs, mask=None): + if callable(self.mask): + return self.mask(inputs, mask) + return self.mask + + def get_config(self): + config = {'function': self._serialize_function_to_config(self.function)} + if self._output_shape is not None: + if callable(self._output_shape): + output_shape = self._serialize_function_to_config(self._output_shape) + else: + output_shape = self._output_shape + config['output_shape'] = output_shape + if self.mask is not None: + if callable(self.mask): + mask = self._serialize_function_to_config(self.mask) + else: + mask = serialization_lib.serialize_keras_object(self.mask) + config['mask'] = mask + config['arguments'] = serialization_lib.serialize_keras_object(self.arguments) + base_config = super().get_config() + return {**base_config, **config} + + def _serialize_function_to_config(self, fn): + if isinstance(fn, types.LambdaType) and fn.__name__ == '<lambda>': + (code, defaults, closure) = python_utils.func_dump(fn) + return {'class_name': '__lambda__', 'config': {'code': code, 'defaults': defaults, 'closure': closure}} + elif callable(fn): + return serialization_lib.serialize_keras_object(fn) + raise ValueError(f'Invalid input type for serialization. Received: {fn} of type {type(fn)}.') +
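+ # Note (editorial sketch): a `Lambda` wrapping a named function serializes by reference via `serialize_keras_object`, while a true `lambda` (detected above by `__name__ == '<lambda>'`) is serialized as marshalled bytecode via `python_utils.func_dump` and can only be reloaded with `safe_mode=False`, as enforced below.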
+ @staticmethod + def _raise_for_lambda_deserialization(arg_name, safe_mode): + if safe_mode: + raise ValueError(f'The `{arg_name}` of this `Lambda` layer is a Python lambda. Deserializing it is unsafe. If you trust the source of the config artifact, you can override this error by passing `safe_mode=False` to `from_config()`, or calling `keras.config.enable_unsafe_deserialization()`.') + + @classmethod + def from_config(cls, config, custom_objects=None, safe_mode=None): + safe_mode = safe_mode or serialization_lib.in_safe_mode() + fn_config = config['function'] + if isinstance(fn_config, dict) and 'class_name' in fn_config and (fn_config['class_name'] == '__lambda__'): + cls._raise_for_lambda_deserialization('function', safe_mode) + inner_config = fn_config['config'] + fn = python_utils.func_load(inner_config['code'], defaults=inner_config['defaults'], closure=inner_config['closure']) + config['function'] = fn + else: + config['function'] = serialization_lib.deserialize_keras_object(fn_config, custom_objects=custom_objects) + if 'output_shape' in config: + fn_config = config['output_shape'] + if isinstance(fn_config, dict) and 'class_name' in fn_config and (fn_config['class_name'] == '__lambda__'): + cls._raise_for_lambda_deserialization('output_shape', safe_mode) + inner_config = fn_config['config'] + fn = python_utils.func_load(inner_config['code'], defaults=inner_config['defaults'], closure=inner_config['closure']) + config['output_shape'] = fn + else: + output_shape = serialization_lib.deserialize_keras_object(fn_config, custom_objects=custom_objects) + if isinstance(output_shape, list) and all((isinstance(e, (int, type(None))) for e in output_shape)): + output_shape = tuple(output_shape) + config['output_shape'] = output_shape + if 'arguments' in config: + config['arguments'] = serialization_lib.deserialize_keras_object(config['arguments'], custom_objects=custom_objects) + return cls(**config) + +# File: keras-master/keras/src/layers/core/masking.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.Masking') +class Masking(Layer): + + def __init__(self, mask_value=0.0, **kwargs): + super().__init__(**kwargs) + self.mask_value = mask_value + self.supports_masking = True + self.built = True + + def compute_mask(self, inputs, mask=None): + return ops.any(ops.not_equal(inputs, self.mask_value), axis=-1) + + def call(self, inputs): + boolean_mask = ops.any(ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True) + outputs = inputs * backend.cast(boolean_mask, dtype=inputs.dtype) + try: + outputs._keras_mask = ops.squeeze(boolean_mask, axis=-1) + except AttributeError: + pass + return outputs + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + base_config = super().get_config() + config = {'mask_value': self.mask_value} + return {**base_config, **config} + +# File: keras-master/keras/src/layers/core/wrapper.py +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.saving import serialization_lib + +@keras_export('keras.layers.Wrapper') +class Wrapper(Layer): + + def __init__(self, layer, **kwargs): + try: + assert isinstance(layer, Layer) + except Exception: + raise ValueError(f"Layer {layer} supplied to Wrapper isn't a supported layer type. 
Please ensure wrapped layer is a valid Keras layer.") + super().__init__(**kwargs) + self.layer = layer + + def build(self, input_shape=None): + if not self.layer.built: + self.layer.build(input_shape) + self.layer.built = True + self.built = True + + def get_config(self): + config = {'layer': serialization_lib.serialize_keras_object(self.layer)} + base_config = super().get_config() + return {**base_config, **config} + + @classmethod + def from_config(cls, config, custom_objects=None): + layer = serialization_lib.deserialize_keras_object(config.pop('layer'), custom_objects=custom_objects) + return cls(layer, **config) + +# File: keras-master/keras/src/layers/input_spec.py +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export + +@keras_export(['keras.InputSpec', 'keras.layers.InputSpec']) +class InputSpec: + + def __init__(self, dtype=None, shape=None, ndim=None, max_ndim=None, min_ndim=None, axes=None, allow_last_axis_squeeze=False, name=None, optional=False): + self.dtype = backend.standardize_dtype(dtype) if dtype is not None else None + if shape is not None: + self.shape = backend.standardize_shape(shape) + self.ndim = len(shape) + else: + self.ndim = ndim + self.shape = None + self.max_ndim = max_ndim + self.min_ndim = min_ndim + self.name = name + self.optional = optional + self.allow_last_axis_squeeze = allow_last_axis_squeeze + try: + axes = axes or {} + self.axes = {int(k): axes[k] for k in axes} + except (ValueError, TypeError): + raise TypeError(f'Argument `axes` must be a dict with integer keys. Received: axes={axes}') + if self.axes and (self.ndim is not None or self.max_ndim is not None): + max_dim = (self.ndim if self.ndim else self.max_ndim) - 1 + max_axis = max(self.axes) + if max_axis > max_dim: + raise ValueError('Axis {} is greater than the maximum allowed value: {}'.format(max_axis, max_dim)) + + def __repr__(self): + spec = ['dtype=' + str(self.dtype) if self.dtype else '', 'shape=' + str(self.shape) if self.shape else '', 'ndim=' + str(self.ndim) if self.ndim else '', 'max_ndim=' + str(self.max_ndim) if self.max_ndim else '', 'min_ndim=' + str(self.min_ndim) if self.min_ndim else '', 'axes=' + str(self.axes) if self.axes else ''] + return f"InputSpec({', '.join((x for x in spec if x))})" + + def get_config(self): + return {'dtype': self.dtype, 'shape': self.shape, 'ndim': self.ndim, 'max_ndim': self.max_ndim, 'min_ndim': self.min_ndim, 'axes': self.axes} + + @classmethod + def from_config(cls, config): + return cls(**config) + +def assert_input_compatibility(input_spec, inputs, layer_name): + if not input_spec: + return + input_spec = tree.flatten(input_spec) + if isinstance(inputs, dict): + names = [spec.name for spec in input_spec] + if all(names): + list_inputs = [] + for name in names: + if name not in inputs: + raise ValueError(f'Missing data for input "{name}". You passed a data dictionary with keys {list(inputs.keys())}. Expected the following keys: {names}') + list_inputs.append(inputs[name]) + inputs = list_inputs + inputs = tree.flatten(inputs) + if len(inputs) != len(input_spec): + raise ValueError(f'Layer "{layer_name}" expects {len(input_spec)} input(s), but it received {len(inputs)} input tensors. Inputs received: {inputs}') + for (input_index, (x, spec)) in enumerate(zip(inputs, input_spec)): + if spec is None: + continue + if x is None and spec.optional: + continue + if not hasattr(x, 'shape'): + raise ValueError(f"Inputs to a layer should be tensors. 
Got '{x}' (of type {type(x)}) as input for layer '{layer_name}'.") + shape = backend.standardize_shape(x.shape) + ndim = len(shape) + if spec.ndim is not None and (not spec.allow_last_axis_squeeze): + if ndim != spec.ndim: + raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected ndim={spec.ndim}, found ndim={ndim}. Full shape received: {shape}') + if spec.max_ndim is not None: + if ndim is not None and ndim > spec.max_ndim: + raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected max_ndim={spec.max_ndim}, found ndim={ndim}') + if spec.min_ndim is not None: + if ndim is not None and ndim < spec.min_ndim: + raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected min_ndim={spec.min_ndim}, found ndim={ndim}. Full shape received: {shape}') + if spec.dtype is not None: + dtype = backend.standardize_dtype(x.dtype) + if dtype != spec.dtype: + raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected dtype={spec.dtype}, found dtype={dtype}') + if spec.axes: + for (axis, value) in spec.axes.items(): + if value is not None and shape[axis] not in {value, None}: + raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected axis {axis} of input shape to have value {value}, but received input with shape {shape}') + if spec.shape is not None: + spec_shape = spec.shape + if spec.allow_last_axis_squeeze: + if shape and shape[-1] == 1: + shape = shape[:-1] + if spec_shape and spec_shape[-1] == 1: + spec_shape = spec_shape[:-1] + for (spec_dim, dim) in zip(spec_shape, shape): + if spec_dim is not None and dim is not None: + if spec_dim != dim: + raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected shape={spec.shape}, found shape={shape}') + +# File: keras-master/keras/src/layers/layer.py +"""""" +import collections +import inspect +import warnings +from functools import wraps +from keras.src import backend +from keras.src import constraints +from keras.src import dtype_policies +from keras.src import initializers +from keras.src import regularizers +from keras.src import tree +from keras.src import utils +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend.common import global_state +from keras.src.backend.common.name_scope import current_path +from keras.src.backend.common.symbolic_scope import in_symbolic_scope +from keras.src.distribution import distribution_lib +from keras.src.dtype_policies import DTypePolicyMap +from keras.src.layers import input_spec +from keras.src.metrics.metric import Metric +from keras.src.ops.operation import Operation +from keras.src.saving.keras_saveable import KerasSaveable +from keras.src.utils import python_utils +from keras.src.utils import summary_utils +from keras.src.utils import traceback_utils +from keras.src.utils import tracking +if backend.backend() == 'tensorflow': + from keras.src.backend.tensorflow.layer import TFLayer as BackendLayer +elif backend.backend() == 'jax': + from keras.src.backend.jax.layer import JaxLayer as BackendLayer +elif backend.backend() == 'torch': + from keras.src.backend.torch.layer import TorchLayer as BackendLayer +elif backend.backend() == 'numpy': + from keras.src.backend.numpy.layer import NumpyLayer as BackendLayer +else: + raise RuntimeError(f"Backend '{backend.backend()}' must implement a 
layer mixin class.") + +@keras_export(['keras.Layer', 'keras.layers.Layer']) +class Layer(BackendLayer, Operation, KerasSaveable): + + def __new__(cls, *args, **kwargs): + obj = super().__new__(cls, *args, **kwargs) + original_build_method = obj.build + + @wraps(original_build_method) + def build_wrapper(*args, **kwargs): + with obj._open_name_scope(): + obj._path = current_path() + original_build_method(*args, **kwargs) + signature = inspect.signature(original_build_method) + obj._build_shapes_dict = signature.bind(*args, **kwargs).arguments + obj.built = True + obj._post_build() + obj._lock_state() + obj.build = build_wrapper + original_quantize_method = obj.quantize + + @wraps(original_quantize_method) + def quantize_wrapper(mode, **kwargs): + obj._check_quantize_args(mode, obj.compute_dtype) + obj._tracker.unlock() + try: + original_quantize_method(mode, **kwargs) + except Exception: + raise + finally: + obj._tracker.lock() + obj.quantize = quantize_wrapper + return obj + + def __init__(self, *, activity_regularizer=None, trainable=True, dtype=None, autocast=True, name=None, **kwargs): + BackendLayer.__init__(self) + self._lock = False + Operation.__init__(self, dtype=dtype, name=name) + self.activity_regularizer = regularizers.get(activity_regularizer) + input_dim_arg = kwargs.pop('input_dim', None) + if input_dim_arg is not None: + input_shape_arg = (input_dim_arg,) + else: + input_shape_arg = kwargs.pop('input_shape', None) + if input_shape_arg is not None: + warnings.warn('Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.', stacklevel=2) + self._input_shape_arg = input_shape_arg + if kwargs: + raise ValueError(f'Unrecognized keyword arguments passed to {self.__class__.__name__}: {kwargs}') + self._path = None + self.built = False + self.autocast = autocast + self._input_spec = None + self._called = False + self.supports_jit = True + self._trainable = trainable + self._losses = [] + self._loss_ids = set() + self._losses_override = [] + self._call_signature = inspect.signature(self.call) + call_signature_parameters = [p.name for p in self._call_signature.parameters.values()] + self._call_has_training_arg = 'training' in call_signature_parameters + self._call_has_mask_arg = 'mask' in call_signature_parameters + self._supports_masking = not utils.is_default(self.compute_mask) + self._convert_input_args = True + self._allow_non_tensor_positional_args = False + self._build_shapes_dict = None + self._parent_path = None + self._initialize_tracker() + + @tracking.no_automatic_dependency_tracking + def _initialize_tracker(self): + if hasattr(self, '_tracker'): + return + trainable_variables = [] + non_trainable_variables = [] + layers = [] + metrics = [] + seed_generators = [] + self._tracker = tracking.Tracker({'trainable_variables': (lambda x: isinstance(x, backend.Variable) and x.trainable, trainable_variables), 'non_trainable_variables': (lambda x: isinstance(x, backend.Variable) and (not x.trainable), non_trainable_variables), 'metrics': (lambda x: isinstance(x, Metric), metrics), 'layers': (lambda x: isinstance(x, Layer) and (not isinstance(x, Metric)), layers), 'seed_generators': (lambda x: isinstance(x, backend.random.SeedGenerator), seed_generators)}, exclusions={'non_trainable_variables': ['trainable_variables']}) + if backend.backend() == 'tensorflow': + _self_setattr_tracking = getattr(self, '_self_setattr_tracking', True) + self._self_setattr_tracking = False 
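+ # Keep direct references to the Tracker's store lists; on the TF backend, attribute tracking was disabled just above so these assignments are not wrapped by TF's trackable machinery.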
+ self._trainable_variables = trainable_variables + self._non_trainable_variables = non_trainable_variables + self._layers = layers + self._metrics = metrics + self._seed_generators = seed_generators + if backend.backend() == 'tensorflow': + self._self_setattr_tracking = _self_setattr_tracking + + @property + def path(self): + return self._path + + @property + def input_spec(self): + return self._input_spec + + @input_spec.setter + def input_spec(self, value): + self._input_spec = value + + @utils.default + def build(self, input_shape): + self._check_super_called() + if utils.is_default(self.build) and might_have_unbuilt_state(self): + warnings.warn(f"`build()` was called on layer '{self.name}', however the layer does not have a `build()` method implemented and it looks like it has unbuilt state. This will cause the layer to be marked as built, despite not being actually built, which may cause failures down the line. Make sure to implement a proper `build()` method.") + self.built = True + + def _lock_state(self): + if not self._tracker.locked: + self._tracker.lock(msg='You cannot add new elements of state (variables or sub-layers) to a layer that is already built. All state must be created in the `__init__()` method or in the `build()` method.') + + def get_build_config(self): + if self._build_shapes_dict is not None: + if len(self._build_shapes_dict) == 1: + return {'input_shape': tuple(self._build_shapes_dict.values())[0]} + else: + return {'shapes_dict': self._build_shapes_dict} + + def build_from_config(self, config): + if config: + if 'input_shape' in config: + self.build(config['input_shape']) + elif 'shapes_dict' in config: + self.build(**config['shapes_dict']) + self.built = True + + def _obj_type(self): + return 'Layer' + + def add_variable(self, shape, initializer, dtype=None, trainable=True, autocast=True, regularizer=None, constraint=None, name=None): + return self.add_weight(shape=shape, initializer=initializer, dtype=dtype, trainable=trainable, autocast=autocast, regularizer=regularizer, constraint=constraint, name=name) + + def add_weight(self, shape=None, initializer=None, dtype=None, trainable=True, autocast=True, regularizer=None, constraint=None, aggregation='mean', name=None): + self._check_super_called() + if shape is None: + shape = () + if dtype is not None: + dtype = backend.standardize_dtype(dtype) + else: + dtype = self.variable_dtype + if initializer is None: + if 'float' in dtype: + initializer = 'glorot_uniform' + else: + initializer = 'zeros' + initializer = initializers.get(initializer) + with backend.name_scope(self.name, caller=self): + variable = backend.Variable(initializer=initializer, shape=shape, dtype=dtype, trainable=trainable, autocast=autocast, aggregation=aggregation, name=name) + variable.regularizer = regularizers.get(regularizer) + variable.constraint = constraints.get(constraint) + self._track_variable(variable) + return variable + + @property + def trainable(self): + return self._trainable + + @trainable.setter + def trainable(self, value): + value = bool(value) + self._trainable = value + for v in self._trainable_variables: + v.trainable = value + for layer in self._layers: + layer.trainable = value + + @property + def variables(self): + variables = [] + seen_ids = set() + for v in self._trainable_variables + self._non_trainable_variables: + if id(v) not in seen_ids: + variables.append(v) + seen_ids.add(id(v)) + for sg in self._seed_generators: + variables.append(sg.state) + for layer in self._layers: + for v in layer.variables: + if id(v) 
not in seen_ids: + variables.append(v) + seen_ids.add(id(v)) + return variables + + @property + def trainable_variables(self): + if not self.trainable: + return [] + return [v for v in self.variables if v.trainable] + + @property + def non_trainable_variables(self): + if not self.trainable: + return self.variables + return [v for v in self.variables if not v.trainable] + + @property + def weights(self): + weights = [] + seen_ids = set() + for w in self._trainable_variables + self._non_trainable_variables: + if id(w) not in seen_ids: + weights.append(w) + seen_ids.add(id(w)) + for layer in self._layers: + for w in layer.weights: + if id(w) not in seen_ids: + weights.append(w) + seen_ids.add(id(w)) + return weights + + @property + def trainable_weights(self): + if not self.trainable: + return [] + return [v for v in self.weights if v.trainable] + + @property + def non_trainable_weights(self): + if not self.trainable: + return self.weights + return [v for v in self.weights if not v.trainable] + + @property + def metrics(self): + metrics = list(self._metrics) + for layer in self._layers: + metrics.extend(layer.metrics) + return metrics + + @property + def metrics_variables(self): + vars = [] + for metric in self.metrics: + vars.extend(metric.variables) + return vars + + def get_weights(self): + return [v.numpy() for v in self.weights] + + def set_weights(self, weights): + layer_weights = self.weights + if len(layer_weights) != len(weights): + raise ValueError(f"You called `set_weights(weights)` on layer '{self.name}' with a weight list of length {len(weights)}, but the layer was expecting {len(layer_weights)} weights.") + for (variable, value) in zip(layer_weights, weights): + if variable.shape != value.shape: + raise ValueError(f'Layer {self.name} weight shape {variable.shape} is not compatible with provided weight shape {value.shape}.') + variable.assign(value) + + @property + def dtype_policy(self): + return self._dtype_policy + + @dtype_policy.setter + def dtype_policy(self, value): + policy = dtype_policies.get(value) + if isinstance(self._dtype_policy, DTypePolicyMap) and self.path: + if self.path in self._dtype_policy: + del self._dtype_policy[self.path] + self._dtype_policy[self.path] = policy + else: + self._dtype_policy = policy + if policy.quantization_mode is not None: + if self.built and (not getattr(self, '_is_quantized', False)): + self.quantize(policy.quantization_mode) + + @property + def dtype(self): + return self.variable_dtype + + @property + def compute_dtype(self): + if isinstance(self._dtype_policy, DTypePolicyMap) and self.path: + policy = self._dtype_policy[self.path] + else: + policy = self._dtype_policy + return policy.compute_dtype + + @property + def variable_dtype(self): + if isinstance(self._dtype_policy, DTypePolicyMap) and self.path: + policy = self._dtype_policy[self.path] + else: + policy = self._dtype_policy + return policy.variable_dtype + + @property + def quantization_mode(self): + if isinstance(self._dtype_policy, DTypePolicyMap) and self.path: + policy = self._dtype_policy[self.path] + else: + policy = self._dtype_policy + return policy.quantization_mode + + @property + def input_dtype(self): + return self.compute_dtype + + @property + def supports_masking(self): + return self._supports_masking + + @supports_masking.setter + def supports_masking(self, value): + self._supports_masking = value + + @utils.default + def compute_mask(self, inputs, previous_mask): + return previous_mask + + @traceback_utils.filter_traceback + def __call__(self, *args, 
**kwargs): + self._check_super_called() + self._called = True + + def maybe_convert(x): + return self.dtype_policy.convert_input(x, self.autocast, self.input_dtype) + if (kwargs or len(args) != 1 or (not backend.is_tensor(args[0])) or (backend.standardize_dtype(args[0].dtype) != self.input_dtype)) and self._convert_input_args: + args = tree.map_structure(maybe_convert, args) + kwargs = tree.map_structure(maybe_convert, kwargs) + if not self._allow_non_tensor_positional_args: + for arg in tree.flatten(args): + if not isinstance(arg, KerasTensor) and (not backend.is_tensor(arg)) and (arg is not None): + raise ValueError(f'Only input tensors may be passed as positional arguments. The following argument value should be passed as a keyword argument: {arg} (of type {type(arg)})') + call_spec = CallSpec(self._call_signature, args, kwargs) + self._assert_input_compatibility(call_spec.first_arg) + with self._open_name_scope(): + self._maybe_build(call_spec) + call_context = self._get_call_context() + training = call_spec.user_arguments_dict.get('training', None) + if training is None: + training = call_context.training + if training is None: + training = call_spec.arguments_dict.get('training', None) + call_context.training = training + if self._call_has_training_arg and training is not None: + kwargs['training'] = training + if len(call_spec.tensor_arguments_dict) == 1: + if 'mask' in call_spec.argument_names and call_spec.arguments_dict['mask'] is None: + arg_name = list(call_spec.tensor_arguments_dict.keys())[0] + only_tensor_arg = call_spec.tensor_arguments_dict[arg_name] + mask = tree.map_structure(lambda x: getattr(x, '_keras_mask', None), only_tensor_arg) + kwargs['mask'] = mask + elif len(call_spec.tensor_arguments_dict) > 1: + for (k, v) in call_spec.tensor_arguments_dict.items(): + expected_mask_arg_name = f'{k}_mask' + if expected_mask_arg_name in call_spec.argument_names: + if call_spec.arguments_dict[expected_mask_arg_name] is None: + mask = tree.map_structure(lambda x: getattr(x, '_keras_mask', None), v) + kwargs[expected_mask_arg_name] = mask + try: + with self._open_name_scope(): + current_scope = backend.get_autocast_scope() + new_scope = None + if current_scope is not None: + if not self.autocast: + new_scope = backend.AutocastScope(None) + elif not backend.is_float_dtype(self.compute_dtype): + new_scope = backend.AutocastScope(None) + elif current_scope.dtype != self.compute_dtype: + new_scope = backend.AutocastScope(self.compute_dtype) + elif self.compute_dtype != self.variable_dtype: + new_scope = backend.AutocastScope(self.compute_dtype) + if new_scope is not None: + with new_scope: + outputs = super().__call__(*args, **kwargs) + else: + outputs = super().__call__(*args, **kwargs) + distribution = distribution_lib.distribution() + if distribution is not None: + current_layer_path = current_path() + current_layer_path += '/output' + layout = distribution.get_tensor_layout(current_layer_path) + if layout: + outputs = distribution_lib.distribute_tensor(outputs, layout) + if not self.built: + self.built = True + if self.activity_regularizer is not None: + for output in tree.flatten(outputs): + if backend.is_tensor(output): + self.add_loss(self.activity_regularizer(output)) + previous_mask = tree.map_structure(lambda x: getattr(x, '_keras_mask', None), call_spec.first_arg) + if self.supports_masking: + self._set_mask_metadata(call_spec.first_arg, outputs, previous_mask) + elif any((m is not None for m in tree.flatten(previous_mask))): + warnings.warn(f"Layer '{self.name}' (of type 
{self.__class__.__name__}) was passed an input with a mask attached to it. However, this layer does not support masking and will therefore destroy the mask information. Downstream layers will not see the mask.") + finally: + self._maybe_reset_call_context() + return outputs + + def call(self, *args, **kwargs): + raise self._not_implemented_error(self.call) + + @traceback_utils.filter_traceback + def stateless_call(self, trainable_variables, non_trainable_variables, *args, return_losses=False, **kwargs): + self._check_super_called() + if not self.built: + raise ValueError(f'To call stateless_call, {self.__class__.__name__} must be built (i.e. its variables must have been already created). You can build it by calling it on some data.') + if len(trainable_variables) != len(self.trainable_variables): + raise ValueError(f'Argument `trainable_variables` must be a list of tensors corresponding 1:1 to {self.__class__.__name__}().trainable_variables. Received list with length {len(trainable_variables)}, but expected {len(self.trainable_variables)} variables.') + if len(non_trainable_variables) != len(self.non_trainable_variables): + raise ValueError(f'Argument `non_trainable_variables` must be a list of tensors corresponding 1:1 to {self.__class__.__name__}().non_trainable_variables. Received list with length {len(non_trainable_variables)}, but expected {len(self.non_trainable_variables)} variables.') + trainable_mapping = zip(self.trainable_variables, trainable_variables) + non_trainable_mapping = zip(self.non_trainable_variables, non_trainable_variables) + mapping = list(trainable_mapping) + list(non_trainable_mapping) + losses = None + with backend.StatelessScope(state_mapping=mapping, collect_losses=return_losses) as scope: + if self.dtype_policy.quantization_mode is not None: + outputs = self.quantized_call(*args, **kwargs) + else: + outputs = self.call(*args, **kwargs) + if return_losses: + losses = self.losses + non_trainable_variables = [] + for v in self.non_trainable_variables: + new_v = scope.get_current_value(v) + non_trainable_variables.append(new_v) + if return_losses: + return (outputs, non_trainable_variables, losses) + return (outputs, non_trainable_variables) + + def compute_output_spec(self, *args, **kwargs): + if utils.is_default(self.compute_output_shape): + return super().compute_output_spec(*args, **kwargs) + else: + call_spec = CallSpec(self._call_signature, args, kwargs) + shapes_dict = get_shapes_dict(call_spec) + shapes_dict = update_shapes_dict_for_target_fn(self.compute_output_shape, shapes_dict=shapes_dict, call_spec=call_spec, class_name=self.__class__.__name__) + output_shape = self.compute_output_shape(**shapes_dict) + if isinstance(output_shape, list) and output_shape and isinstance(output_shape[0], (int, type(None))): + output_shape = tuple(output_shape) + if not isinstance(output_shape, (list, tuple, dict)): + try: + output_shape = tuple(output_shape) + except: + raise ValueError(f'Method `compute_output_shape()` of layer {self.__class__.__name__} is returning a type that cannot be interpreted as a shape. It should return a shape tuple. 
Received: {output_shape}') + if isinstance(output_shape, tuple) and output_shape and isinstance(output_shape[0], (int, type(None))): + return KerasTensor(output_shape, dtype=self.compute_dtype) + return tree.map_shape_structure(lambda s: KerasTensor(s, dtype=self.compute_dtype), output_shape) + + @utils.default + def compute_output_shape(self, *args, **kwargs): + raise self._not_implemented_error(self.compute_output_shape, 'Should implement `def compute_output_shape(self, input_shape)`.') + + def add_loss(self, loss): + losses = tree.flatten(loss) + for x in losses: + if not backend.is_tensor(x): + raise ValueError(f'`add_loss()` can only be called from inside `build()` or `call()`, on a tensor input. Received invalid value: {x}') + if backend.in_stateless_scope(): + scope = backend.get_stateless_scope() + if scope.collect_losses: + for x in losses: + scope.add_loss(x) + self._loss_ids.add(id(x)) + else: + self._losses.extend(losses) + + def _get_own_losses(self): + if backend.in_stateless_scope(): + losses = [] + scope = backend.get_stateless_scope() + for loss in scope.losses: + if id(loss) in self._loss_ids: + losses.append(loss) + return losses + else: + return self._losses[:] + + def _get_regularization_losses(self): + weight_regularization_losses = [] + for variable in self.trainable_weights: + if variable.regularizer is None: + continue + if backend.in_stateless_scope() and (not in_symbolic_scope()): + v = backend.get_stateless_scope().get_current_value(variable) + else: + v = variable + weight_regularization_losses.append(variable.regularizer(v)) + return weight_regularization_losses + + @property + def losses(self): + if self._losses_override: + return self._losses_override + losses = self._get_own_losses() + for layer in self._flatten_layers(include_self=False): + losses.extend(layer._get_own_losses()) + weight_regularization_losses = self._get_regularization_losses() + losses.extend(weight_regularization_losses) + return losses + + def _clear_losses(self): + if backend.in_stateless_scope(): + scope = backend.get_stateless_scope() + if scope.collect_losses: + for x in scope.losses: + if id(x) in self._loss_ids: + scope.losses.remove(x) + self._losses.clear() + self._loss_ids.clear() + for layer in self._layers: + layer._clear_losses() + + def quantized_build(self, input_shape, mode): + raise self._not_implemented_error(self.quantized_build) + + def quantize(self, mode, type_check=True): + raise self._not_implemented_error(self.quantize) + + def _check_quantize_args(self, mode, compute_dtype): + if not self.built: + raise ValueError(f"Cannot quantize a layer that isn't yet built. Layer '{self.name}' (of type '{self.__class__.__name__}') is not built yet.") + if getattr(self, '_is_quantized', False): + raise ValueError(f"Layer '{self.name}' is already quantized with dtype_policy='{self.dtype_policy.name}'. Received: mode={mode}") + if mode not in dtype_policies.QUANTIZATION_MODES: + raise ValueError(f'Invalid quantization mode. Expected one of {dtype_policies.QUANTIZATION_MODES}. Received: mode={mode}') + if mode == 'int8' and compute_dtype == 'float16': + raise ValueError(f"Quantization mode='{mode}' doesn't work well with compute_dtype='float16'. Consider loading model/layer with another dtype policy such as 'mixed_bfloat16' or 'mixed_float16' before calling `quantize()`.") +
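+ # quantized_call() below dispatches on the layer's quantization mode; concrete layers override _int8_call()/_float8_call() with their quantized forward pass.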
+ def quantized_call(self, *args, **kwargs): + if self.quantization_mode == 'int8': + return self._int8_call(*args, **kwargs) + elif self.quantization_mode == 'float8': + return self._float8_call(*args, **kwargs) + else: + raise self._quantization_mode_error(self.quantization_mode) + + def _int8_call(self, *args, **kwargs): + raise self._not_implemented_error(self._int8_call) + + def _float8_call(self, *args, **kwargs): + raise self._not_implemented_error(self._float8_call) + + def _not_implemented_error(self, attr, msg=None): + if callable(attr): + attr_name = attr.__name__ + attr_type = 'method' + else: + attr_name = str(attr) + attr_type = 'attribute' + msg = ' ' + msg if msg is not None else '' + return NotImplementedError(f'Layer {self.__class__.__name__} does not have a `{attr_name}` {attr_type} implemented.{msg}') + + def _quantization_mode_error(self, mode): + return NotImplementedError(f'Invalid quantization mode. Expected one of {dtype_policies.QUANTIZATION_MODES}. Received: quantization_mode={mode}') + + def save_own_variables(self, store): + all_vars = self._trainable_variables + self._non_trainable_variables + for (i, v) in enumerate(all_vars): + store[f'{i}'] = v + + def load_own_variables(self, store): + all_vars = self._trainable_variables + self._non_trainable_variables + if len(store.keys()) != len(all_vars): + if len(all_vars) == 0 and (not self.built): + raise ValueError(f"Layer '{self.name}' was never built and thus it doesn't have any variables. However the weights file lists {len(store.keys())} variables for this layer.\nIn most cases, this error indicates that either:\n\n1. The layer is owned by a parent layer that implements a `build()` method, but calling the parent's `build()` method did NOT create the state of the child layer '{self.name}'. A `build()` method must create ALL state for the layer, including the state of any children layers.\n\n2. You need to implement the `def build_from_config(self, config)` method on layer '{self.name}', to specify how to rebuild it during loading. In this case, you might also want to implement the method that generates the build config at saving time, `def get_build_config(self)`. The method `build_from_config()` is meant to create the state of the layer (i.e. its variables) upon deserialization.") + raise ValueError(f"Layer '{self.name}' expected {len(all_vars)} variables, but received {len(store.keys())} variables during loading. Expected: {[v.name for v in all_vars]}") + for (i, v) in enumerate(all_vars): + v.assign(store[f'{i}']) + + def _track_variable(self, variable): + if variable.trainable: + self._tracker.add_to_store('trainable_variables', variable) + else: + self._tracker.add_to_store('non_trainable_variables', variable) + if not self.trainable: + variable.trainable = False + self._post_track_variable(variable) + + def _untrack_variable(self, variable): + previous_lock_state = self._tracker.locked + self._tracker.unlock() + self._tracker.untrack(variable) + if previous_lock_state is True: + self._tracker.lock() + self._post_untrack_variable(variable) + + def add_metric(self, *args, **kwargs): + raise NotImplementedError('Layer `add_metric()` method is deprecated. Add your metric in `Model.compile(metrics=[...])`.') + + def count_params(self): + if not self.built: + raise ValueError(f"You tried to call `count_params` on layer '{self.name}', but the layer isn't built. 
You can build it manually via: `layer.build(input_shape)`.") + return summary_utils.count_params(self.weights) + + def _maybe_build(self, call_spec): + if self.built: + return + shapes_dict = get_shapes_dict(call_spec) + first_shape = next(iter(shapes_dict.values()), None) + if not utils.is_default(self.build): + shapes_dict = update_shapes_dict_for_target_fn(self.build, shapes_dict=shapes_dict, call_spec=call_spec, class_name=self.__class__.__name__) + self.build(**shapes_dict) + self._assert_input_compatibility(call_spec.first_arg) + return + if might_have_unbuilt_state(self): + try: + backend.compute_output_spec(self.call, **call_spec.arguments_dict) + except Exception as e: + if call_spec.eager: + return + warnings.warn(f"Layer '{self.name}' looks like it has unbuilt state, but Keras is not able to trace the layer `call()` in order to build it automatically. Possible causes:\n1. The `call()` method of your layer may be crashing. Try to `__call__()` the layer eagerly on some test input first to see if it works. E.g. `x = np.random.random((3, 4)); y = layer(x)`\n2. If the `call()` method is correct, then you may need to implement the `def build(self, input_shape)` method on your layer. It should create all variables used by the layer (e.g. by calling `layer.build()` on all its children layers).\nException encountered: ''{e}''") + self.build(first_shape) + + def _build_by_run_for_single_pos_arg(self, input_shape): + input_tensors = tree.map_shape_structure(lambda s: backend.KerasTensor(s), input_shape) + try: + backend.compute_output_spec(self.call, input_tensors) + return True + except: + return False + + def _build_by_run_for_kwargs(self, shapes_dict): + if all((is_shape_tuple(s) for s in shapes_dict.values())): + input_tensors = {utils.removesuffix(k, '_shape'): backend.KerasTensor(shape) for (k, shape) in shapes_dict.items()} + try: + backend.compute_output_spec(self.call, **input_tensors) + return True + except: + return False + else: + return False + + def __repr__(self): + return f'<{self.__class__.__name__} name={self.name}, built={self.built}>' + + def __str__(self): + return self.__repr__() + + def __setattr__(self, name, value): + (name, value) = self._setattr_hook(name, value) + if name != '_tracker': + if not hasattr(self, '_tracker'): + self._initialize_tracker() + value = self._tracker.track(value) + return super().__setattr__(name, value) + + def __delattr__(self, name): + obj = getattr(self, name) + if isinstance(obj, backend.Variable): + import gc + self._untrack_variable(obj) + super().__delattr__(name) + gc.collect() + else: + super().__delattr__(name) + + def _check_super_called(self): + if getattr(self, '_lock', True): + raise RuntimeError(f"In layer '{self.__class__.__name__}', you forgot to call `super().__init__()` as the first statement in the `__init__()` method. 
Go add it!") + + def _assert_input_compatibility(self, arg_0): + if self.input_spec: + input_spec.assert_input_compatibility(self.input_spec, arg_0, layer_name=self.name) + + def _get_call_context(self): + layer_call_ctx = global_state.get_global_attribute('current_call_ctx') + if layer_call_ctx is None: + layer_call_ctx = CallContext(entry_layer=self) + global_state.set_global_attribute('current_call_ctx', layer_call_ctx) + self._clear_losses() + return layer_call_ctx + + def _maybe_reset_call_context(self): + layer_call_ctx = global_state.get_global_attribute('current_call_ctx') + if layer_call_ctx is None or layer_call_ctx.entry_layer == self: + global_state.set_global_attribute('current_call_ctx', None) + + def _flatten_layers(self, include_self=True, recursive=True): + layers = [] + if include_self: + layers.append(self) + seen_object_ids = set() + deque = collections.deque(self._layers) + while deque: + layer = deque.popleft() + if id(layer) in seen_object_ids: + continue + seen_object_ids.add(id(layer)) + layers.append(layer) + if recursive: + deque.extendleft(layer._layers) + return layers + + def _set_mask_metadata(self, inputs, outputs, previous_mask): + flat_outputs = tree.flatten(outputs) + mask_already_computed = all((getattr(x, '_keras_mask', None) is not None for x in flat_outputs)) + if mask_already_computed: + return + output_masks = self.compute_mask(inputs, previous_mask) + if output_masks is None: + return + flat_masks = tree.flatten(output_masks) + for (tensor, mask) in zip(flat_outputs, flat_masks): + if getattr(tensor, '_keras_mask', None) is None: + try: + if backend.backend() == 'numpy': + warnings.warn('The NumPy backend does not support masking at thistime. Masks will be ignored.') + tensor._keras_mask = mask + except AttributeError: + pass + + @python_utils.default + def get_config(self): + self._check_super_called() + base_config = super().get_config() + config = {'trainable': self.trainable, 'dtype': dtype_policies.serialize(self.dtype_policy)} + if self.activity_regularizer is not None: + config['activity_regularizer'] = regularizers.serialize(self.activity_regularizer) + return {**base_config, **config} + + def _open_name_scope(self): + if self._parent_path is None: + self._parent_path = current_path() + return backend.name_scope(self.name, caller=self) + +def is_backend_tensor_or_symbolic(x, allow_none=False): + if allow_none and x is None: + return True + return backend.is_tensor(x) or isinstance(x, backend.KerasTensor) + +class CallSpec: + + def __init__(self, signature, args, kwargs): + if 'training' in kwargs and 'training' not in signature.parameters: + kwargs.pop('training') + bound_args = signature.bind(*args, **kwargs) + else: + bound_args = signature.bind(*args, **kwargs) + self.user_arguments_dict = {k: v for (k, v) in bound_args.arguments.items()} + bound_args.apply_defaults() + arg_dict = {} + arg_names = [] + tensor_arg_dict = {} + tensor_args = [] + tensor_arg_names = [] + nested_tensor_arg_names = [] + for (name, value) in bound_args.arguments.items(): + arg_dict[name] = value + arg_names.append(name) + if is_backend_tensor_or_symbolic(value): + tensor_args.append(value) + tensor_arg_names.append(name) + tensor_arg_dict[name] = value + elif tree.is_nested(value) and len(value) > 0: + flat_values = tree.flatten(value) + if all((is_backend_tensor_or_symbolic(x, allow_none=True) for x in flat_values)): + tensor_args.append(value) + tensor_arg_names.append(name) + tensor_arg_dict[name] = value + nested_tensor_arg_names.append(name) + elif 
any((is_backend_tensor_or_symbolic(x) for x in flat_values)): + raise ValueError(f'In a nested call() argument, you cannot mix tensors and non-tensors. Received invalid mixed argument: {name}={value}') + self.arguments_dict = arg_dict + self.argument_names = arg_names + self.tensor_arguments_dict = tensor_arg_dict + self.tensor_arguments_names = tensor_arg_names + self.nested_tensor_argument_names = nested_tensor_arg_names + self.first_arg = arg_dict[arg_names[0]] + if all((backend.is_tensor(x) for x in self.tensor_arguments_dict.values())): + self.eager = True + else: + self.eager = False + +def get_arguments_dict(fn, args, kwargs): + sig = inspect.signature(fn) + bound_args = sig.bind(*args, **kwargs) + arg_dict = {} + for (name, value) in bound_args.arguments.items(): + arg_dict[name] = value + return arg_dict + +def get_shapes_dict(call_spec): + shapes_dict = {} + for (k, v) in call_spec.tensor_arguments_dict.items(): + if k == 'mask' or k.endswith('_mask'): + continue + if k == 'kwargs' or k == 'args': + continue + if k in call_spec.nested_tensor_argument_names: + shapes_dict[f'{k}_shape'] = tree.map_structure(lambda x: backend.standardize_shape(x.shape), v) + else: + shapes_dict[f'{k}_shape'] = backend.standardize_shape(v.shape) + return shapes_dict + +def update_shapes_dict_for_target_fn(target_fn, shapes_dict, call_spec, class_name): + if utils.is_default(target_fn): + return None + sig = inspect.signature(target_fn) + expected_names = [] + for (name, param) in sig.parameters.items(): + if param.kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY, param.KEYWORD_ONLY): + expected_names.append(name) + if len(expected_names) == 1: + key = expected_names[0] + values = tuple(shapes_dict.values()) + if values: + input_shape = values[0] + else: + input_shape = None + return {key: input_shape} + kwargs = {} + for name in expected_names: + method_name = target_fn.__name__ + error_preamble = f'For a `{method_name}()` method with more than one argument, all arguments should have a `_shape` suffix and match an argument from `call()`. E.g. 
`{method_name}(self, foo_shape, bar_shape)` ' + if not name.endswith('_shape'): + raise ValueError(f"{error_preamble} For layer '{class_name}', Received `{method_name}()` argument `{name}`, which does not end in `_shape`.") + expected_call_arg = utils.removesuffix(name, '_shape') + if expected_call_arg not in call_spec.arguments_dict: + raise ValueError(f"{error_preamble} For layer '{class_name}', received `{method_name}()` argument `{name}`, but `call()` does not have argument `{expected_call_arg}`.") + if name in shapes_dict: + kwargs[name] = shapes_dict[name] + return kwargs + +class CallContext: + + def __init__(self, entry_layer): + self.entry_layer = entry_layer + self.training = None + +def is_shape_tuple(s): + return isinstance(s, (list, tuple)) and all((d is None or isinstance(d, int) for d in s)) + +def might_have_unbuilt_state(layer): + return any((not lr.built for lr in layer._layers)) + +# File: keras-master/keras/src/layers/merging/add.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + +@keras_export('keras.layers.Add') +class Add(Merge): + + def _merge_function(self, inputs): + output = inputs[0] + for i in range(1, len(inputs)): + output = ops.add(output, inputs[i]) + return output + +@keras_export('keras.layers.add') +def add(inputs, **kwargs): + return Add(**kwargs)(inputs) + +# File: keras-master/keras/src/layers/merging/average.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + +@keras_export('keras.layers.Average') +class Average(Merge): + + def _merge_function(self, inputs): + output = inputs[0] + for i in range(1, len(inputs)): + output = ops.add(output, inputs[i]) + return output / len(inputs) + +@keras_export('keras.layers.average') +def average(inputs, **kwargs): + return Average(**kwargs)(inputs) + +# File: keras-master/keras/src/layers/merging/base_merge.py +from keras.src import backend +from keras.src import ops +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.layer import Layer + +class Merge(Layer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.supports_masking = True + + def _merge_function(self, inputs): + raise NotImplementedError + + def _apply_merge_op_and_or_mask(self, op_fn, inputs): + output = None + output_mask = None + for x in inputs: + mask = getattr(x, '_keras_mask', None) + if mask is not None: + mask = ops.broadcast_to(ops.expand_dims(mask, -1), ops.shape(x)) + if output is None: + output = x + output_mask = mask + continue + if mask is not None: + x = ops.where(mask, x, output) + if output_mask is not None: + output = ops.where(output_mask, output, x) + if mask is not None and output_mask is not None: + output_mask = ops.logical_or(output_mask, mask) + else: + output_mask = None + output = op_fn(output, x) + if output_mask is not None: + output_mask = ops.any(output_mask, axis=-1, keepdims=False) + output._keras_mask = output_mask + return output + + def _compute_elemwise_op_output_shape(self, shape1, shape2): + if None in [shape1, shape2]: + return None + elif len(shape1) < len(shape2): + return self._compute_elemwise_op_output_shape(shape2, shape1) + elif not shape2: + return shape1 + output_shape = list(shape1[:-len(shape2)]) + for (i, j) in zip(shape1[-len(shape2):], shape2): + if i is None or j is None: + output_shape.append(None) + elif i == 1: + output_shape.append(j) + elif j == 1: + output_shape.append(i) + 
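+ # Neither dimension is 1 here, so elementwise broadcasting requires the two dimensions to match exactly.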
else: + if i != j: + raise ValueError(f'Inputs have incompatible shapes. Received shapes {shape1} and {shape2}') + output_shape.append(i) + return tuple(output_shape) + + def build(self, input_shape): + if not isinstance(input_shape[0], (tuple, list)): + raise ValueError(f'A merge layer should be called on a list of inputs. Received: input_shape={input_shape} (not a list of shapes)') + if len(input_shape) < 1: + raise ValueError(f'A merge layer should be called on a list of at least 1 input. Received {len(input_shape)} inputs. Full input_shape received: {input_shape}') + batch_sizes = {s[0] for s in input_shape if s} - {None} + if len(batch_sizes) > 1: + raise ValueError(f'Cannot merge tensors with different batch sizes. Received tensors with shapes {input_shape}') + if input_shape[0] is None: + output_shape = None + else: + output_shape = input_shape[0][1:] + for i in range(1, len(input_shape)): + if input_shape[i] is None: + shape = None + else: + shape = input_shape[i][1:] + output_shape = self._compute_elemwise_op_output_shape(output_shape, shape) + if None not in input_shape and len(set(map(len, input_shape))) == 1: + self._reshape_required = False + else: + self._reshape_required = True + self.built = True + + def call(self, inputs): + if not isinstance(inputs, (list, tuple)): + raise ValueError(f'A merge layer should be called on a list of inputs. Received: inputs={inputs} (not a list of tensors)') + if self._reshape_required: + reshaped_inputs = [] + input_ndims = list(map(ops.ndim, inputs)) + if None not in input_ndims: + max_ndim = max(input_ndims) + for x in inputs: + x_ndim = ops.ndim(x) + for _ in range(max_ndim - x_ndim): + x = ops.expand_dims(x, axis=1) + reshaped_inputs.append(x) + return self._merge_function(reshaped_inputs) + else: + transposed = False + for x in inputs: + x_ndim = ops.ndim(x) + if x_ndim is None: + x_shape = ops.shape(x) + batch_size = x_shape[0] + new_shape = backend.concatenate([x_shape[1:], ops.expand_dims(batch_size, axis=-1)]) + x_transposed = ops.reshape(x, ops.stack([batch_size, ops.prod(x_shape[1:])], axis=0)) + x_transposed = ops.transpose(x_transposed, perm=(1, 0)) + x_transposed = ops.reshape(x_transposed, new_shape) + reshaped_inputs.append(x_transposed) + transposed = True + elif x_ndim > 1: + dims = list(range(1, x_ndim)) + [0] + reshaped_inputs.append(ops.transpose(x, perm=dims)) + transposed = True + else: + reshaped_inputs.append(x) + y = self._merge_function(reshaped_inputs) + y_ndim = ops.ndim(y) + if transposed: + if y_ndim is None: + y_shape = ops.shape(y) + y_ndim = ops.shape(y_shape)[0] + batch_size = y_shape[y_ndim - 1] + new_shape = ops.concatenate([ops.expand_dims(batch_size, axis=-1), y_shape[:y_ndim - 1]]) + y = ops.reshape(y, (-1, batch_size)) + y = ops.transpose(y, perm=(1, 0)) + y = ops.reshape(y, new_shape) + elif y_ndim > 1: + dims = [y_ndim - 1] + list(range(y_ndim - 1)) + y = ops.transpose(y, perm=dims) + return y + else: + return self._merge_function(inputs) + + def compute_output_shape(self, input_shape): + if input_shape[0] is None: + output_shape = None + else: + output_shape = input_shape[0][1:] + for i in range(1, len(input_shape)): + if input_shape[i] is None: + shape = None + else: + shape = input_shape[i][1:] + output_shape = self._compute_elemwise_op_output_shape(output_shape, shape) + batch_sizes = {s[0] for s in input_shape if s is not None} - {None} + if len(batch_sizes) == 1: + output_shape = (list(batch_sizes)[0],) + output_shape + else: + output_shape = (None,) + output_shape + return 
output_shape + + def compute_output_spec(self, inputs): + output_shape = self.compute_output_shape([x.shape for x in inputs]) + output_sparse = all((x.sparse for x in inputs)) + return KerasTensor(output_shape, dtype=self.compute_dtype, sparse=output_sparse) + + def compute_mask(self, inputs, mask=None): + if mask is None: + return None + if not isinstance(mask, (tuple, list)): + raise ValueError(f'`mask` should be a list. Received: mask={mask}') + if not isinstance(inputs, (tuple, list)): + raise ValueError(f'`inputs` should be a list. Received: inputs={inputs}') + if len(mask) != len(inputs): + raise ValueError(f'The lists `inputs` and `mask` should have the same length. Received: inputs={inputs} of length {len(inputs)}, and mask={mask} of length {len(mask)}') + if any((m is None for m in mask)): + return None + output_mask = mask[0] + for m in mask[1:]: + output_mask = ops.logical_or(output_mask, m) + return output_mask + + def get_config(self): + return super().get_config() + +# File: keras-master/keras/src/layers/merging/concatenate.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + +@keras_export('keras.layers.Concatenate') +class Concatenate(Merge): + + def __init__(self, axis=-1, **kwargs): + super().__init__(**kwargs) + self.axis = axis + self.supports_masking = True + self._reshape_required = False + + def build(self, input_shape): + if len(input_shape) < 1 or not isinstance(input_shape[0], (tuple, list)): + raise ValueError(f'A `Concatenate` layer should be called on a list of at least 1 input. Received: input_shape={input_shape}') + if all((shape is None for shape in input_shape)): + return + reduced_inputs_shapes = [list(shape) for shape in input_shape] + shape_set = set() + for i in range(len(reduced_inputs_shapes)): + concat_axis = self.axis % len(reduced_inputs_shapes[i]) + for (axis, axis_value) in enumerate(reduced_inputs_shapes[i][1:], start=1): + if axis != concat_axis and axis_value == 1: + del reduced_inputs_shapes[i][axis] + if len(reduced_inputs_shapes[i]) > self.axis: + del reduced_inputs_shapes[i][self.axis] + shape_set.add(tuple(reduced_inputs_shapes[i])) + if len(shape_set) != 1: + err_msg = f'A `Concatenate` layer requires inputs with matching shapes except for the concatenation axis. Received: input_shape={input_shape}' + ranks = set((len(shape) for shape in shape_set)) + if len(ranks) != 1: + raise ValueError(err_msg) + (rank,) = ranks + for axis in range(rank): + unique_dims = set((shape[axis] for shape in shape_set if shape[axis] is not None)) + if len(unique_dims) > 1: + raise ValueError(err_msg) + self.built = True + + def _merge_function(self, inputs): + return ops.concatenate(inputs, axis=self.axis) + + def compute_output_shape(self, input_shape): + if not isinstance(input_shape, (tuple, list)) or not isinstance(input_shape[0], (tuple, list)): + raise ValueError(f'A `Concatenate` layer should be called on a list of inputs. Received: input_shape={input_shape}') + input_shapes = input_shape + output_shape = list(input_shapes[0]) + for shape in input_shapes[1:]: + if output_shape[self.axis] is None or shape[self.axis] is None: + output_shape[self.axis] = None + break + output_shape[self.axis] += shape[self.axis] + return tuple(output_shape) + + def compute_mask(self, inputs, mask=None): + if mask is None: + return None + if not isinstance(mask, (tuple, list)): + raise ValueError(f'`mask` should be a list. 
Received mask={mask}') + if not isinstance(inputs, (tuple, list)): + raise ValueError(f'`inputs` should be a list. Received: inputs={inputs}') + if len(mask) != len(inputs): + raise ValueError(f'The lists `inputs` and `mask` should have the same length. Received: inputs={inputs} of length {len(inputs)}, and mask={mask} of length {len(mask)}') + if all((m is None for m in mask)): + return None + masks = [] + for (input_i, mask_i) in zip(inputs, mask): + if mask_i is None: + masks.append(ops.ones_like(input_i, dtype='bool')) + elif mask_i.ndim < input_i.ndim: + masks.append(ops.broadcast_to(ops.expand_dims(mask_i, axis=-1), ops.shape(input_i))) + else: + masks.append(mask_i) + concatenated = ops.concatenate(masks, axis=self.axis) + return ops.any(concatenated, axis=-1, keepdims=False) + + def get_config(self): + config = {'axis': self.axis} + base_config = super().get_config() + return dict(list(base_config.items()) + list(config.items())) + +@keras_export('keras.layers.concatenate') +def concatenate(inputs, axis=-1, **kwargs): + return Concatenate(axis=axis, **kwargs)(inputs) + +# File: keras-master/keras/src/layers/merging/dot.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge +from keras.src.utils.numerical_utils import normalize + +def batch_dot(x, y, axes=None): + x_shape = x.shape + y_shape = y.shape + x_ndim = len(x_shape) + y_ndim = len(y_shape) + if x_ndim < 2 or y_ndim < 2: + raise ValueError(f'Cannot do batch_dot on inputs with rank < 2. Received inputs with shapes {x_shape} and {y_shape}.') + x_batch_size = x_shape[0] + y_batch_size = y_shape[0] + if x_batch_size is not None and y_batch_size is not None: + if x_batch_size != y_batch_size: + raise ValueError(f'Cannot do batch_dot on inputs with different batch sizes. Received inputs with shapes {x_shape} and {y_shape}.') + if isinstance(axes, int): + axes = [axes, axes] + if axes is None: + if y_ndim == 2: + axes = [x_ndim - 1, y_ndim - 1] + else: + axes = [x_ndim - 1, y_ndim - 2] + if any((isinstance(a, (list, tuple)) for a in axes)): + raise ValueError(f'Multiple target dimensions are not supported. Expected: None, int, (int, int), Provided: {axes} ') + axes = list(axes) + if axes[0] < 0: + axes[0] += x_ndim + if axes[1] < 0: + axes[1] += y_ndim + if 0 in axes: + raise ValueError('Cannot perform batch_dot over axis 0. If your inputs are not batched, add a dummy batch dimension to your inputs using keras.ops.expand_dims(x, 0)') + (a0, a1) = axes + d1 = x_shape[a0] + d2 = y_shape[a1] + if d1 is not None and d2 is not None and (d1 != d2): + raise ValueError(f'Cannot do batch_dot on inputs with shapes {x_shape} and {y_shape} with axes={axes}. 
x.shape[{axes[0]}] != y.shape[{axes[1]}] ({d1} != {d2}).') + orig_x_ndim = x_ndim + orig_y_ndim = y_ndim + if x_ndim == 2: + x = ops.expand_dims(x, 1) + a0 += 1 + x_ndim += 1 + if y_ndim == 2: + y = ops.expand_dims(y, 2) + y_ndim += 1 + if a0 != x_ndim - 1: + pattern = list(range(x_ndim)) + for i in range(a0, x_ndim - 1): + pattern[i] = pattern[i + 1] + pattern[-1] = a0 + x = ops.transpose(x, pattern) + if a1 != 1: + pattern = list(range(y_ndim)) + for i in range(a1, 1, -1): + pattern[i] = pattern[i - 1] + pattern[1] = a1 + y = ops.transpose(y, pattern) + if x_ndim > 3: + x_shape = ops.shape(x) + x_mid_dims = x_shape[1:-1] + x_squashed_shape = (x_shape[0], -1, x_shape[-1]) + x = ops.reshape(x, x_squashed_shape) + x_squashed = True + else: + x_squashed = False + if y_ndim > 3: + y_shape = ops.shape(y) + y_trail_dims = y_shape[2:] + y_squashed_shape = (y_shape[0], y_shape[1], -1) + y = ops.reshape(y, y_squashed_shape) + y_squashed = True + else: + y_squashed = False + result = ops.matmul(x, y) + output_shape = ops.shape(result) + do_reshape = False + if x_squashed: + output_shape = output_shape[:1] + x_mid_dims + output_shape[-1:] + do_reshape = True + if y_squashed: + output_shape = output_shape[:-1] + y_trail_dims + do_reshape = True + if do_reshape: + result = ops.reshape(result, output_shape) + if orig_x_ndim == 2: + result = ops.squeeze(result, 1) + elif orig_y_ndim == 2: + result = ops.squeeze(result, -1) + return result + +@keras_export('keras.layers.Dot') +class Dot(Merge): + + def __init__(self, axes, normalize=False, **kwargs): + super().__init__(**kwargs) + if not isinstance(axes, int): + if not isinstance(axes, (list, tuple)): + raise TypeError(f'Invalid type for argument `axes`: it should be a list or an int. Received: axes={axes}') + if len(axes) != 2: + raise ValueError(f'Invalid format for argument `axes`: it should contain two elements. Received: axes={axes}') + if not isinstance(axes[0], int) or not isinstance(axes[1], int): + raise ValueError(f'Invalid format for argument `axes`: list elements should be integers. Received: axes={axes}') + self.axes = axes + self.normalize = normalize + self.supports_masking = True + self._reshape_required = False + + def build(self, input_shape): + if not isinstance(input_shape[0], (tuple, list)) or len(input_shape) != 2: + raise ValueError(f'A `Dot` layer should be called on a list of 2 inputs. Received: input_shape={input_shape}') + shape1 = input_shape[0] + shape2 = input_shape[1] + if shape1 is None or shape2 is None: + return + if isinstance(self.axes, int): + if self.axes < 0: + axes = [self.axes % len(shape1), self.axes % len(shape2)] + else: + axes = [self.axes] * 2 + else: + axes = self.axes + if shape1[axes[0]] != shape2[axes[1]]: + raise ValueError(f'Incompatible input shapes: axis values {shape1[axes[0]]} (at axis {axes[0]}) != {shape2[axes[1]]} (at axis {axes[1]}). Full input shapes: {shape1}, {shape2}') + self.built = True + + def _merge_function(self, inputs): + if len(inputs) != 2: + raise ValueError(f'A `Dot` layer should be called on exactly 2 inputs. 
Received: inputs={inputs}') + x1 = inputs[0] + x2 = inputs[1] + if isinstance(self.axes, int): + if self.axes < 0: + axes = [self.axes % len(x1.shape), self.axes % len(x2.shape)] + else: + axes = [self.axes] * 2 + else: + axes = [] + for i in range(len(self.axes)): + if self.axes[i] < 0: + axes.append(self.axes[i] % len(inputs[i].shape)) + else: + axes.append(self.axes[i]) + if self.normalize: + x1 = normalize(x1, axis=axes[0]) + x2 = normalize(x2, axis=axes[1]) + output = batch_dot(x1, x2, axes) + return output + + def compute_output_shape(self, input_shape): + if not isinstance(input_shape, (tuple, list)) or len(input_shape) != 2: + raise ValueError(f'A `Dot` layer should be called on a list of 2 inputs. Received: input_shape={input_shape}') + shape1 = list(input_shape[0]) + shape2 = list(input_shape[1]) + if isinstance(self.axes, int): + if self.axes < 0: + axes = [self.axes % len(shape1), self.axes % len(shape2)] + else: + axes = [self.axes] * 2 + else: + axes = self.axes + shape1.pop(axes[0]) + shape2.pop(axes[1]) + shape2.pop(0) + output_shape = shape1 + shape2 + if len(output_shape) == 1: + output_shape += [1] + return tuple(output_shape) + + def compute_mask(self, inputs, mask=None): + return None + + def get_config(self): + config = {'axes': self.axes, 'normalize': self.normalize} + base_config = super().get_config() + return dict(list(base_config.items()) + list(config.items())) + +@keras_export('keras.layers.dot') +def dot(inputs, axes=-1, **kwargs): + return Dot(axes=axes, **kwargs)(inputs) + +# File: keras-master/keras/src/layers/merging/maximum.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + +@keras_export('keras.layers.Maximum') +class Maximum(Merge): + + def _merge_function(self, inputs): + return self._apply_merge_op_and_or_mask(ops.maximum, inputs) + +@keras_export('keras.layers.maximum') +def maximum(inputs, **kwargs): + return Maximum(**kwargs)(inputs) + +# File: keras-master/keras/src/layers/merging/minimum.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + +@keras_export('keras.layers.Minimum') +class Minimum(Merge): + + def _merge_function(self, inputs): + return self._apply_merge_op_and_or_mask(ops.minimum, inputs) + +@keras_export('keras.layers.minimum') +def minimum(inputs, **kwargs): + return Minimum(**kwargs)(inputs) + +# File: keras-master/keras/src/layers/merging/multiply.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + +@keras_export('keras.layers.Multiply') +class Multiply(Merge): + + def _merge_function(self, inputs): + masks = [getattr(x, '_keras_mask', None) for x in inputs] + has_output_mask = all((mask is not None for mask in masks)) + output = None + output_mask = None + for (x, mask) in zip(inputs, masks): + if mask is not None: + mask = ops.broadcast_to(ops.expand_dims(mask, -1), ops.shape(x)) + x = ops.where(mask, x, ops.cast(1, x.dtype)) + if has_output_mask: + output_mask = mask if output_mask is None else ops.logical_or(output_mask, mask) + output = x if output is None else ops.multiply(output, x) + if has_output_mask: + output = ops.where(output_mask, output, ops.cast(0, output.dtype)) + output_mask = ops.any(output_mask, axis=-1, keepdims=False) + output._keras_mask = output_mask + return output + +@keras_export('keras.layers.multiply') +def multiply(inputs, **kwargs): + return 
Multiply(**kwargs)(inputs) + +# File: keras-master/keras/src/layers/merging/subtract.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + +@keras_export('keras.layers.Subtract') +class Subtract(Merge): + + def build(self, input_shape): + super().build(input_shape) + if len(input_shape) != 2: + raise ValueError(f'A `Subtract` layer should be called on exactly 2 inputs. Received: input_shape={input_shape}') + + def _merge_function(self, inputs): + if len(inputs) != 2: + raise ValueError(f'A `Subtract` layer should be called on exactly 2 inputs. Received: inputs={inputs}') + return ops.subtract(inputs[0], inputs[1]) + +@keras_export('keras.layers.subtract') +def subtract(inputs, **kwargs): + return Subtract(**kwargs)(inputs) + +# File: keras-master/keras/src/layers/normalization/batch_normalization.py +from keras.src import backend +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.BatchNormalization') +class BatchNormalization(Layer): + + def __init__(self, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, synchronized=False, **kwargs): + super().__init__(**kwargs) + self.axis = int(axis) + if synchronized and backend.backend() != 'tensorflow': + raise ValueError('Argument synchronized=True is only supported with the TensorFlow backend.') + self.synchronized = synchronized + self.momentum = float(momentum) + self.epsilon = float(epsilon) + self.center = center + self.scale = scale + self.beta_initializer = initializers.get(beta_initializer) + self.gamma_initializer = initializers.get(gamma_initializer) + self.moving_mean_initializer = initializers.get(moving_mean_initializer) + self.moving_variance_initializer = initializers.get(moving_variance_initializer) + self.beta_regularizer = regularizers.get(beta_regularizer) + self.gamma_regularizer = regularizers.get(gamma_regularizer) + self.beta_constraint = constraints.get(beta_constraint) + self.gamma_constraint = constraints.get(gamma_constraint) + self.supports_masking = True + self.gamma = None + self.beta = None + self.moving_mean = None + self.moving_variance = None + self._reduction_axes = None + + def build(self, input_shape): + shape = (input_shape[self.axis],) + if self.scale: + self.gamma = self.add_weight(shape=shape, name='gamma', initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, trainable=True, autocast=False) + if self.center: + self.beta = self.add_weight(shape=shape, name='beta', initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, trainable=True, autocast=False) + self.moving_mean = self.add_weight(shape=shape, name='moving_mean', initializer=self.moving_mean_initializer, trainable=False, autocast=False) + self.moving_variance = self.add_weight(shape=shape, name='moving_variance', initializer=self.moving_variance_initializer, trainable=False, autocast=False) + self.input_spec = InputSpec(ndim=len(input_shape), axes={self.axis: input_shape[self.axis]}) + reduction_axes = 
list(range(len(input_shape))) + del reduction_axes[self.axis] + self._reduction_axes = reduction_axes + self.built = True + + def compute_output_shape(self, input_shape): + if isinstance(self.axis, int): + axes = [self.axis] + else: + axes = self.axis + for axis in axes: + if axis >= len(input_shape) or axis < -len(input_shape): + raise ValueError(f'Axis {axis} is out of bounds for input shape {input_shape}. Received: axis={self.axis}') + return input_shape + + def call(self, inputs, training=None, mask=None): + if mask is not None: + if len(mask.shape) != len(inputs.shape) - 1: + raise ValueError(f'The mask provided should be one dimension less than the inputs. Received: mask.shape={mask.shape}, inputs.shape={inputs.shape}') + compute_dtype = backend.result_type(inputs.dtype, 'float32') + inputs = ops.cast(inputs, compute_dtype) + moving_mean = ops.cast(self.moving_mean, inputs.dtype) + moving_variance = ops.cast(self.moving_variance, inputs.dtype) + if training and self.trainable: + (mean, variance) = self._moments(inputs, mask) + self.moving_mean.assign(moving_mean * self.momentum + mean * (1.0 - self.momentum)) + self.moving_variance.assign(moving_variance * self.momentum + variance * (1.0 - self.momentum)) + else: + mean = moving_mean + variance = moving_variance + if self.scale: + gamma = ops.cast(self.gamma, inputs.dtype) + else: + gamma = None + if self.center: + beta = ops.cast(self.beta, inputs.dtype) + else: + beta = None + outputs = ops.batch_normalization(x=inputs, mean=mean, variance=variance, axis=self.axis, offset=beta, scale=gamma, epsilon=self.epsilon) + return ops.cast(outputs, self.compute_dtype) + + def get_config(self): + base_config = super().get_config() + config = {'axis': self.axis, 'momentum': self.momentum, 'epsilon': self.epsilon, 'center': self.center, 'scale': self.scale, 'beta_initializer': initializers.serialize(self.beta_initializer), 'gamma_initializer': initializers.serialize(self.gamma_initializer), 'moving_mean_initializer': initializers.serialize(self.moving_mean_initializer), 'moving_variance_initializer': initializers.serialize(self.moving_variance_initializer), 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer), 'beta_constraint': constraints.serialize(self.beta_constraint), 'gamma_constraint': constraints.serialize(self.gamma_constraint), 'synchronized': self.synchronized} + return {**base_config, **config} + + def _moments(self, inputs, mask): + if mask is None: + return ops.moments(inputs, axes=self._reduction_axes, synchronized=self.synchronized) + mask_weights = ops.cast(mask, inputs.dtype) + mask_weights_broadcasted = ops.expand_dims(mask_weights, axis=-1) + weighted_inputs = mask_weights_broadcasted * inputs + weighted_input_sum = ops.sum(weighted_inputs, self._reduction_axes, keepdims=True) + sum_of_weights = ops.sum(mask_weights_broadcasted, self._reduction_axes, keepdims=True) + mean = weighted_input_sum / (sum_of_weights + backend.config.epsilon()) + difference = weighted_inputs - mean + squared_difference = ops.square(difference) + weighted_distsq = ops.sum(mask_weights_broadcasted * squared_difference, self._reduction_axes, keepdims=True) + variance = weighted_distsq / (sum_of_weights + backend.config.epsilon()) + return (ops.squeeze(mean), ops.squeeze(variance)) + +# File: keras-master/keras/src/layers/normalization/group_normalization.py +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src 
import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.GroupNormalization') +class GroupNormalization(Layer): + + def __init__(self, groups=32, axis=-1, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, **kwargs): + super().__init__(**kwargs) + self.supports_masking = True + self.groups = groups + self.axis = axis + self.epsilon = epsilon + self.center = center + self.scale = scale + self.beta_initializer = initializers.get(beta_initializer) + self.gamma_initializer = initializers.get(gamma_initializer) + self.beta_regularizer = regularizers.get(beta_regularizer) + self.gamma_regularizer = regularizers.get(gamma_regularizer) + self.beta_constraint = constraints.get(beta_constraint) + self.gamma_constraint = constraints.get(gamma_constraint) + + def build(self, input_shape): + dim = input_shape[self.axis] + if dim is None: + raise ValueError(f'Axis {self.axis} of input tensor should have a defined dimension but the layer received an input with shape {input_shape}.') + if self.groups == -1: + self.groups = dim + if dim < self.groups: + raise ValueError(f'Number of groups ({self.groups}) cannot be more than the number of channels ({dim}).') + if dim % self.groups != 0: + raise ValueError(f'Number of groups ({self.groups}) must be a divisor of the number of channels ({dim}).') + self.input_spec = InputSpec(ndim=len(input_shape), axes={self.axis: dim}) + if self.scale: + self.gamma = self.add_weight(shape=(dim,), name='gamma', initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint) + else: + self.gamma = None + if self.center: + self.beta = self.add_weight(shape=(dim,), name='beta', initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint) + else: + self.beta = None + super().build(input_shape) + + def call(self, inputs): + reshaped_inputs = self._reshape_into_groups(inputs) + normalized_inputs = self._apply_normalization(reshaped_inputs, inputs.shape) + return ops.reshape(normalized_inputs, ops.shape(inputs)) + + def _reshape_into_groups(self, inputs): + input_shape = ops.shape(inputs) + group_shape = list(inputs.shape) + group_shape[0] = -1 + for (i, e) in enumerate(group_shape[1:]): + if e is None: + group_shape[i + 1] = input_shape[i + 1] + group_shape[self.axis] = input_shape[self.axis] // self.groups + group_shape.insert(self.axis, self.groups) + reshaped_inputs = ops.reshape(inputs, group_shape) + return reshaped_inputs + + def _apply_normalization(self, reshaped_inputs, input_shape): + group_reduction_axes = list(range(1, len(reshaped_inputs.shape))) + axis = -2 if self.axis == -1 else self.axis - 1 + group_reduction_axes.pop(axis) + broadcast_shape = self._create_broadcast_shape(input_shape) + (mean, variance) = ops.moments(reshaped_inputs, axes=group_reduction_axes, keepdims=True) + inv = ops.rsqrt(variance + self.epsilon) + if self.scale: + gamma = ops.reshape(self.gamma, broadcast_shape) + gamma = ops.cast(gamma, reshaped_inputs.dtype) + inv = inv * gamma + res = -mean * inv + if self.center: + beta = ops.reshape(self.beta, broadcast_shape) + beta = ops.cast(beta, reshaped_inputs.dtype) + res = res + beta + normalized_inputs = reshaped_inputs * inv + res + return normalized_inputs + + def _create_broadcast_shape(self, 
input_shape): + broadcast_shape = [1] * len(input_shape) + broadcast_shape[self.axis] = input_shape[self.axis] // self.groups + broadcast_shape.insert(self.axis, self.groups) + return broadcast_shape + + def compute_output_shape(self, input_shape): + if isinstance(self.axis, int): + axes = [self.axis] + else: + axes = self.axis + for axis in axes: + if axis >= len(input_shape) or axis < -len(input_shape): + raise ValueError(f'Axis {axis} is out of bounds for input shape {input_shape}. Received: axis={self.axis}') + return input_shape + + def get_config(self): + config = {'groups': self.groups, 'axis': self.axis, 'epsilon': self.epsilon, 'center': self.center, 'scale': self.scale, 'beta_initializer': initializers.serialize(self.beta_initializer), 'gamma_initializer': initializers.serialize(self.gamma_initializer), 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer), 'beta_constraint': constraints.serialize(self.beta_constraint), 'gamma_constraint': constraints.serialize(self.gamma_constraint)} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/normalization/layer_normalization.py +from keras.src import backend +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.LayerNormalization') +class LayerNormalization(Layer): + + def __init__(self, axis=-1, epsilon=0.001, center=True, scale=True, rms_scaling=False, beta_initializer='zeros', gamma_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, **kwargs): + super().__init__(**kwargs) + if isinstance(axis, (list, tuple)): + self.axis = list(axis) + elif isinstance(axis, int): + self.axis = axis + else: + raise TypeError("Expected an int or a list/tuple of ints for the argument 'axis', but received: %r" % axis) + self.epsilon = epsilon + self.center = center + self.scale = scale + self.rms_scaling = rms_scaling + self.beta_initializer = initializers.get(beta_initializer) + self.gamma_initializer = initializers.get(gamma_initializer) + self.beta_regularizer = regularizers.get(beta_regularizer) + self.gamma_regularizer = regularizers.get(gamma_regularizer) + self.beta_constraint = constraints.get(beta_constraint) + self.gamma_constraint = constraints.get(gamma_constraint) + self.supports_masking = True + self.autocast = False + + def build(self, input_shape): + if isinstance(self.axis, list): + shape = tuple([input_shape[dim] for dim in self.axis]) + else: + shape = (input_shape[self.axis],) + self.axis = [self.axis] + if self.scale or self.rms_scaling: + self.gamma = self.add_weight(name='gamma', shape=shape, initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, trainable=True, autocast=False) + else: + self.gamma = None + if self.center and (not self.rms_scaling): + self.beta = self.add_weight(name='beta', shape=shape, initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, trainable=True, autocast=False) + else: + self.beta = None + self.built = True + + def call(self, inputs): + input_shape = inputs.shape + ndims = len(input_shape) + broadcast_shape = [1] * ndims + for dim in self.axis: + broadcast_shape[dim] = input_shape[dim] + + def _broadcast(v): + 
if v is not None and len(v.shape) != ndims and (self.axis != [ndims - 1]): + return ops.reshape(v, broadcast_shape) + return v + compute_dtype = backend.result_type(inputs.dtype, 'float32') + inputs = ops.cast(inputs, compute_dtype) + if self.rms_scaling: + variance = ops.var(inputs, axis=self.axis, keepdims=True) + inv = ops.rsqrt(variance + self.epsilon) + outputs = inputs * inv * ops.cast(_broadcast(self.gamma), inputs.dtype) + else: + (mean, variance) = ops.moments(inputs, axes=self.axis, keepdims=True) + (gamma, beta) = (_broadcast(self.gamma), _broadcast(self.beta)) + inv = ops.rsqrt(variance + self.epsilon) + if gamma is not None: + gamma = ops.cast(gamma, inputs.dtype) + inv = inv * gamma + res = -mean * inv + if beta is not None: + beta = ops.cast(beta, inputs.dtype) + res = res + beta + outputs = inputs * inv + res + return ops.cast(outputs, self.compute_dtype) + + def compute_output_shape(self, input_shape): + if isinstance(self.axis, int): + axes = [self.axis] + else: + axes = self.axis + for axis in axes: + if axis >= len(input_shape) or axis < -len(input_shape): + raise ValueError(f'Axis {axis} is out of bounds for input shape {input_shape}. Received: axis={self.axis}') + return input_shape + + def get_config(self): + config = {'axis': self.axis, 'epsilon': self.epsilon, 'center': self.center, 'scale': self.scale, 'rms_scaling': self.rms_scaling, 'beta_initializer': initializers.serialize(self.beta_initializer), 'gamma_initializer': initializers.serialize(self.gamma_initializer), 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer), 'beta_constraint': constraints.serialize(self.beta_constraint), 'gamma_constraint': constraints.serialize(self.gamma_constraint)} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/normalization/spectral_normalization.py +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers import Wrapper +from keras.src.layers.input_spec import InputSpec +from keras.src.utils.numerical_utils import normalize + +@keras_export('keras.layers.SpectralNormalization') +class SpectralNormalization(Wrapper): + + def __init__(self, layer, power_iterations=1, **kwargs): + super().__init__(layer, **kwargs) + if power_iterations <= 0: + raise ValueError(f'`power_iterations` should be greater than zero. 
Received: `power_iterations={power_iterations}`') + self.power_iterations = power_iterations + + def build(self, input_shape): + super().build(input_shape) + self.input_spec = InputSpec(shape=[None] + list(input_shape[1:])) + if hasattr(self.layer, 'kernel'): + self.kernel = self.layer.kernel + elif hasattr(self.layer, 'embeddings'): + self.kernel = self.layer.embeddings + else: + raise ValueError(f"{type(self.layer).__name__} object has no attribute 'kernel' nor 'embeddings'") + self.kernel_shape = self.kernel.shape + self.vector_u = self.add_weight(shape=(1, self.kernel_shape[-1]), initializer=initializers.TruncatedNormal(stddev=0.02), trainable=False, name='vector_u', dtype=self.kernel.dtype) + + def call(self, inputs, training=False): + if training: + (new_vector_u, new_kernel) = ops.cond(ops.all(ops.equal(self.kernel.value, 0)), lambda : (self.vector_u.value, self.kernel.value), self.normalized_weights) + self.vector_u.assign(new_vector_u) + self.kernel.assign(new_kernel) + output = self.layer(inputs) + return ops.cast(output, inputs.dtype) + + def compute_output_shape(self, input_shape): + return self.layer.compute_output_shape(input_shape) + + def normalized_weights(self): + weights = ops.reshape(self.kernel, [-1, self.kernel_shape[-1]]) + vector_u = self.vector_u.value + for _ in range(self.power_iterations): + vector_v = normalize(ops.matmul(vector_u, ops.transpose(weights)), axis=None) + vector_u = normalize(ops.matmul(vector_v, weights), axis=None) + sigma = ops.matmul(ops.matmul(vector_v, weights), ops.transpose(vector_u)) + kernel = ops.reshape(ops.divide(self.kernel, sigma), self.kernel_shape) + return (ops.cast(vector_u, self.vector_u.dtype), ops.cast(kernel, self.kernel.dtype)) + + def get_config(self): + config = {'power_iterations': self.power_iterations} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/normalization/unit_normalization.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.UnitNormalization') +class UnitNormalization(Layer): + + def __init__(self, axis=-1, **kwargs): + super().__init__(**kwargs) + if isinstance(axis, (list, tuple)): + self.axis = list(axis) + elif isinstance(axis, int): + self.axis = axis + else: + raise TypeError(f'Invalid value for `axis` argument: expected an int or a list/tuple of ints. 
Received: axis={axis}') + self.supports_masking = True + self.built = True + + def call(self, inputs): + return ops.normalize(inputs, axis=self.axis, order=2, epsilon=1e-12) + + def compute_output_shape(self, input_shape): + if isinstance(self.axis, int): + axes = [self.axis] + else: + axes = self.axis + for axis in axes: + if axis >= len(input_shape) or axis < -len(input_shape): + raise ValueError(f'Axis {self.axis} is out of bounds for input shape {input_shape}.') + return input_shape + + def get_config(self): + config = super().get_config() + config.update({'axis': self.axis}) + return config + +# File: keras-master/keras/src/layers/pooling/average_pooling1d.py +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + +@keras_export(['keras.layers.AveragePooling1D', 'keras.layers.AvgPool1D']) +class AveragePooling1D(BasePooling): + + def __init__(self, pool_size, strides=None, padding='valid', data_format=None, name=None, **kwargs): + super().__init__(pool_size, strides, pool_dimensions=1, pool_mode='average', padding=padding, data_format=data_format, name=name, **kwargs) + +# File: keras-master/keras/src/layers/pooling/average_pooling2d.py +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + +@keras_export(['keras.layers.AveragePooling2D', 'keras.layers.AvgPool2D']) +class AveragePooling2D(BasePooling): + + def __init__(self, pool_size, strides=None, padding='valid', data_format=None, name=None, **kwargs): + super().__init__(pool_size, strides, pool_dimensions=2, pool_mode='average', padding=padding, data_format=data_format, name=name, **kwargs) + +# File: keras-master/keras/src/layers/pooling/average_pooling3d.py +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + +@keras_export(['keras.layers.AveragePooling3D', 'keras.layers.AvgPool3D']) +class AveragePooling3D(BasePooling): + + def __init__(self, pool_size, strides=None, padding='valid', data_format=None, name=None, **kwargs): + super().__init__(pool_size, strides, pool_dimensions=3, pool_mode='average', padding=padding, data_format=data_format, name=name, **kwargs) + +# File: keras-master/keras/src/layers/pooling/base_global_pooling.py +from keras.src import backend +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + +class BaseGlobalPooling(Layer): + + def __init__(self, pool_dimensions, data_format=None, keepdims=False, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + self.keepdims = keepdims + self.input_spec = InputSpec(ndim=pool_dimensions + 2) + self.built = True + + def call(self, inputs): + raise NotImplementedError + + def compute_output_shape(self, input_shape): + num_spatial_dims = len(input_shape) - 2 + if self.data_format == 'channels_last': + if self.keepdims: + return (input_shape[0],) + (1,) * num_spatial_dims + (input_shape[-1],) + else: + return (input_shape[0],) + (input_shape[-1],) + elif self.keepdims: + return (input_shape[0], input_shape[1]) + (1,) * num_spatial_dims + else: + return (input_shape[0], input_shape[1]) + + def get_config(self): + config = super().get_config() + config.update({'data_format': self.data_format, 'keepdims': self.keepdims}) + return config + +# File: keras-master/keras/src/layers/pooling/base_pooling.py +from keras.src import backend +from keras.src import ops +from keras.src.layers.input_spec import InputSpec 
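# ---------------------------------------------------------------------
# Editor's sketch (not part of the Keras sources): the global pooling
# base class above reduces every spatial axis and keeps the channel
# axis, with `keepdims` retaining the reduced axes as size-1 dims.
# A minimal illustration, assuming only the public `keras` API; the
# helper name `_global_pooling_shape_demo` is hypothetical.
def _global_pooling_shape_demo():
    import numpy as np
    import keras

    x = np.random.rand(4, 8, 8, 3)  # (batch, height, width, channels)
    # channels_last: both spatial axes are averaged away.
    y = keras.layers.GlobalAveragePooling2D(data_format="channels_last")(x)
    assert tuple(y.shape) == (4, 3)
    # keepdims=True keeps the pooled axes as size-1 dimensions.
    y = keras.layers.GlobalAveragePooling2D(keepdims=True)(x)
    assert tuple(y.shape) == (4, 1, 1, 3)
# ---------------------------------------------------------------------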
+from keras.src.layers.layer import Layer +from keras.src.ops.operation_utils import compute_pooling_output_shape +from keras.src.utils import argument_validation + +class BasePooling(Layer): + + def __init__(self, pool_size, strides, pool_dimensions, pool_mode='max', padding='valid', data_format=None, name=None, **kwargs): + super().__init__(name=name, **kwargs) + self.pool_size = argument_validation.standardize_tuple(pool_size, pool_dimensions, 'pool_size') + strides = pool_size if strides is None else strides + self.strides = argument_validation.standardize_tuple(strides, pool_dimensions, 'strides', allow_zero=True) + self.pool_mode = pool_mode + self.padding = padding + self.data_format = backend.standardize_data_format(data_format) + self.input_spec = InputSpec(ndim=pool_dimensions + 2) + self.built = True + + def call(self, inputs): + if self.pool_mode == 'max': + return ops.max_pool(inputs, pool_size=self.pool_size, strides=self.strides, padding=self.padding, data_format=self.data_format) + elif self.pool_mode == 'average': + return ops.average_pool(inputs, pool_size=self.pool_size, strides=self.strides, padding=self.padding, data_format=self.data_format) + else: + raise ValueError(f"`pool_mode` must be either 'max' or 'average'. Received: {self.pool_mode}.") + + def compute_output_shape(self, input_shape): + return compute_pooling_output_shape(input_shape, self.pool_size, self.strides, self.padding, self.data_format) + + def get_config(self): + config = super().get_config() + config.update({'pool_size': self.pool_size, 'padding': self.padding, 'strides': self.strides, 'data_format': self.data_format}) + return config + +# File: keras-master/keras/src/layers/pooling/global_average_pooling1d.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + +@keras_export(['keras.layers.GlobalAveragePooling1D', 'keras.layers.GlobalAvgPool1D']) +class GlobalAveragePooling1D(BaseGlobalPooling): + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__(pool_dimensions=1, data_format=data_format, keepdims=keepdims, **kwargs) + self.supports_masking = True + + def call(self, inputs, mask=None): + steps_axis = 1 if self.data_format == 'channels_last' else 2 + if mask is not None: + mask = backend.cast(mask, inputs[0].dtype) + mask = ops.expand_dims(mask, 2 if self.data_format == 'channels_last' else 1) + inputs *= mask + return ops.sum(inputs, axis=steps_axis, keepdims=self.keepdims) / ops.sum(mask, axis=steps_axis, keepdims=self.keepdims) + else: + return ops.mean(inputs, axis=steps_axis, keepdims=self.keepdims) + + def compute_mask(self, inputs, mask=None): + return None + +# File: keras-master/keras/src/layers/pooling/global_average_pooling2d.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + +@keras_export(['keras.layers.GlobalAveragePooling2D', 'keras.layers.GlobalAvgPool2D']) +class GlobalAveragePooling2D(BaseGlobalPooling): + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__(pool_dimensions=2, data_format=data_format, keepdims=keepdims, **kwargs) + + def call(self, inputs): + if self.data_format == 'channels_last': + return ops.mean(inputs, axis=[1, 2], keepdims=self.keepdims) + return ops.mean(inputs, axis=[2, 3], keepdims=self.keepdims) + +# File: 
keras-master/keras/src/layers/pooling/global_average_pooling3d.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + +@keras_export(['keras.layers.GlobalAveragePooling3D', 'keras.layers.GlobalAvgPool3D']) +class GlobalAveragePooling3D(BaseGlobalPooling): + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__(pool_dimensions=3, data_format=data_format, keepdims=keepdims, **kwargs) + + def call(self, inputs): + if self.data_format == 'channels_last': + return ops.mean(inputs, axis=[1, 2, 3], keepdims=self.keepdims) + return ops.mean(inputs, axis=[2, 3, 4], keepdims=self.keepdims) + +# File: keras-master/keras/src/layers/pooling/global_max_pooling1d.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + +@keras_export(['keras.layers.GlobalMaxPooling1D', 'keras.layers.GlobalMaxPool1D']) +class GlobalMaxPooling1D(BaseGlobalPooling): + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__(pool_dimensions=1, data_format=data_format, keepdims=keepdims, **kwargs) + + def call(self, inputs): + steps_axis = 1 if self.data_format == 'channels_last' else 2 + return ops.max(inputs, axis=steps_axis, keepdims=self.keepdims) + +# File: keras-master/keras/src/layers/pooling/global_max_pooling2d.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + +@keras_export(['keras.layers.GlobalMaxPooling2D', 'keras.layers.GlobalMaxPool2D']) +class GlobalMaxPooling2D(BaseGlobalPooling): + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__(pool_dimensions=2, data_format=data_format, keepdims=keepdims, **kwargs) + + def call(self, inputs): + if self.data_format == 'channels_last': + return ops.max(inputs, axis=[1, 2], keepdims=self.keepdims) + return ops.max(inputs, axis=[2, 3], keepdims=self.keepdims) + +# File: keras-master/keras/src/layers/pooling/global_max_pooling3d.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + +@keras_export(['keras.layers.GlobalMaxPooling3D', 'keras.layers.GlobalMaxPool3D']) +class GlobalMaxPooling3D(BaseGlobalPooling): + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__(pool_dimensions=3, data_format=data_format, keepdims=keepdims, **kwargs) + + def call(self, inputs): + if self.data_format == 'channels_last': + return ops.max(inputs, axis=[1, 2, 3], keepdims=self.keepdims) + return ops.max(inputs, axis=[2, 3, 4], keepdims=self.keepdims) + +# File: keras-master/keras/src/layers/pooling/max_pooling1d.py +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + +@keras_export(['keras.layers.MaxPooling1D', 'keras.layers.MaxPool1D']) +class MaxPooling1D(BasePooling): + + def __init__(self, pool_size=2, strides=None, padding='valid', data_format=None, name=None, **kwargs): + super().__init__(pool_size, strides, pool_dimensions=1, pool_mode='max', padding=padding, data_format=data_format, name=name, **kwargs) + +# File: keras-master/keras/src/layers/pooling/max_pooling2d.py +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + 
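# ---------------------------------------------------------------------
# Editor's sketch (not part of the Keras sources): output sizes for the
# strided pooling layers defined in these files follow
# `compute_pooling_output_shape`: with "valid" padding each spatial axis
# becomes floor((input - pool) / stride) + 1, and with "same" padding it
# becomes ceil(input / stride). Strides default to `pool_size`. Only the
# public `keras` API is assumed; the helper name is hypothetical.
def _max_pooling_shape_demo():
    import numpy as np
    import keras

    x = np.random.rand(1, 7, 7, 1)  # (batch, height, width, channels)
    # "valid": floor((7 - 2) / 2) + 1 = 3 per spatial axis.
    y = keras.layers.MaxPooling2D(pool_size=(2, 2), padding="valid")(x)
    assert tuple(y.shape) == (1, 3, 3, 1)
    # "same": ceil(7 / 2) = 4 per spatial axis.
    y = keras.layers.MaxPooling2D(pool_size=(2, 2), padding="same")(x)
    assert tuple(y.shape) == (1, 4, 4, 1)
# ---------------------------------------------------------------------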
+@keras_export(['keras.layers.MaxPooling2D', 'keras.layers.MaxPool2D']) +class MaxPooling2D(BasePooling): + + def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format=None, name=None, **kwargs): + super().__init__(pool_size, strides, pool_dimensions=2, pool_mode='max', padding=padding, data_format=data_format, name=name, **kwargs) + +# File: keras-master/keras/src/layers/pooling/max_pooling3d.py +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + +@keras_export(['keras.layers.MaxPooling3D', 'keras.layers.MaxPool3D']) +class MaxPooling3D(BasePooling): + + def __init__(self, pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None, name=None, **kwargs): + super().__init__(pool_size, strides, pool_dimensions=3, pool_mode='max', padding=padding, data_format=data_format, name=name, **kwargs) + +# File: keras-master/keras/src/layers/preprocessing/category_encoding.py +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src.utils import backend_utils +from keras.src.utils import numerical_utils + +@keras_export('keras.layers.CategoryEncoding') +class CategoryEncoding(TFDataLayer): + + def __init__(self, num_tokens=None, output_mode='multi_hot', sparse=False, **kwargs): + super().__init__(**kwargs) + if output_mode == 'binary': + output_mode = 'multi_hot' + if output_mode not in ('count', 'one_hot', 'multi_hot'): + raise ValueError(f'Unknown arg for output_mode: {output_mode}') + if num_tokens is None: + raise ValueError('num_tokens must be set to use this layer. If the number of tokens is not known beforehand, use the IntegerLookup layer instead.') + if num_tokens < 1: + raise ValueError(f'`num_tokens` must be >= 1. Received: num_tokens={num_tokens}.') + self.num_tokens = num_tokens + self.output_mode = output_mode + self.sparse = sparse + self._allow_non_tensor_positional_args = True + self._convert_input_args = False + + def _encode(self, inputs, count_weights=None): + inputs = self.backend.core.convert_to_tensor(inputs) + return numerical_utils.encode_categorical_inputs(inputs, output_mode=self.output_mode, depth=self.num_tokens, dtype=self.dtype, sparse=self.sparse, count_weights=count_weights, backend_module=self.backend) + + def compute_output_shape(self, input_shape): + if (input_shape is not None) and (len(input_shape) == 0): + return (self.num_tokens,) + if self.output_mode == 'one_hot': + if input_shape[-1] != 1: + return tuple(input_shape) + (self.num_tokens,) + elif len(input_shape) == 1: + return tuple(input_shape) + (self.num_tokens,) + else: + return tuple(input_shape[:-1]) + (self.num_tokens,) + return tuple(input_shape[:-1]) + (self.num_tokens,) + + def compute_output_spec(self, inputs, count_weights=None): + output_shape = self.compute_output_shape(inputs.shape) + return KerasTensor(output_shape, dtype=self.compute_dtype, sparse=self.sparse) + + def get_config(self): + config = {'num_tokens': self.num_tokens, 'output_mode': self.output_mode} + base_config = super().get_config() + return {**base_config, **config} + + def call(self, inputs, count_weights=None): + if count_weights is not None: + if self.output_mode != 'count': + raise ValueError(f"`count_weights` is not used when `output_mode` is not `'count'`. Received `count_weights={count_weights}`.") + count_weights = self.backend.convert_to_tensor(count_weights, dtype=self.compute_dtype) + outputs = self._encode(inputs, count_weights) + return backend_utils.convert_tf_tensor(outputs) + +# File: keras-master/keras/src/layers/preprocessing/discretization.py +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src.utils import argument_validation +from keras.src.utils import numerical_utils +from keras.src.utils.module_utils import tensorflow as tf + +@keras_export('keras.layers.Discretization') +class Discretization(TFDataLayer): + + def __init__(self, bin_boundaries=None, num_bins=None, epsilon=0.01, output_mode='int', sparse=False, dtype=None, name=None): + if dtype is None: + dtype = 'int64' if output_mode == 'int' else backend.floatx() + super().__init__(name=name, dtype=dtype) + if sparse and (not backend.SUPPORTS_SPARSE_TENSORS): + raise ValueError(f'`sparse=True` cannot be used with backend {backend.backend()}') + if sparse and output_mode == 'int': + raise ValueError(f"`sparse=True` may only be used if `output_mode` is `'one_hot'`, `'multi_hot'`, or `'count'`. Received: sparse={sparse} and output_mode={output_mode}") + argument_validation.validate_string_arg(output_mode, allowable_strings=('int', 'one_hot', 'multi_hot', 'count'), caller_name=self.__class__.__name__, arg_name='output_mode') + if num_bins is not None and num_bins < 0: + raise ValueError(f'`num_bins` must be greater than or equal to 0. Received: `num_bins={num_bins}`') + if num_bins is not None and bin_boundaries is not None: + raise ValueError(f'Both `num_bins` and `bin_boundaries` should not be set. 
Received: `num_bins={num_bins}` and `bin_boundaries={bin_boundaries}`') + self.input_bin_boundaries = bin_boundaries + self.bin_boundaries = bin_boundaries if bin_boundaries is not None else [] + self.num_bins = num_bins + self.epsilon = epsilon + self.output_mode = output_mode + self.sparse = sparse + if self.bin_boundaries: + self.summary = None + else: + self.summary = np.array([[], []], dtype='float32') + + def build(self, input_shape=None): + self.built = True + + @property + def input_dtype(self): + return backend.floatx() + + def adapt(self, data, steps=None): + if self.input_bin_boundaries is not None: + raise ValueError('Cannot adapt a Discretization layer that has been initialized with `bin_boundaries`, use `num_bins` instead.') + self.reset_state() + if isinstance(data, tf.data.Dataset): + if steps is not None: + data = data.take(steps) + for batch in data: + self.update_state(batch) + else: + self.update_state(data) + self.finalize_state() + + def update_state(self, data): + data = np.array(data).astype('float32') + summary = summarize(data, self.epsilon) + self.summary = merge_summaries(summary, self.summary, self.epsilon) + + def finalize_state(self): + if self.input_bin_boundaries is not None: + return + self.bin_boundaries = get_bin_boundaries(self.summary, self.num_bins).tolist() + + def reset_state(self): + if self.input_bin_boundaries is not None: + return + self.summary = np.array([[], []], dtype='float32') + + def compute_output_spec(self, inputs): + return backend.KerasTensor(shape=inputs.shape, dtype=self.compute_dtype) + + def load_own_variables(self, store): + if len(store) == 1: + self.summary = store['0'] + return + + def call(self, inputs): + indices = self.backend.numpy.digitize(inputs, self.bin_boundaries) + return numerical_utils.encode_categorical_inputs(indices, output_mode=self.output_mode, depth=len(self.bin_boundaries) + 1, dtype=self.compute_dtype, sparse=self.sparse, backend_module=self.backend) + + def get_config(self): + return {'bin_boundaries': self.bin_boundaries, 'num_bins': self.num_bins, 'epsilon': self.epsilon, 'output_mode': self.output_mode, 'sparse': self.sparse, 'name': self.name, 'dtype': self.dtype} + +def summarize(values, epsilon): + values = np.reshape(values, [-1]) + values = np.sort(values) + elements = np.size(values) + num_buckets = 1.0 / epsilon + increment = elements / num_buckets + start = increment + step = max(increment, 1) + boundaries = values[int(start)::int(step)] + weights = np.ones_like(boundaries) + weights = weights * step + return np.stack([boundaries, weights]) + +def merge_summaries(prev_summary, next_summary, epsilon): + merged = np.concatenate((prev_summary, next_summary), axis=1) + merged = np.take(merged, np.argsort(merged[0]), axis=1) + return compress_summary(merged, epsilon) + +def get_bin_boundaries(summary, num_bins): + return compress_summary(summary, 1.0 / num_bins)[0, :-1] + +def compress_summary(summary, epsilon): + if summary.shape[1] * epsilon < 1: + return summary + percents = epsilon + np.arange(0.0, 1.0, epsilon) + cum_weights = summary[1].cumsum() + cum_weight_percents = cum_weights / cum_weights[-1] + new_bins = np.interp(percents, cum_weight_percents, summary[0]) + cum_weights = np.interp(percents, cum_weight_percents, cum_weights) + new_weights = cum_weights - np.concatenate((np.array([0]), cum_weights[:-1])) + summary = np.stack((new_bins, new_weights)) + return summary.astype('float32') + +# File: keras-master/keras/src/layers/preprocessing/feature_space.py +from keras.src import backend 
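# ---------------------------------------------------------------------
# Editor's sketch (not part of the Keras sources): a minimal usage
# example for the `Discretization` layer above. Explicit
# `bin_boundaries` map values to bucket indices via digitize-style
# binning (k boundaries -> k + 1 buckets); with `num_bins`, `adapt()`
# estimates quantile boundaries through the summarize/merge_summaries
# helpers. Only the public `keras` API is assumed; the helper name is
# hypothetical.
def _discretization_demo():
    import numpy as np
    import keras

    # Three boundaries -> four buckets, indices 0..3.
    layer = keras.layers.Discretization(bin_boundaries=[0.0, 1.0, 2.0])
    out = layer(np.array([[-0.5, 0.5, 1.5, 2.5]]))  # -> [[0, 1, 2, 3]]
    # Learned boundaries: adapt() fits num_bins quantile-based buckets.
    layer = keras.layers.Discretization(num_bins=4, epsilon=0.01)
    layer.adapt(np.random.rand(256, 1).astype("float32"))
    out = layer(np.array([[0.1], [0.9]], dtype="float32"))  # int bucket ids
    return out
# ---------------------------------------------------------------------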
+from keras.src import layers +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src.saving import saving_lib +from keras.src.saving import serialization_lib +from keras.src.utils import backend_utils +from keras.src.utils.module_utils import tensorflow as tf +from keras.src.utils.naming import auto_name + +class Cross: + + def __init__(self, feature_names, crossing_dim, output_mode='one_hot'): + if output_mode not in {'int', 'one_hot'}: + raise ValueError(f"Invalid value for argument `output_mode`. Expected one of {{'int', 'one_hot'}}. Received: output_mode={output_mode}") + self.feature_names = tuple(feature_names) + self.crossing_dim = crossing_dim + self.output_mode = output_mode + + @property + def name(self): + return '_X_'.join(self.feature_names) + + def get_config(self): + return {'feature_names': self.feature_names, 'crossing_dim': self.crossing_dim, 'output_mode': self.output_mode} + + @classmethod + def from_config(cls, config): + return cls(**config) + +class Feature: + + def __init__(self, dtype, preprocessor, output_mode): + if output_mode not in {'int', 'one_hot', 'float'}: + raise ValueError(f"Invalid value for argument `output_mode`. Expected one of {{'int', 'one_hot', 'float'}}. Received: output_mode={output_mode}") + self.dtype = dtype + if isinstance(preprocessor, dict): + preprocessor = serialization_lib.deserialize_keras_object(preprocessor) + self.preprocessor = preprocessor + self.output_mode = output_mode + + def get_config(self): + return {'dtype': self.dtype, 'preprocessor': serialization_lib.serialize_keras_object(self.preprocessor), 'output_mode': self.output_mode} + + @classmethod + def from_config(cls, config): + return cls(**config) + +@keras_export('keras.utils.FeatureSpace') +class FeatureSpace(Layer): + + @classmethod + def cross(cls, feature_names, crossing_dim, output_mode='one_hot'): + return Cross(feature_names, crossing_dim, output_mode=output_mode) + + @classmethod + def feature(cls, dtype, preprocessor, output_mode): + return Feature(dtype, preprocessor, output_mode) + + @classmethod + def float(cls, name=None): + name = name or auto_name('float') + preprocessor = TFDIdentity(dtype='float32', name=f'{name}_preprocessor') + return Feature(dtype='float32', preprocessor=preprocessor, output_mode='float') + + @classmethod + def float_rescaled(cls, scale=1.0, offset=0.0, name=None): + name = name or auto_name('float_rescaled') + preprocessor = layers.Rescaling(scale=scale, offset=offset, name=f'{name}_preprocessor') + return Feature(dtype='float32', preprocessor=preprocessor, output_mode='float') + + @classmethod + def float_normalized(cls, name=None): + name = name or auto_name('float_normalized') + preprocessor = layers.Normalization(axis=-1, name=f'{name}_preprocessor') + return Feature(dtype='float32', preprocessor=preprocessor, output_mode='float') + + @classmethod + def float_discretized(cls, num_bins, bin_boundaries=None, output_mode='one_hot', name=None): + name = name or auto_name('float_discretized') + preprocessor = layers.Discretization(num_bins=num_bins, bin_boundaries=bin_boundaries, name=f'{name}_preprocessor') + return Feature(dtype='float32', preprocessor=preprocessor, output_mode=output_mode) + + @classmethod + def integer_categorical(cls, max_tokens=None, num_oov_indices=1, output_mode='one_hot', name=None): + name = name or auto_name('integer_categorical') + preprocessor = 
layers.IntegerLookup(name=f'{name}_preprocessor', max_tokens=max_tokens, num_oov_indices=num_oov_indices) + return Feature(dtype='int32', preprocessor=preprocessor, output_mode=output_mode) + + @classmethod + def string_categorical(cls, max_tokens=None, num_oov_indices=1, output_mode='one_hot', name=None): + name = name or auto_name('string_categorical') + preprocessor = layers.StringLookup(name=f'{name}_preprocessor', max_tokens=max_tokens, num_oov_indices=num_oov_indices) + return Feature(dtype='string', preprocessor=preprocessor, output_mode=output_mode) + + @classmethod + def string_hashed(cls, num_bins, output_mode='one_hot', name=None): + name = name or auto_name('string_hashed') + preprocessor = layers.Hashing(name=f'{name}_preprocessor', num_bins=num_bins) + return Feature(dtype='string', preprocessor=preprocessor, output_mode=output_mode) + + @classmethod + def integer_hashed(cls, num_bins, output_mode='one_hot', name=None): + name = name or auto_name('integer_hashed') + preprocessor = layers.Hashing(name=f'{name}_preprocessor', num_bins=num_bins) + return Feature(dtype='int32', preprocessor=preprocessor, output_mode=output_mode) + + def __init__(self, features, output_mode='concat', crosses=None, crossing_dim=32, hashing_dim=32, num_discretization_bins=32, name=None): + super().__init__(name=name) + if not features: + raise ValueError('The `features` argument cannot be None or empty.') + self.crossing_dim = crossing_dim + self.hashing_dim = hashing_dim + self.num_discretization_bins = num_discretization_bins + self.features = {name: self._standardize_feature(name, value) for (name, value) in features.items()} + self.crosses = [] + if crosses: + feature_set = set(features.keys()) + for cross in crosses: + if isinstance(cross, dict): + cross = serialization_lib.deserialize_keras_object(cross) + if isinstance(cross, Cross): + self.crosses.append(cross) + else: + if not crossing_dim: + raise ValueError('When specifying `crosses`, the argument `crossing_dim` (dimensionality of the crossing space) should be specified as well.') + for key in cross: + if key not in feature_set: + raise ValueError(f'All features referenced in the `crosses` argument should be present in the `features` dict. Received unknown features: {cross}') + self.crosses.append(Cross(cross, crossing_dim=crossing_dim)) + self.crosses_by_name = {cross.name: cross for cross in self.crosses} + if output_mode not in {'dict', 'concat'}: + raise ValueError(f"Invalid value for argument `output_mode`. Expected one of {{'dict', 'concat'}}. 
Received: output_mode={output_mode}") + self.output_mode = output_mode + self.inputs = {name: self._feature_to_input(name, value) for (name, value) in self.features.items()} + self.preprocessors = {name: value.preprocessor for (name, value) in self.features.items()} + self.encoded_features = None + self.crossers = {cross.name: self._cross_to_crosser(cross) for cross in self.crosses} + self.one_hot_encoders = {} + self._is_adapted = False + self.concat = None + self._preprocessed_features_names = None + self._crossed_features_names = None + self._sublayers_built = False + + def _feature_to_input(self, name, feature): + return layers.Input(shape=(1,), dtype=feature.dtype, name=name) + + def _standardize_feature(self, name, feature): + if isinstance(feature, Feature): + return feature + if isinstance(feature, dict): + return serialization_lib.deserialize_keras_object(feature) + if feature == 'float': + return self.float(name=name) + elif feature == 'float_normalized': + return self.float_normalized(name=name) + elif feature == 'float_rescaled': + return self.float_rescaled(name=name) + elif feature == 'float_discretized': + return self.float_discretized(name=name, num_bins=self.num_discretization_bins) + elif feature == 'integer_categorical': + return self.integer_categorical(name=name) + elif feature == 'string_categorical': + return self.string_categorical(name=name) + elif feature == 'integer_hashed': + return self.integer_hashed(self.hashing_dim, name=name) + elif feature == 'string_hashed': + return self.string_hashed(self.hashing_dim, name=name) + else: + raise ValueError(f'Invalid feature type: {feature}') + + def _cross_to_crosser(self, cross): + return layers.HashedCrossing(cross.crossing_dim, name=cross.name) + + def _list_adaptable_preprocessors(self): + adaptable_preprocessors = [] + for name in self.features.keys(): + preprocessor = self.preprocessors[name] + if isinstance(preprocessor, layers.Normalization): + if preprocessor.input_mean is not None: + continue + elif isinstance(preprocessor, layers.TextVectorization): + if preprocessor._has_input_vocabulary: + continue + if hasattr(preprocessor, 'adapt'): + adaptable_preprocessors.append(name) + return adaptable_preprocessors + + def adapt(self, dataset): + if not isinstance(dataset, tf.data.Dataset): + raise ValueError(f'`adapt()` can only be called on a tf.data.Dataset. 
Received instead: {dataset} (of type {type(dataset)})') + for name in self._list_adaptable_preprocessors(): + feature_dataset = dataset.map(lambda x: x[name]) + preprocessor = self.preprocessors[name] + for x in feature_dataset.take(1): + pass + if len(x.shape) == 0: + feature_dataset = feature_dataset.batch(32) + if len(x.shape) in {0, 1}: + feature_dataset = feature_dataset.map(lambda x: tf.expand_dims(x, -1)) + preprocessor.adapt(feature_dataset) + self._is_adapted = True + self.get_encoded_features() + self.built = True + self._sublayers_built = True + + def get_inputs(self): + self._check_if_built() + return self.inputs + + def get_encoded_features(self): + self._check_if_adapted() + if self.encoded_features is None: + preprocessed_features = self._preprocess_features(self.inputs) + crossed_features = self._cross_features(preprocessed_features) + merged_features = self._merge_features(preprocessed_features, crossed_features) + self.encoded_features = merged_features + return self.encoded_features + + def _preprocess_features(self, features): + return {name: self.preprocessors[name](features[name]) for name in features.keys()} + + def _cross_features(self, features): + all_outputs = {} + for cross in self.crosses: + inputs = [features[name] for name in cross.feature_names] + outputs = self.crossers[cross.name](inputs) + all_outputs[cross.name] = outputs + return all_outputs + + def _merge_features(self, preprocessed_features, crossed_features): + if not self._preprocessed_features_names: + self._preprocessed_features_names = sorted(preprocessed_features.keys()) + self._crossed_features_names = sorted(crossed_features.keys()) + all_names = self._preprocessed_features_names + self._crossed_features_names + all_features = [preprocessed_features[name] for name in self._preprocessed_features_names] + [crossed_features[name] for name in self._crossed_features_names] + if self.output_mode == 'dict': + output_dict = {} + else: + features_to_concat = [] + if self._sublayers_built: + for (name, feature) in zip(all_names, all_features): + encoder = self.one_hot_encoders.get(name, None) + if encoder: + feature = encoder(feature) + if self.output_mode == 'dict': + output_dict[name] = feature + else: + features_to_concat.append(feature) + if self.output_mode == 'dict': + return output_dict + else: + return self.concat(features_to_concat) + all_specs = [self.features[name] for name in self._preprocessed_features_names] + [self.crosses_by_name[name] for name in self._crossed_features_names] + for (name, feature, spec) in zip(all_names, all_features, all_specs): + if tree.is_nested(feature): + dtype = tree.flatten(feature)[0].dtype + else: + dtype = feature.dtype + dtype = backend.standardize_dtype(dtype) + if spec.output_mode == 'one_hot': + preprocessor = self.preprocessors.get(name) or self.crossers.get(name) + cardinality = None + if not dtype.startswith('int'): + raise ValueError(f"Feature '{name}' has `output_mode='one_hot'`. Thus its preprocessor should return an integer dtype. 
Instead it returns a {dtype} dtype.") + if isinstance(preprocessor, (layers.IntegerLookup, layers.StringLookup)): + cardinality = preprocessor.vocabulary_size() + elif isinstance(preprocessor, layers.CategoryEncoding): + cardinality = preprocessor.num_tokens + elif isinstance(preprocessor, layers.Discretization): + cardinality = preprocessor.num_bins + elif isinstance(preprocessor, (layers.HashedCrossing, layers.Hashing)): + cardinality = preprocessor.num_bins + else: + raise ValueError(f"Feature '{name}' has `output_mode='one_hot'`. However it isn't a standard feature and the dimensionality of its output space is not known, thus it cannot be one-hot encoded. Try using `output_mode='int'`.") + if cardinality is not None: + encoder = layers.CategoryEncoding(num_tokens=cardinality, output_mode='multi_hot') + self.one_hot_encoders[name] = encoder + feature = encoder(feature) + if self.output_mode == 'concat': + dtype = feature.dtype + if dtype.startswith('int') or dtype == 'string': + raise ValueError(f"Cannot concatenate features because feature '{name}' has not been encoded (it has dtype {dtype}). Consider using `output_mode='dict'`.") + features_to_concat.append(feature) + else: + output_dict[name] = feature + if self.output_mode == 'concat': + self.concat = TFDConcat(axis=-1) + return self.concat(features_to_concat) + else: + return output_dict + + def _check_if_adapted(self): + if not self._is_adapted: + if not self._list_adaptable_preprocessors(): + self._is_adapted = True + else: + raise ValueError('You need to call `.adapt(dataset)` on the FeatureSpace before you can start using it.') + + def _check_if_built(self): + if not self._sublayers_built: + self._check_if_adapted() + self.get_encoded_features() + self._sublayers_built = True + + def _convert_input(self, x): + if not isinstance(x, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)): + if not isinstance(x, (list, tuple, int, float)): + x = backend.convert_to_numpy(x) + x = tf.convert_to_tensor(x) + return x + + def __call__(self, data): + self._check_if_built() + if not isinstance(data, dict): + raise ValueError(f'A FeatureSpace can only be called with a dict. 
Received: data={data} (of type {type(data)}') + data = {key: self._convert_input(value) for (key, value) in data.items()} + rebatched = False + for (name, x) in data.items(): + if len(x.shape) == 0: + data[name] = tf.reshape(x, (1, 1)) + rebatched = True + elif len(x.shape) == 1: + data[name] = tf.expand_dims(x, -1) + with backend_utils.TFGraphScope(): + preprocessed_data = self._preprocess_features(data) + preprocessed_data = tree.map_structure(lambda x: self._convert_input(x), preprocessed_data) + crossed_data = self._cross_features(preprocessed_data) + crossed_data = tree.map_structure(lambda x: self._convert_input(x), crossed_data) + merged_data = self._merge_features(preprocessed_data, crossed_data) + if rebatched: + if self.output_mode == 'concat': + assert merged_data.shape[0] == 1 + if backend.backend() != 'tensorflow' and (not backend_utils.in_tf_graph()): + merged_data = backend.convert_to_numpy(merged_data) + merged_data = tf.squeeze(merged_data, axis=0) + else: + for (name, x) in merged_data.items(): + if len(x.shape) == 2 and x.shape[0] == 1: + merged_data[name] = tf.squeeze(x, axis=0) + if backend.backend() != 'tensorflow' and (not backend_utils.in_tf_graph()): + merged_data = tree.map_structure(lambda x: backend.convert_to_tensor(x, dtype=x.dtype), merged_data) + return merged_data + + def get_config(self): + return {'features': serialization_lib.serialize_keras_object(self.features), 'output_mode': self.output_mode, 'crosses': serialization_lib.serialize_keras_object(self.crosses), 'crossing_dim': self.crossing_dim, 'hashing_dim': self.hashing_dim, 'num_discretization_bins': self.num_discretization_bins} + + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_build_config(self): + return {name: feature.preprocessor.get_build_config() for (name, feature) in self.features.items()} + + def build_from_config(self, config): + for name in config.keys(): + preprocessor = self.features[name].preprocessor + if not preprocessor.built: + preprocessor.build_from_config(config[name]) + self._is_adapted = True + + def save(self, filepath): + saving_lib.save_model(self, filepath) + + def save_own_variables(self, store): + return + + def load_own_variables(self, store): + return + +class TFDConcat(TFDataLayer): + + def __init__(self, axis, **kwargs): + super().__init__(**kwargs) + self.axis = axis + + def call(self, xs): + return self.backend.numpy.concatenate(xs, axis=self.axis) + +class TFDIdentity(TFDataLayer): + + def call(self, x): + return x + +# File: keras-master/keras/src/layers/preprocessing/hashed_crossing.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation +from keras.src.utils import backend_utils +from keras.src.utils import numerical_utils +from keras.src.utils import tf_utils +from keras.src.utils.module_utils import tensorflow as tf + +@keras_export('keras.layers.HashedCrossing') +class HashedCrossing(Layer): + + def __init__(self, num_bins, output_mode='int', sparse=False, name=None, dtype=None, **kwargs): + if not tf.available: + raise ImportError('Layer HashedCrossing requires TensorFlow. 
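+
+# Illustrative usage sketch for the FeatureSpace above, assuming the public
+# `keras.utils.FeatureSpace` export and hypothetical feature names/values:
+import tensorflow as tf
+from keras.utils import FeatureSpace
+ds = tf.data.Dataset.from_tensor_slices({'age': [20, 35, 58], 'color': ['red', 'green', 'blue']})
+fs = FeatureSpace(features={'age': 'float_normalized', 'color': 'string_categorical'}, output_mode='concat')
+fs.adapt(ds)  # fits the Normalization / StringLookup sublayers
+encoded = fs({'age': 30, 'color': 'red'})  # one flat encoded feature vector
+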
Install it via `pip install tensorflow`.') + if output_mode == 'int' and dtype is None: + dtype = 'int64' + super().__init__(name=name, dtype=dtype) + if sparse and backend.backend() != 'tensorflow': + raise ValueError('`sparse=True` can only be used with the TensorFlow backend.') + argument_validation.validate_string_arg(output_mode, allowable_strings=('int', 'one_hot'), caller_name=self.__class__.__name__, arg_name='output_mode') + self.num_bins = num_bins + self.output_mode = output_mode + self.sparse = sparse + self._allow_non_tensor_positional_args = True + self._convert_input_args = False + self.supports_jit = False + + def compute_output_shape(self, input_shape): + if not len(input_shape) == 2 or not isinstance(input_shape[0], tuple) or (not isinstance(input_shape[1], tuple)): + raise ValueError(f'Expected as input a list/tuple of 2 tensors. Received input_shape={input_shape}') + if input_shape[0][-1] != input_shape[1][-1]: + raise ValueError(f'Expected the two input tensors to have identical shapes. Received input_shape={input_shape}') + if not input_shape: + if self.output_mode == 'int': + return () + return (self.num_bins,) + if self.output_mode == 'int': + return input_shape[0] + if self.output_mode == 'one_hot' and input_shape[0][-1] != 1: + return tuple(input_shape[0]) + (self.num_bins,) + return tuple(input_shape[0])[:-1] + (self.num_bins,) + + def call(self, inputs): + from keras.src.backend import tensorflow as tf_backend + self._check_at_least_two_inputs(inputs) + inputs = [tf_utils.ensure_tensor(x) for x in inputs] + self._check_input_shape_and_type(inputs) + rank = len(inputs[0].shape) + if rank < 2: + inputs = [tf_backend.numpy.expand_dims(x, -1) for x in inputs] + if rank < 1: + inputs = [tf_backend.numpy.expand_dims(x, -1) for x in inputs] + outputs = tf.sparse.cross_hashed(inputs, self.num_bins) + outputs = tf.sparse.to_dense(outputs) + if rank == 2: + outputs = tf.reshape(outputs, [-1, 1]) + elif rank == 1: + outputs = tf.reshape(outputs, [-1]) + elif rank == 0: + outputs = tf.reshape(outputs, []) + outputs = numerical_utils.encode_categorical_inputs(outputs, output_mode=self.output_mode, depth=self.num_bins, sparse=self.sparse, dtype=self.compute_dtype, backend_module=tf_backend) + return backend_utils.convert_tf_tensor(outputs, dtype=self.dtype) + + def get_config(self): + return {'num_bins': self.num_bins, 'output_mode': self.output_mode, 'sparse': self.sparse, 'name': self.name, 'dtype': self.dtype} + + def _check_at_least_two_inputs(self, inputs): + if not isinstance(inputs, (list, tuple)): + raise ValueError(f'`HashedCrossing` should be called on a list or tuple of inputs. Received: inputs={inputs}') + if len(inputs) < 2: + raise ValueError(f'`HashedCrossing` should be called on at least two inputs. Received: inputs={inputs}') + + def _check_input_shape_and_type(self, inputs): + first_shape = tuple(inputs[0].shape) + rank = len(first_shape) + if rank > 2 or (rank == 2 and first_shape[-1] != 1): + raise ValueError(f'All `HashedCrossing` inputs should have shape `()`, `(batch_size)` or `(batch_size, 1)`. Received: inputs={inputs}') + if not all((tuple(x.shape) == first_shape for x in inputs[1:])): + raise ValueError(f'All `HashedCrossing` inputs should have equal shape. Received: inputs={inputs}') + if any((isinstance(x, (tf.RaggedTensor, tf.SparseTensor)) for x in inputs)): + raise ValueError(f'All `HashedCrossing` inputs should be dense tensors. 
Received: inputs={inputs}') + if not all((tf.as_dtype(x.dtype).is_integer or x.dtype == tf.string for x in inputs)): + raise ValueError(f'All `HashedCrossing` inputs should have an integer or string dtype. Received: inputs={inputs}') + +# File: keras-master/keras/src/layers/preprocessing/hashing.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.utils import backend_utils +from keras.src.utils import numerical_utils +from keras.src.utils import tf_utils +from keras.src.utils.module_utils import tensorflow as tf + +@keras_export('keras.layers.Hashing') +class Hashing(Layer): + + def __init__(self, num_bins, mask_value=None, salt=None, output_mode='int', sparse=False, **kwargs): + if not tf.available: + raise ImportError('Layer Hashing requires TensorFlow. Install it via `pip install tensorflow`.') + if 'dtype' not in kwargs or kwargs['dtype'] is None: + kwargs['dtype'] = 'int64' if output_mode == 'int' else backend.floatx() + super().__init__(**kwargs) + if num_bins is None or num_bins <= 0: + raise ValueError(f'The `num_bins` for `Hashing` cannot be `None` or a non-positive value. Received: num_bins={num_bins}.') + if output_mode == 'int' and self.dtype_policy.name not in ('int32', 'int64'): + raise ValueError(f"""When `output_mode="int"`, `dtype` should be an integer type, 'int32' or 'int64'. Received: dtype={kwargs['dtype']}""") + accepted_output_modes = ('int', 'one_hot', 'multi_hot', 'count') + if output_mode not in accepted_output_modes: + raise ValueError(f'Invalid value for argument `output_mode`. Expected one of {accepted_output_modes}. Received: output_mode={output_mode}') + if sparse and output_mode == 'int': + raise ValueError(f'`sparse` may only be true if `output_mode` is `"one_hot"`, `"multi_hot"`, or `"count"`. Received: sparse={sparse} and output_mode={output_mode}') + self.num_bins = num_bins + self.mask_value = mask_value + self.strong_hash = True if salt is not None else False + self.output_mode = output_mode + self.sparse = sparse + self.salt = None + if salt is not None: + if isinstance(salt, (tuple, list)) and len(salt) == 2: + self.salt = list(salt) + elif isinstance(salt, int): + self.salt = [salt, salt] + else: + raise ValueError(f'The `salt` argument for `Hashing` can only be a tuple of size 2 integers, or a single integer. 
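+
+# Illustrative usage sketch for the HashedCrossing layer above; the feature
+# values are hypothetical and TensorFlow must be installed:
+import numpy as np
+from keras import layers
+feat1 = np.array([1, 5, 2, 1, 4])
+feat2 = np.array([2, 9, 42, 37, 8])
+crossed = layers.HashedCrossing(num_bins=5)((feat1, feat2))  # int64, shape (5,), values in [0, 5)
+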
Received: salt={salt}.') + self._convert_input_args = False + self._allow_non_tensor_positional_args = True + self.supports_jit = False + + def call(self, inputs): + from keras.src.backend import tensorflow as tf_backend + inputs = tf_utils.ensure_tensor(inputs) + if self.output_mode == 'one_hot' and inputs.shape[-1] == 1: + inputs = tf_backend.numpy.squeeze(inputs, axis=-1) + if isinstance(inputs, tf.SparseTensor): + indices = tf.SparseTensor(indices=inputs.indices, values=self._hash_values_to_bins(inputs.values), dense_shape=inputs.dense_shape) + else: + indices = self._hash_values_to_bins(inputs) + outputs = numerical_utils.encode_categorical_inputs(indices, output_mode=self.output_mode, depth=self.num_bins, sparse=self.sparse, dtype=self.dtype, backend_module=tf_backend) + return backend_utils.convert_tf_tensor(outputs) + + def _hash_values_to_bins(self, values): + hash_bins = self.num_bins + mask = None + if self.mask_value is not None and hash_bins > 1: + hash_bins -= 1 + mask = tf.equal(values, self.mask_value) + if values.dtype.is_floating: + values = tf.cast(values, dtype='int64') + if values.dtype != tf.string: + values = tf.as_string(values) + if self.strong_hash: + values = tf.strings.to_hash_bucket_strong(values, hash_bins, name='hash', key=self.salt) + else: + values = tf.strings.to_hash_bucket_fast(values, hash_bins, name='hash') + if mask is not None: + values = tf.add(values, tf.ones_like(values)) + values = tf.where(mask, tf.zeros_like(values), values) + return values + + def compute_output_spec(self, inputs): + if self.output_mode == 'int': + return backend.KerasTensor(shape=inputs.shape, dtype=self.dtype) + if len(inputs.shape) >= 1: + base_shape = tuple(inputs.shape)[:-1] + else: + base_shape = () + return backend.KerasTensor(shape=base_shape + (self.num_bins,), dtype=self.dtype) + + def get_config(self): + config = super().get_config() + config.update({'num_bins': self.num_bins, 'salt': self.salt, 'mask_value': self.mask_value, 'output_mode': self.output_mode, 'sparse': self.sparse}) + return config + +# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/auto_contrast.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer +from keras.src.ops.core import _saturate_cast + +@keras_export('keras.layers.AutoContrast') +class AutoContrast(BaseImagePreprocessingLayer): + _USE_BASE_FACTOR = False + _VALUE_RANGE_VALIDATION_ERROR = 'The `value_range` argument should be a list of two numbers. 
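+
+# Illustrative usage sketch for the Hashing layer above: buckets are
+# deterministic for a given `num_bins`; `salt` keys the hash differently:
+from keras import layers
+bins = layers.Hashing(num_bins=3)([['A'], ['B'], ['C'], ['D'], ['E']])  # shape (5, 1), values in [0, 3)
+salted = layers.Hashing(num_bins=3, salt=133)([['A'], ['B']])  # same inputs, different buckets
+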
' + + def __init__(self, value_range=(0, 255), **kwargs): + super().__init__(**kwargs) + self._set_value_range(value_range) + + def _set_value_range(self, value_range): + if not isinstance(value_range, (tuple, list)): + raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}') + if len(value_range) != 2: + raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}') + self.value_range = sorted(value_range) + + def transform_images(self, images, transformation=None, training=True): + original_images = images + images = self._transform_value_range(images, original_range=self.value_range, target_range=(0, 255), dtype=self.compute_dtype) + images = self.backend.cast(images, self.compute_dtype) + low = self.backend.numpy.min(images, axis=(1, 2), keepdims=True) + high = self.backend.numpy.max(images, axis=(1, 2), keepdims=True) + scale = 255.0 / (high - low) + offset = -low * scale + images = images * scale + offset + results = self.backend.numpy.clip(images, 0.0, 255.0) + results = self._transform_value_range(results, original_range=(0, 255), target_range=self.value_range, dtype=self.compute_dtype) + results = self.backend.numpy.where(self.backend.numpy.isnan(results), original_images, results) + if results.dtype == images.dtype: + return results + if backend.is_int_dtype(images.dtype): + results = self.backend.numpy.round(results) + return _saturate_cast(results, images.dtype, self.backend) + + def transform_labels(self, labels, transformation, training=True): + return labels + + def transform_bounding_boxes(self, bounding_boxes, transformation, training=True): + return bounding_boxes + + def transform_segmentation_masks(self, segmentation_masks, transformation, training=True): + return segmentation_masks + + def get_config(self): + config = super().get_config() + config.update({'value_range': self.value_range}) + return config + +# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py +from keras.src.backend import config as backend_config +from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.validation import densify_bounding_boxes +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer + +class BaseImagePreprocessingLayer(TFDataLayer): + _USE_BASE_FACTOR = True + _FACTOR_BOUNDS = (-1, 1) + + def __init__(self, factor=None, bounding_box_format=None, data_format=None, **kwargs): + super().__init__(**kwargs) + self.bounding_box_format = bounding_box_format + self.data_format = backend_config.standardize_data_format(data_format) + if self._USE_BASE_FACTOR: + factor = factor or 0.0 + self._set_factor(factor) + elif factor is not None: + raise ValueError(f'Layer {self.__class__.__name__} does not take a `factor` argument. Received: factor={factor}') + + def _set_factor(self, factor): + error_msg = f'The `factor` argument should be a number (or a list of two numbers) in the range [{self._FACTOR_BOUNDS[0]}, {self._FACTOR_BOUNDS[1]}]. 
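+
+# Illustrative usage sketch for AutoContrast above: per-image (and per-channel)
+# min/max values are remapped onto the ends of `value_range`:
+import numpy as np
+from keras import layers
+image = np.array([[[60.0], [90.0]], [[110.0], [140.0]]])  # (2, 2, 1); values span only 60-140
+stretched = layers.AutoContrast(value_range=(0, 255))(image)  # output spans 0-255
+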
Received: factor={factor}' + if isinstance(factor, (tuple, list)): + if len(factor) != 2: + raise ValueError(error_msg) + if factor[0] > self._FACTOR_BOUNDS[1] or factor[1] < self._FACTOR_BOUNDS[0]: + raise ValueError(error_msg) + (lower, upper) = sorted(factor) + elif isinstance(factor, (int, float)): + if factor < self._FACTOR_BOUNDS[0] or factor > self._FACTOR_BOUNDS[1]: + raise ValueError(error_msg) + factor = abs(factor) + (lower, upper) = [max(-factor, self._FACTOR_BOUNDS[0]), factor] + else: + raise ValueError(error_msg) + self.factor = (lower, upper) + + def get_random_transformation(self, data, training=True, seed=None): + return None + + def transform_images(self, images, transformation, training=True): + raise NotImplementedError() + + def transform_labels(self, labels, transformation, training=True): + raise NotImplementedError() + + def transform_bounding_boxes(self, bounding_boxes, transformation, training=True): + raise NotImplementedError() + + def transform_segmentation_masks(self, segmentation_masks, transformation, training=True): + raise NotImplementedError() + + def transform_single_image(self, image, transformation, training=True): + images = self.backend.numpy.expand_dims(image, axis=0) + outputs = self.transform_images(images, transformation=transformation, training=training) + return self.backend.numpy.squeeze(outputs, axis=0) + + def transform_single_label(self, label, transformation, training=True): + labels = self.backend.numpy.expand_dims(label, axis=0) + outputs = self.transform_labels(labels, transformation=transformation, training=training) + return self.backend.numpy.squeeze(outputs, axis=0) + + def transform_single_bounding_box(self, bounding_box, transformation, training=True): + bounding_boxes = self.backend.numpy.expand_dims(bounding_box, axis=0) + outputs = self.transform_bounding_boxes(bounding_boxes, transformation=transformation, training=training) + return self.backend.numpy.squeeze(outputs, axis=0) + + def transform_single_segmentation_mask(self, segmentation_mask, transformation, training=True): + segmentation_masks = self.backend.numpy.expand_dims(segmentation_mask, axis=0) + outputs = self.transform_segmentation_masks(segmentation_masks, transformation=transformation, training=training) + return self.backend.numpy.squeeze(outputs, axis=0) + + def _is_batched(self, maybe_image_batch): + shape = self.backend.core.shape(maybe_image_batch) + if len(shape) == 3: + return False + if len(shape) == 4: + return True + raise ValueError(f'Expected image tensor to have rank 3 (single image) or 4 (batch of images). Received: data.shape={shape}') + + def call(self, data, training=True): + transformation = self.get_random_transformation(data, training=training) + if isinstance(data, dict): + is_batched = self._is_batched(data['images']) + if is_batched: + data['images'] = self.transform_images(self.backend.convert_to_tensor(data['images']), transformation=transformation, training=training) + else: + data['images'] = self.transform_single_image(self.backend.convert_to_tensor(data['images']), transformation=transformation, training=training) + if 'bounding_boxes' in data: + if not self.bounding_box_format: + raise ValueError(f"You passed an input with a 'bounding_boxes' key, but you didn't specify a bounding box format. Pass a `bounding_box_format` argument to your {self.__class__.__name__} layer, e.g. 
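+
+# Illustrative subclass sketch for BaseImagePreprocessingLayer: a hypothetical
+# RandomInvert layer showing the contract of drawing one random transformation
+# and applying it consistently to every data component. The seed handling
+# mirrors the Random* layers later in this dump:
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer
+from keras.src.random.seed_generator import SeedGenerator
+
+class RandomInvert(BaseImagePreprocessingLayer):
+    _FACTOR_BOUNDS = (0, 1)
+
+    def __init__(self, factor=0.5, value_range=(0, 255), seed=None, **kwargs):
+        super().__init__(factor=factor, **kwargs)
+        self.value_range = value_range
+        self.seed = seed
+        self.generator = SeedGenerator(seed)
+
+    def get_random_transformation(self, data, training=True, seed=None):
+        if not training:
+            return None
+        if seed is None:
+            seed = self._get_seed_generator(self.backend._backend)
+        # invert with probability `factor` (the upper bound of the factor range)
+        coin = self.backend.random.uniform(shape=(), seed=seed)
+        return {'invert': self.backend.numpy.less_equal(coin, self.factor[1])}
+
+    def transform_images(self, images, transformation, training=True):
+        if not training or transformation is None:
+            return images
+        inverted = self.value_range[1] - images + self.value_range[0]
+        return self.backend.numpy.where(transformation['invert'], inverted, images)
+
+    def transform_labels(self, labels, transformation, training=True):
+        return labels
+
+    def transform_bounding_boxes(self, bounding_boxes, transformation, training=True):
+        return bounding_boxes
+
+    def transform_segmentation_masks(self, segmentation_masks, transformation, training=True):
+        return segmentation_masks
+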
`bounding_box_format='xyxy'`.") + bounding_boxes = densify_bounding_boxes(data['bounding_boxes'], backend=self.backend) + if is_batched: + data['bounding_boxes'] = self.transform_bounding_boxes(bounding_boxes, transformation=transformation, training=training) + else: + data['bounding_boxes'] = self.transform_single_bounding_box(bounding_boxes, transformation=transformation, training=training) + if 'labels' in data: + if is_batched: + data['labels'] = self.transform_labels(self.backend.convert_to_tensor(data['labels']), transformation=transformation, training=training) + else: + data['labels'] = self.transform_single_label(self.backend.convert_to_tensor(data['labels']), transformation=transformation, training=training) + if 'segmentation_masks' in data: + if is_batched: + data['segmentation_masks'] = self.transform_segmentation_masks(data['segmentation_masks'], transformation=transformation, training=training) + else: + data['segmentation_masks'] = self.transform_single_segmentation_mask(data['segmentation_masks'], transformation=transformation, training=training) + return data + if self._is_batched(data): + return self.transform_images(self.backend.convert_to_tensor(data), transformation=transformation, training=training) + return self.transform_single_image(self.backend.convert_to_tensor(data), transformation=transformation, training=training) + + def get_config(self): + config = super().get_config() + if self.bounding_box_format is not None: + config.update({'bounding_box_format': self.bounding_box_format}) + return config + + def _transform_value_range(self, images, original_range, target_range, dtype='float32'): + if original_range[0] == target_range[0] and original_range[1] == target_range[1]: + return images + images = self.backend.cast(images, dtype=dtype) + (original_min_value, original_max_value) = self._unwrap_value_range(original_range, dtype=dtype) + (target_min_value, target_max_value) = self._unwrap_value_range(target_range, dtype=dtype) + images = (images - original_min_value) / (original_max_value - original_min_value) + scale_factor = target_max_value - target_min_value + return images * scale_factor + target_min_value + + def _unwrap_value_range(self, value_range, dtype='float32'): + (min_value, max_value) = value_range + min_value = self.backend.cast(min_value, dtype=dtype) + max_value = self.backend.cast(max_value, dtype=dtype) + return (min_value, max_value) + +# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/converters.py +"""""" +from keras.src import ops +from keras.src.utils import tf_utils + +class RequiresImagesException(Exception): + pass +ALL_AXES = 4 + +def _center_yxhw_to_xyxy(boxes, images=None, image_shape=None): + (y, x, height, width) = ops.split(boxes, ALL_AXES, axis=-1) + return ops.concatenate([x - width / 2.0, y - height / 2.0, x + width / 2.0, y + height / 2.0], axis=-1) + +def _center_xywh_to_xyxy(boxes, images=None, image_shape=None): + (x, y, width, height) = ops.split(boxes, ALL_AXES, axis=-1) + return ops.concatenate([x - width / 2.0, y - height / 2.0, x + width / 2.0, y + height / 2.0], axis=-1) + +def _xywh_to_xyxy(boxes, images=None, image_shape=None): + (x, y, width, height) = ops.split(boxes, ALL_AXES, axis=-1) + return ops.concatenate([x, y, x + width, y + height], axis=-1) + +def _xyxy_to_center_yxhw(boxes, images=None, image_shape=None): + (left, top, right, bottom) = ops.split(boxes, ALL_AXES, axis=-1) + return ops.concatenate([(top + bottom) / 2.0, (left + right) / 2.0, bottom - top, right - left], 
axis=-1) + +def _rel_xywh_to_xyxy(boxes, images=None, image_shape=None): + (image_height, image_width) = _image_shape(images, image_shape, boxes) + (x, y, width, height) = ops.split(boxes, ALL_AXES, axis=-1) + return ops.concatenate([image_width * x, image_height * y, image_width * (x + width), image_height * (y + height)], axis=-1) + +def _xyxy_no_op(boxes, images=None, image_shape=None): + return boxes + +def _xyxy_to_xywh(boxes, images=None, image_shape=None): + (left, top, right, bottom) = ops.split(boxes, ALL_AXES, axis=-1) + return ops.concatenate([left, top, right - left, bottom - top], axis=-1) + +def _xyxy_to_rel_xywh(boxes, images=None, image_shape=None): + (image_height, image_width) = _image_shape(images, image_shape, boxes) + (left, top, right, bottom) = ops.split(boxes, ALL_AXES, axis=-1) + (left, right) = (left / image_width, right / image_width) + (top, bottom) = (top / image_height, bottom / image_height) + return ops.concatenate([left, top, right - left, bottom - top], axis=-1) + +def _xyxy_to_center_xywh(boxes, images=None, image_shape=None): + (left, top, right, bottom) = ops.split(boxes, ALL_AXES, axis=-1) + return ops.concatenate([(left + right) / 2.0, (top + bottom) / 2.0, right - left, bottom - top], axis=-1) + +def _rel_xyxy_to_xyxy(boxes, images=None, image_shape=None): + (image_height, image_width) = _image_shape(images, image_shape, boxes) + (left, top, right, bottom) = ops.split(boxes, ALL_AXES, axis=-1) + (left, right) = (left * image_width, right * image_width) + (top, bottom) = (top * image_height, bottom * image_height) + return ops.concatenate([left, top, right, bottom], axis=-1) + +def _xyxy_to_rel_xyxy(boxes, images=None, image_shape=None): + (image_height, image_width) = _image_shape(images, image_shape, boxes) + (left, top, right, bottom) = ops.split(boxes, ALL_AXES, axis=-1) + (left, right) = (left / image_width, right / image_width) + (top, bottom) = (top / image_height, bottom / image_height) + return ops.concatenate([left, top, right, bottom], axis=-1) + +def _yxyx_to_xyxy(boxes, images=None, image_shape=None): + (y1, x1, y2, x2) = ops.split(boxes, ALL_AXES, axis=-1) + return ops.concatenate([x1, y1, x2, y2], axis=-1) + +def _rel_yxyx_to_xyxy(boxes, images=None, image_shape=None): + (image_height, image_width) = _image_shape(images, image_shape, boxes) + (top, left, bottom, right) = ops.split(boxes, ALL_AXES, axis=-1) + (left, right) = (left * image_width, right * image_width) + (top, bottom) = (top * image_height, bottom * image_height) + return ops.concatenate([left, top, right, bottom], axis=-1) + +def _xyxy_to_yxyx(boxes, images=None, image_shape=None): + (x1, y1, x2, y2) = ops.split(boxes, ALL_AXES, axis=-1) + return ops.concatenate([y1, x1, y2, x2], axis=-1) + +def _xyxy_to_rel_yxyx(boxes, images=None, image_shape=None): + (image_height, image_width) = _image_shape(images, image_shape, boxes) + (left, top, right, bottom) = ops.split(boxes, ALL_AXES, axis=-1) + (left, right) = (left / image_width, right / image_width) + (top, bottom) = (top / image_height, bottom / image_height) + return ops.concatenate([top, left, bottom, right], axis=-1) +TO_XYXY_CONVERTERS = {'xywh': _xywh_to_xyxy, 'center_xywh': _center_xywh_to_xyxy, 'center_yxhw': _center_yxhw_to_xyxy, 'rel_xywh': _rel_xywh_to_xyxy, 'xyxy': _xyxy_no_op, 'rel_xyxy': _rel_xyxy_to_xyxy, 'yxyx': _yxyx_to_xyxy, 'rel_yxyx': _rel_yxyx_to_xyxy} +FROM_XYXY_CONVERTERS = {'xywh': _xyxy_to_xywh, 'center_xywh': _xyxy_to_center_xywh, 'center_yxhw': _xyxy_to_center_yxhw, 'rel_xywh': _xyxy_to_rel_xywh, 
'xyxy': _xyxy_no_op, 'rel_xyxy': _xyxy_to_rel_xyxy, 'yxyx': _xyxy_to_yxyx, 'rel_yxyx': _xyxy_to_rel_yxyx}
+
+def convert_format(boxes, source, target, images=None, image_shape=None, dtype='float32'):
+    f"""Converts bounding_boxes from one format to another.
+
+    Supported formats are:
+
+    - `"xyxy"`, also known as `corners` format. In this format the first four
+      axes represent `[left, top, right, bottom]` in that order.
+    - `"rel_xyxy"`. In this format, the axes are the same as `"xyxy"` but the x
+      coordinates are normalized using the image width, and the y coordinates
+      the image height. All values in `rel_xyxy` are in the range `(0, 1)`.
+    - `"xywh"`. In this format the first four axes represent
+      `[left, top, width, height]`.
+    - `"rel_xywh"`. In this format the first four axes represent
+      `[left, top, width, height]`, just like `"xywh"`. Unlike `"xywh"`, the
+      values are in the range (0, 1) instead of absolute pixel values.
+    - `"center_xyWH"`. In this format the first two coordinates represent the x
+      and y coordinates of the center of the bounding box, while the last two
+      represent the width and height of the bounding box.
+    - `"center_yxHW"`. In this format the first two coordinates represent the y
+      and x coordinates of the center of the bounding box, while the last two
+      represent the height and width of the bounding box.
+    - `"yxyx"`. In this format the first four axes represent
+      `[top, left, bottom, right]` in that order.
+    - `"rel_yxyx"`. In this format, the axes are the same as `"yxyx"` but the x
+      coordinates are normalized using the image width, and the y coordinates
+      the image height. All values in `rel_yxyx` are in the range (0, 1).
+
+    Formats are case insensitive. It is recommended that you capitalize width
+    and height to maximize the visual difference between `"xyWH"` and `"xyxy"`.
+
+    Relative formats, abbreviated `rel`, make use of the shapes of the `images`
+    passed. In these formats, the coordinates, widths, and heights are all
+    specified as percentages of the host image.
+
+    Example:
+
+    ```python
+    boxes = {{
+        "boxes": [TODO],
+        "labels": [TODO],
+    }}
+    boxes_in_xywh = keras.utils.bounding_boxes.convert_format(
+        boxes,
+        source='xyxy',
+        target='xyWH'
+    )
+    ```
+
+    Args:
+        boxes: tensor representing bounding boxes in the format specified in
+            the `source` parameter. `boxes` can optionally have extra
+            dimensions stacked on the final axis to store metadata. boxes
+            should be a 3D tensor, with the shape `[batch_size, num_boxes, 4]`.
+            Alternatively, boxes can be a dictionary with key 'boxes'
+            containing a tensor matching the aforementioned spec.
+        source: One of {' '.join([f'"{f}"' for f in TO_XYXY_CONVERTERS.keys()])}.
+            Used to specify the original format of the `boxes` parameter.
+        target: One of {' '.join([f'"{f}"' for f in TO_XYXY_CONVERTERS.keys()])}.
+            Used to specify the destination format of the `boxes` parameter.
+        images: (Optional) a batch of images aligned with `boxes` on the first
+            axis. Should be at least 3 dimensions, with the first 3 dimensions
+            representing: `[batch_size, height, width]`. Used in some
+            converters to compute relative pixel values of the bounding box
+            dimensions. Required when transforming from a rel format to a
+            non-rel format.
+        dtype: the data type to use when transforming the boxes, defaults to
+            `"float32"`.
+    """
+    if isinstance(boxes, dict):
+        converted_boxes = boxes.copy()
+        converted_boxes['boxes'] = convert_format(boxes['boxes'], source=source, target=target, images=images, image_shape=image_shape, dtype=dtype)
+        return converted_boxes
+    if boxes.shape[-1] is not None and boxes.shape[-1] != 4:
+        raise ValueError(f'Expected `boxes` to be a Tensor with a final dimension of `4`. Instead, got `boxes.shape={boxes.shape}`.')
+    if images is not None and image_shape is not None:
+        raise ValueError(f'convert_format() expects either `images` or `image_shape`, but not both. Received images={images} image_shape={image_shape}')
+    _validate_image_shape(image_shape)
+    source = source.lower()
+    target = target.lower()
+    if source not in TO_XYXY_CONVERTERS:
+        raise ValueError(f'`convert_format()` received an unsupported format for the argument `source`. `source` should be one of {TO_XYXY_CONVERTERS.keys()}. Got source={source}')
+    if target not in FROM_XYXY_CONVERTERS:
+        raise ValueError(f'`convert_format()` received an unsupported format for the argument `target`. `target` should be one of {FROM_XYXY_CONVERTERS.keys()}. Got target={target}')
+    boxes = ops.cast(boxes, dtype)
+    if source == target:
+        return boxes
+    if source.startswith('rel') and target.startswith('rel'):
+        source = source.replace('rel_', '', 1)
+        target = target.replace('rel_', '', 1)
+    (boxes, images, squeeze) = _format_inputs(boxes, images)
+    to_xyxy_fn = TO_XYXY_CONVERTERS[source]
+    from_xyxy_fn = FROM_XYXY_CONVERTERS[target]
+    try:
+        in_xyxy = to_xyxy_fn(boxes, images=images, image_shape=image_shape)
+        result = from_xyxy_fn(in_xyxy, images=images, image_shape=image_shape)
+    except RequiresImagesException:
+        raise ValueError(f'convert_format() must receive `images` or `image_shape` when transforming between relative and absolute formats. convert_format() received source=`{source}`, target=`{target}`, but images={images} and image_shape={image_shape}.')
+    return _format_outputs(result, squeeze)
+
+def _format_inputs(boxes, images):
+    boxes_rank = len(boxes.shape)
+    if boxes_rank > 3:
+        raise ValueError(f'Expected len(boxes.shape)=2, or len(boxes.shape)=3, got len(boxes.shape)={boxes_rank}')
+    boxes_includes_batch = boxes_rank == 3
+    if images is not None:
+        images_rank = len(images.shape)
+        if images_rank > 4:
+            raise ValueError(f'Expected len(images.shape)=3, or len(images.shape)=4, got len(images.shape)={images_rank}')
+        images_include_batch = images_rank == 4
+        if boxes_includes_batch != images_include_batch:
+            raise ValueError(f'convert_format() expects both boxes and images to be batched, or both boxes and images to be unbatched. Received len(boxes.shape)={boxes_rank}, len(images.shape)={images_rank}. 
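+
+# Illustrative usage sketch for `convert_format`, wrapped in a (hypothetical)
+# helper so it only runs once the module is fully defined. `image_shape` is
+# (height, width, channels), so no `images` tensor is needed:
+def _convert_format_example():
+    import numpy as np
+    xyxy = np.array([[[10.0, 20.0, 110.0, 220.0]]])  # (batch=1, num_boxes=1, 4)
+    # x coordinates are divided by the image width, y coordinates by the
+    # image height; expected result: [[[0.05, 0.1, 0.55, 1.1]]]
+    return convert_format(xyxy, source='xyxy', target='rel_xyxy', image_shape=(200, 200, 3))
+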
Expected either len(boxes.shape)=2 AND len(images.shape)=3, or len(boxes.shape)=3 AND len(images.shape)=4.') + if not images_include_batch: + images = ops.expand_dims(images, axis=0) + if not boxes_includes_batch: + return (ops.expand_dims(boxes, axis=0), images, True) + return (boxes, images, False) + +def _validate_image_shape(image_shape): + if image_shape is None: + return + if isinstance(image_shape, (tuple, list)): + if len(image_shape) != 3: + raise ValueError(f'image_shape should be of length 3, but got image_shape={image_shape}') + return + if ops.is_tensor(image_shape): + if len(image_shape.shape) > 1: + raise ValueError(f'image_shape.shape should be (3,), but got image_shape.shape={image_shape.shape}') + if image_shape.shape[0] != 3: + raise ValueError(f'image_shape.shape should be (3,), but got image_shape.shape={image_shape.shape}') + return + raise ValueError(f'Expected image_shape to be either a tuple, a list, or a tensor. Received image_shape={image_shape}') + +def _format_outputs(boxes, squeeze): + if squeeze: + return ops.squeeze(boxes, axis=0) + return boxes + +def _image_shape(images, image_shape, boxes): + if images is None and image_shape is None: + raise RequiresImagesException() + if image_shape is None: + if not tf_utils.is_ragged_tensor(images): + image_shape = ops.shape(images) + (height, width) = (image_shape[1], image_shape[2]) + else: + height = ops.reshape(images.row_lengths(), (-1, 1)) + width = ops.reshape(ops.max(images.row_lengths(axis=2), 1), (-1, 1)) + height = ops.expand_dims(height, axis=-1) + width = ops.expand_dims(width, axis=-1) + else: + (height, width) = (image_shape[0], image_shape[1]) + return (ops.cast(height, boxes.dtype), ops.cast(width, boxes.dtype)) + +# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/formats.py +class XYXY: + LEFT = 0 + TOP = 1 + RIGHT = 2 + BOTTOM = 3 + +class REL_XYXY: + LEFT = 0 + TOP = 1 + RIGHT = 2 + BOTTOM = 3 + +class CENTER_XYWH: + X = 0 + Y = 1 + WIDTH = 2 + HEIGHT = 3 + +class XYWH: + X = 0 + Y = 1 + WIDTH = 2 + HEIGHT = 3 + +class REL_XYWH: + X = 0 + Y = 1 + WIDTH = 2 + HEIGHT = 3 + +class YXYX: + TOP = 0 + LEFT = 1 + BOTTOM = 2 + RIGHT = 3 + +class REL_YXYX: + TOP = 0 + LEFT = 1 + BOTTOM = 2 + RIGHT = 3 + +# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/validation.py +from keras.src import backend as current_backend +from keras.src.utils import tf_utils + +def densify_bounding_boxes(bounding_boxes, max_boxes=None, boxes_default_value=0, labels_default_value=-1, backend=None): + validate_bounding_boxes(bounding_boxes) + boxes = bounding_boxes['boxes'] + labels = bounding_boxes['labels'] + backend = backend or current_backend + if isinstance(boxes, list): + if boxes and isinstance(boxes[0], list): + if boxes[0] and isinstance(boxes[0][0], list): + if not isinstance(labels[0][0], int): + raise ValueError(f"If providing `bounding_boxes['labels']` as a list, it should contain integer labels. Received: bounding_boxes['labels']={labels}") + if max_boxes is None: + max_boxes = max([len(b) for b in boxes]) + new_boxes = [] + new_labels = [] + for (b, l) in zip(boxes, labels): + if len(b) >= max_boxes: + new_boxes.append(b[:max_boxes]) + new_labels.append(l[:max_boxes]) + else: + num_boxes_to_add = max_boxes - len(b) + added_boxes = [[boxes_default_value, boxes_default_value, boxes_default_value, boxes_default_value] for _ in range(num_boxes_to_add)] + new_boxes.append(b + added_boxes) + new_labels.append(l + [labels_default_value for _ in range(num_boxes_to_add)]) + elif max_boxes and len(boxes) >= max_boxes: + new_boxes = boxes[:max_boxes] + new_labels = labels[:max_boxes] + else: + num_boxes_to_add = max_boxes - len(boxes) + added_boxes = [[boxes_default_value, boxes_default_value, boxes_default_value, boxes_default_value] for _ in range(num_boxes_to_add)] + new_boxes = boxes + added_boxes + new_labels = labels + [labels_default_value for _ in range(num_boxes_to_add)] + return {'boxes': backend.convert_to_tensor(new_boxes, dtype='int32'), 'labels': backend.convert_to_tensor(new_labels, dtype='int32')} + if tf_utils.is_ragged_tensor(boxes): + bounding_boxes['boxes'] = bounding_boxes['boxes'].to_tensor(default_value=boxes_default_value, shape='TODO') + bounding_boxes['labels'] = bounding_boxes['labels'].to_tensor(default_value=labels_default_value, shape='TODO') + return bounding_boxes + bounding_boxes['boxes'] = backend.convert_to_tensor(boxes, dtype='int32') + bounding_boxes['labels'] = backend.convert_to_tensor(labels) + return bounding_boxes + +def validate_bounding_boxes(bounding_boxes): + if not isinstance(bounding_boxes, dict) or 'labels' not in bounding_boxes or 'boxes' not in bounding_boxes: + raise ValueError(f"Expected `bounding_boxes` argument to be a dict with keys 'boxes' and 'labels'. Received: bounding_boxes={bounding_boxes}") + boxes = bounding_boxes['boxes'] + labels = bounding_boxes['labels'] + if isinstance(boxes, list): + if not isinstance(labels, list): + raise ValueError(f"If `bounding_boxes['boxes']` is a list, then `bounding_boxes['labels']` must also be a list. Received: bounding_boxes['labels']={labels}") + if len(boxes) != len(labels): + raise ValueError(f"If `bounding_boxes['boxes']` and `bounding_boxes['labels']` are both lists, they must have the same length. Received: len(bounding_boxes['boxes'])={len(boxes)} and len(bounding_boxes['labels'])={len(labels)}.") + elif tf_utils.is_ragged_tensor(boxes): + if not tf_utils.is_ragged_tensor(labels): + raise ValueError(f"If `bounding_boxes['boxes']` is a Ragged tensor, `bounding_boxes['labels']` must also be a Ragged tensor. Received: bounding_boxes['labels']={labels}") + else: + boxes_shape = current_backend.shape(boxes) + labels_shape = current_backend.shape(labels) + if len(boxes_shape) == 2: + if len(labels_shape) not in {1, 2}: + raise ValueError(f"Found bounding_boxes['boxes'].shape={boxes_shape} and expected bounding_boxes['labels'] to have rank 1 or 2, but received: bounding_boxes['labels'].shape={labels_shape}") + elif len(boxes_shape) == 3: + if len(labels_shape) not in {2, 3}: + raise ValueError(f"Found bounding_boxes['boxes'].shape={boxes_shape} and expected bounding_boxes['labels'] to have rank 2 or 3, but received: bounding_boxes['labels'].shape={labels_shape}") + else: + raise ValueError(f"Expected `bounding_boxes['boxes']` to have rank 2 or 3, with shape (num_boxes, 4) or (batch_size, num_boxes, 4). 
Received: bounding_boxes['boxes'].shape={boxes_shape}") + +# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/center_crop.py +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer +from keras.src.utils import image_utils + +@keras_export('keras.layers.CenterCrop') +class CenterCrop(BaseImagePreprocessingLayer): + _USE_BASE_FACTOR = False + + def __init__(self, height, width, data_format=None, **kwargs): + super().__init__(data_format=data_format, **kwargs) + self.height = height + self.width = width + + def transform_labels(self, labels, transformation, training=True): + return labels + + def transform_bounding_boxes(self, bounding_boxes, transformation, training=True): + raise NotImplementedError + + def transform_segmentation_masks(self, segmentation_masks, transformation, training=True): + return self.transform_images(segmentation_masks, transformation, training=training) + + def transform_images(self, images, transformation=None, training=True): + inputs = self.backend.cast(images, self.compute_dtype) + if self.data_format == 'channels_first': + init_height = inputs.shape[-2] + init_width = inputs.shape[-1] + else: + init_height = inputs.shape[-3] + init_width = inputs.shape[-2] + if init_height is None or init_width is None: + raise ValueError(f'At this time, CenterCrop can only process images with a static spatial shape. Received: inputs.shape={inputs.shape}') + h_diff = init_height - self.height + w_diff = init_width - self.width + h_start = int(h_diff / 2) + w_start = int(w_diff / 2) + if h_diff >= 0 and w_diff >= 0: + if len(inputs.shape) == 4: + if self.data_format == 'channels_first': + return inputs[:, :, h_start:h_start + self.height, w_start:w_start + self.width] + return inputs[:, h_start:h_start + self.height, w_start:w_start + self.width, :] + elif len(inputs.shape) == 3: + if self.data_format == 'channels_first': + return inputs[:, h_start:h_start + self.height, w_start:w_start + self.width] + return inputs[h_start:h_start + self.height, w_start:w_start + self.width, :] + return image_utils.smart_resize(inputs, [self.height, self.width], data_format=self.data_format, backend_module=self.backend) + + def compute_output_shape(self, input_shape): + input_shape = list(input_shape) + if isinstance(input_shape[0], (list, tuple)) or len(input_shape) not in (3, 4): + raise ValueError('`input_shape` must be a non-nested tuple or list of rank-1 with size 3 (unbatched) or 4 (batched). 
') + if len(input_shape) == 4: + if self.data_format == 'channels_last': + input_shape[1] = self.height + input_shape[2] = self.width + else: + input_shape[2] = self.height + input_shape[3] = self.width + elif self.data_format == 'channels_last': + input_shape[0] = self.height + input_shape[1] = self.width + else: + input_shape[1] = self.height + input_shape[2] = self.width + return tuple(input_shape) + + def get_config(self): + base_config = super().get_config() + config = {'height': self.height, 'width': self.width, 'data_format': self.data_format} + return {**base_config, **config} + +# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/random_brightness.py +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer +from keras.src.random.seed_generator import SeedGenerator + +@keras_export('keras.layers.RandomBrightness') +class RandomBrightness(BaseImagePreprocessingLayer): + _VALUE_RANGE_VALIDATION_ERROR = 'The `value_range` argument should be a list of two numbers. ' + + def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs): + super().__init__(factor=factor, **kwargs) + self.seed = seed + self.generator = SeedGenerator(seed) + self._set_value_range(value_range) + + def _set_value_range(self, value_range): + if not isinstance(value_range, (tuple, list)): + raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}') + if len(value_range) != 2: + raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}') + self.value_range = sorted(value_range) + + def get_random_transformation(self, data, training=True, seed=None): + if isinstance(data, dict): + images = data['images'] + else: + images = data + images_shape = self.backend.shape(images) + rank = len(images_shape) + if rank == 3: + rgb_delta_shape = (1, 1, 1) + elif rank == 4: + rgb_delta_shape = [images_shape[0], 1, 1, 1] + else: + raise ValueError(f'Expected the input image to be rank 3 or 4. 
Received inputs.shape={images_shape}') + if not training: + return {'rgb_delta': self.backend.numpy.zeros(rgb_delta_shape)} + if seed is None: + seed = self._get_seed_generator(self.backend._backend) + rgb_delta = self.backend.random.uniform(minval=self.factor[0], maxval=self.factor[1], shape=rgb_delta_shape, seed=seed) + rgb_delta = rgb_delta * (self.value_range[1] - self.value_range[0]) + return {'rgb_delta': rgb_delta} + + def transform_images(self, images, transformation, training=True): + if training: + rgb_delta = transformation['rgb_delta'] + rgb_delta = self.backend.cast(rgb_delta, images.dtype) + images += rgb_delta + return self.backend.numpy.clip(images, self.value_range[0], self.value_range[1]) + return images + + def transform_labels(self, labels, transformation, training=True): + return labels + + def transform_bounding_boxes(self, bounding_boxes, transformation, training=True): + return bounding_boxes + + def transform_segmentation_masks(self, segmentation_masks, transformation, training=True): + return segmentation_masks + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + config = {'factor': self.factor, 'value_range': self.value_range, 'seed': self.seed} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/random_contrast.py +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer +from keras.src.random.seed_generator import SeedGenerator + +@keras_export('keras.layers.RandomContrast') +class RandomContrast(BaseImagePreprocessingLayer): + _FACTOR_BOUNDS = (0, 1) + + def __init__(self, factor, seed=None, **kwargs): + super().__init__(**kwargs) + self._set_factor(factor) + self.seed = seed + self.generator = SeedGenerator(seed) + + def get_random_transformation(self, data, training=True, seed=None): + if isinstance(data, dict): + images = data['images'] + else: + images = data + images_shape = self.backend.shape(images) + rank = len(images_shape) + if rank == 3: + factor_shape = (1, 1, 1) + elif rank == 4: + factor_shape = [images_shape[0], 1, 1, 1] + else: + raise ValueError(f'Expected the input image to be rank 3 or 4. 
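+
+# Illustrative usage sketch for RandomBrightness above: factor=0.2 draws one
+# delta per image from [-0.2, 0.2] * (value_range[1] - value_range[0]), adds
+# it to every pixel, then clips back into `value_range`:
+import numpy as np
+from keras import layers
+layer = layers.RandomBrightness(factor=0.2)
+images = np.random.uniform(0, 255, size=(2, 8, 8, 3))
+out = layer(images, training=True)  # shifted and clipped to [0, 255]
+same = layer(images, training=False)  # inference: unchanged
+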
Received inputs.shape={images_shape}') + if not training: + return {'contrast_factor': self.backend.numpy.zeros(factor_shape)} + if seed is None: + seed = self._get_seed_generator(self.backend._backend) + factor = self.backend.random.uniform(shape=factor_shape, minval=1.0 - self.factor[0], maxval=1.0 + self.factor[1], seed=seed, dtype=self.compute_dtype) + return {'contrast_factor': factor} + + def transform_images(self, images, transformation, training=True): + if training: + contrast_factor = transformation['contrast_factor'] + outputs = self._adjust_contrast(images, contrast_factor) + outputs = self.backend.numpy.clip(outputs, 0, 255) + outputs = self.backend.numpy.reshape(outputs, self.backend.shape(images)) + return outputs + return images + + def transform_labels(self, labels, transformation, training=True): + return labels + + def transform_bounding_boxes(self, bounding_boxes, transformation, training=True): + return bounding_boxes + + def transform_segmentation_masks(self, segmentation_masks, transformation, training=True): + return segmentation_masks + + def _adjust_contrast(self, inputs, contrast_factor): + if self.data_format == 'channels_first': + height_axis = -2 + width_axis = -1 + else: + height_axis = -3 + width_axis = -2 + inp_mean = self.backend.numpy.mean(inputs, axis=height_axis, keepdims=True) + inp_mean = self.backend.numpy.mean(inp_mean, axis=width_axis, keepdims=True) + outputs = (inputs - inp_mean) * contrast_factor + inp_mean + return outputs + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + config = {'factor': self.factor, 'seed': self.seed} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/random_crop.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer +from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.validation import densify_bounding_boxes +from keras.src.random.seed_generator import SeedGenerator + +@keras_export('keras.layers.RandomCrop') +class RandomCrop(BaseImagePreprocessingLayer): + + def __init__(self, height, width, seed=None, data_format=None, name=None, **kwargs): + super().__init__(name=name, **kwargs) + self.height = height + self.width = width + self.seed = seed if seed is not None else backend.random.make_default_seed() + self.generator = SeedGenerator(seed) + self.data_format = backend.standardize_data_format(data_format) + if self.data_format == 'channels_first': + self.height_axis = -2 + self.width_axis = -1 + elif self.data_format == 'channels_last': + self.height_axis = -3 + self.width_axis = -2 + self.supports_masking = False + self.supports_jit = False + self._convert_input_args = False + self._allow_non_tensor_positional_args = True + + def get_random_transformation(self, data, training=True, seed=None): + if seed is None: + seed = self._get_seed_generator(self.backend._backend) + if isinstance(data, dict): + input_shape = self.backend.shape(data['images']) + else: + input_shape = self.backend.shape(data) + (input_height, input_width) = (input_shape[self.height_axis], input_shape[self.width_axis]) + if input_height is None or input_width is None: + raise ValueError(f'RandomCrop requires the input to have a fully defined height and width. 
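+
+# Illustrative usage sketch for RandomContrast above: factor=0.5 draws a scale
+# from [0.5, 1.5] per image and adjusts pixels around the per-channel spatial
+# mean, out = (x - mean) * scale + mean, then clips to [0, 255]:
+import numpy as np
+from keras import layers
+layer = layers.RandomContrast(factor=0.5)
+out = layer(np.random.uniform(0, 255, size=(2, 8, 8, 3)), training=True)
+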
Received: images.shape={input_shape}') + if training and input_height > self.height and (input_width > self.width): + h_start = self.backend.cast(self.backend.random.uniform((), 0, maxval=float(input_height - self.height + 1), seed=seed), 'int32') + w_start = self.backend.cast(self.backend.random.uniform((), 0, maxval=float(input_width - self.width + 1), seed=seed), 'int32') + else: + crop_height = int(float(input_width * self.height) / self.width) + crop_height = max(min(input_height, crop_height), 1) + crop_width = int(float(input_height * self.width) / self.height) + crop_width = max(min(input_width, crop_width), 1) + h_start = int(float(input_height - crop_height) / 2) + w_start = int(float(input_width - crop_width) / 2) + return (h_start, w_start) + + def transform_images(self, images, transformation, training=True): + images = self.backend.cast(images, self.compute_dtype) + (crop_box_hstart, crop_box_wstart) = transformation + crop_height = self.height + crop_width = self.width + if self.data_format == 'channels_last': + if len(images.shape) == 4: + images = images[:, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width, :] + else: + images = images[crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width, :] + elif len(images.shape) == 4: + images = images[:, :, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width] + else: + images = images[:, crop_box_hstart:crop_box_hstart + crop_height, crop_box_wstart:crop_box_wstart + crop_width] + shape = self.backend.shape(images) + new_height = shape[self.height_axis] + new_width = shape[self.width_axis] + if not isinstance(new_height, int) or not isinstance(new_width, int) or new_height != self.height or (new_width != self.width): + images = self.backend.image.resize(images, size=(self.height, self.width), data_format=self.data_format) + images = self.backend.cast(images, self.compute_dtype) + return images + + def transform_labels(self, labels, transformation, training=True): + return labels + + def transform_bounding_boxes(self, bounding_boxes, transformation, training=True): + (h_start, w_start) = transformation + if not self.backend.is_tensor(bounding_boxes['boxes']): + bounding_boxes = densify_bounding_boxes(bounding_boxes, backend=self.backend) + boxes = bounding_boxes['boxes'] + if len(self.backend.shape(boxes)) == 3: + boxes = self.backend.numpy.stack([self.backend.numpy.maximum(boxes[:, :, 0] - h_start, 0), self.backend.numpy.maximum(boxes[:, :, 1] - w_start, 0), self.backend.numpy.maximum(boxes[:, :, 2] - h_start, 0), self.backend.numpy.maximum(boxes[:, :, 3] - w_start, 0)], axis=-1) + else: + boxes = self.backend.numpy.stack([self.backend.numpy.maximum(boxes[:, 0] - h_start, 0), self.backend.numpy.maximum(boxes[:, 1] - w_start, 0), self.backend.numpy.maximum(boxes[:, 2] - h_start, 0), self.backend.numpy.maximum(boxes[:, 3] - w_start, 0)], axis=-1) + return {'boxes': boxes, 'labels': bounding_boxes['labels']} + + def transform_segmentation_masks(self, segmentation_masks, transformation, training=True): + return self.transform_images(segmentation_masks, transformation) + + def compute_output_shape(self, input_shape, *args, **kwargs): + input_shape = list(input_shape) + input_shape[self.height_axis] = self.height + input_shape[self.width_axis] = self.width + return tuple(input_shape) + + def get_config(self): + config = super().get_config() + config.update({'height': self.height, 'width': self.width, 'seed': self.seed, 
'data_format': self.data_format}) + return config + +# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/random_flip.py +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer +from keras.src.random.seed_generator import SeedGenerator +HORIZONTAL = 'horizontal' +VERTICAL = 'vertical' +HORIZONTAL_AND_VERTICAL = 'horizontal_and_vertical' + +@keras_export('keras.layers.RandomFlip') +class RandomFlip(BaseImagePreprocessingLayer): + _USE_BASE_FACTOR = False + + def __init__(self, mode=HORIZONTAL_AND_VERTICAL, seed=None, data_format=None, **kwargs): + super().__init__(data_format=data_format, **kwargs) + self.seed = seed + self.generator = SeedGenerator(seed) + self.mode = mode + self._convert_input_args = False + self._allow_non_tensor_positional_args = True + + def get_random_transformation(self, data, training=True, seed=None): + if not training: + return None + if isinstance(data, dict): + images = data['images'] + else: + images = data + shape = self.backend.core.shape(images) + if len(shape) == 3: + flips_shape = (1, 1, 1) + else: + flips_shape = (shape[0], 1, 1, 1) + if seed is None: + seed = self._get_seed_generator(self.backend._backend) + flips = self.backend.numpy.less_equal(self.backend.random.uniform(shape=flips_shape, seed=seed), 0.5) + return {'flips': flips} + + def transform_images(self, images, transformation, training=True): + images = self.backend.cast(images, self.compute_dtype) + if training: + return self._flip_inputs(images, transformation) + return images + + def transform_labels(self, labels, transformation, training=True): + return labels + + def transform_bounding_boxes(self, bounding_boxes, transformation, training=True): + raise NotImplementedError + + def transform_segmentation_masks(self, segmentation_masks, transformation, training=True): + return self.transform_images(segmentation_masks, transformation, training=training) + + def _flip_inputs(self, inputs, transformation): + if transformation is None: + return inputs + flips = transformation['flips'] + inputs_shape = self.backend.shape(inputs) + unbatched = len(inputs_shape) == 3 + if unbatched: + inputs = self.backend.numpy.expand_dims(inputs, axis=0) + flipped_outputs = inputs + if self.data_format == 'channels_last': + horizontal_axis = -2 + vertical_axis = -3 + else: + horizontal_axis = -1 + vertical_axis = -2 + if self.mode == HORIZONTAL or self.mode == HORIZONTAL_AND_VERTICAL: + flipped_outputs = self.backend.numpy.where(flips, self.backend.numpy.flip(flipped_outputs, axis=horizontal_axis), flipped_outputs) + if self.mode == VERTICAL or self.mode == HORIZONTAL_AND_VERTICAL: + flipped_outputs = self.backend.numpy.where(flips, self.backend.numpy.flip(flipped_outputs, axis=vertical_axis), flipped_outputs) + if unbatched: + flipped_outputs = self.backend.numpy.squeeze(flipped_outputs, axis=0) + return flipped_outputs + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + config = super().get_config() + config.update({'seed': self.seed, 'mode': self.mode, 'data_format': self.data_format}) + return config
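+
+# Illustrative usage sketch for RandomFlip above: each image in the batch is
+# flipped independently with probability 0.5; inference passes inputs through:
+import numpy as np
+from keras import layers
+layer = layers.RandomFlip(mode='horizontal', seed=42)
+flipped = layer(np.random.rand(2, 8, 8, 3), training=True)
+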
+
+# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/random_rotation.py
+import numpy as np
+from keras.src.api_export import keras_export
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer
+from keras.src.random.seed_generator import SeedGenerator
+
+@keras_export('keras.layers.RandomRotation')
+class RandomRotation(BaseImagePreprocessingLayer):
+    _VALUE_RANGE_VALIDATION_ERROR = 'The `value_range` argument should be a list of two numbers. '
+    _SUPPORTED_FILL_MODE = ('reflect', 'wrap', 'constant', 'nearest')
+    _SUPPORTED_INTERPOLATION = ('nearest', 'bilinear')
+
+    def __init__(self, factor, fill_mode='reflect', interpolation='bilinear', seed=None, fill_value=0.0, value_range=(0, 255), data_format=None, **kwargs):
+        super().__init__(factor=factor, data_format=data_format, **kwargs)
+        self.seed = seed
+        self.generator = SeedGenerator(seed)
+        self._set_value_range(value_range)
+        self.fill_mode = fill_mode
+        self.interpolation = interpolation
+        self.fill_value = fill_value
+        self.supports_jit = False
+        if self.fill_mode not in self._SUPPORTED_FILL_MODE:
+            raise NotImplementedError(f'Unknown `fill_mode` {fill_mode}. Expected one of {self._SUPPORTED_FILL_MODE}.')
+        if self.interpolation not in self._SUPPORTED_INTERPOLATION:
+            raise NotImplementedError(f'Unknown `interpolation` {interpolation}. Expected one of {self._SUPPORTED_INTERPOLATION}.')
+
+    def _set_value_range(self, value_range):
+        if not isinstance(value_range, (tuple, list)):
+            raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}')
+        if len(value_range) != 2:
+            raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}')
+        self.value_range = sorted(value_range)
+
+    def transform_images(self, images, transformation, training=True):
+        images = self.backend.cast(images, self.compute_dtype)
+        if training:
+            return self.backend.image.affine_transform(images=images, transform=transformation['rotation_matrix'], interpolation=self.interpolation, fill_mode=self.fill_mode, fill_value=self.fill_value, data_format=self.data_format)
+        return images
+
+    def transform_labels(self, labels, transformation, training=True):
+        return labels
+
+    def transform_bounding_boxes(self, bounding_boxes, transformation, training=True):
+        raise NotImplementedError
+
+    def transform_segmentation_masks(self, segmentation_masks, transformation, training=True):
+        return self.transform_images(segmentation_masks, transformation, training=training)
+
+    def get_random_transformation(self, data, training=True, seed=None):
+        if not training:
+            return None
+        if isinstance(data, dict):
+            images = data['images']
+        else:
+            images = data
+        shape = self.backend.core.shape(images)
+        if len(shape) == 4:
+            if self.data_format == 'channels_last':
+                batch_size = shape[0]
+                image_height = shape[1]
+                image_width = shape[2]
+            else:
+                batch_size = shape[1]
+                image_height = shape[2]
+                image_width = shape[3]
+        else:
+            batch_size = 1
+            if self.data_format == 'channels_last':
+                image_height = shape[0]
+                image_width = shape[1]
+            else:
+                image_height = shape[1]
+                image_width = shape[2]
+        lower = self.factor[0] * 2.0 * self.backend.convert_to_tensor(np.pi)
+        upper = self.factor[1] * 2.0 * self.backend.convert_to_tensor(np.pi)
+        if seed is None:
+            seed = self._get_seed_generator(self.backend._backend)
+        angle = self.backend.random.uniform(shape=(batch_size,), minval=lower, maxval=upper, seed=seed)
+        cos_theta = self.backend.numpy.cos(angle)
+        sin_theta = self.backend.numpy.sin(angle)
+        image_height = self.backend.core.cast(image_height, cos_theta.dtype)
+        image_width = self.backend.core.cast(image_width, cos_theta.dtype)
+        x_offset = (image_width - 1 - (cos_theta * (image_width - 1) - sin_theta * (image_height - 1))) / 2.0
+        y_offset = (image_height - 1 - (sin_theta * (image_width - 1) + cos_theta * (image_height - 1))) / 2.0
+        rotation_matrix = self.backend.numpy.concatenate([self.backend.numpy.cos(angle)[:, None], -self.backend.numpy.sin(angle)[:, None], x_offset[:, None], self.backend.numpy.sin(angle)[:, None], self.backend.numpy.cos(angle)[:, None], y_offset[:, None], self.backend.numpy.zeros((batch_size, 2))], axis=1)
+        if len(shape) == 3:
+            rotation_matrix = self.backend.numpy.squeeze(rotation_matrix, axis=0)
+        return {'rotation_matrix': rotation_matrix}
+
+    def compute_output_shape(self, input_shape):
+        return input_shape
+
+    def get_config(self):
+        config = {'factor': self.factor, 'value_range': self.value_range, 'data_format': self.data_format, 'fill_mode': self.fill_mode, 'fill_value': self.fill_value, 'interpolation': self.interpolation, 'seed': self.seed}
+        base_config = super().get_config()
+        return {**base_config, **config}
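+
+# [Editor's example -- not part of the Keras dump above.] The `concatenate`
+# above builds one flattened (batch, 8) affine row per image, in the order
+# [a0, a1, a2, b0, b1, b2, 0, 0], centered so rotation happens about the image
+# midpoint. A NumPy sketch of the same construction for a single angle:
+import numpy as np
+def rotation_transform(angle, image_height, image_width):
+    # Mirrors the offset math in `get_random_transformation` above.
+    cos_t, sin_t = np.cos(angle), np.sin(angle)
+    x_offset = (image_width - 1 - (cos_t * (image_width - 1) - sin_t * (image_height - 1))) / 2.0
+    y_offset = (image_height - 1 - (sin_t * (image_width - 1) + cos_t * (image_height - 1))) / 2.0
+    return np.array([cos_t, -sin_t, x_offset, sin_t, cos_t, y_offset, 0.0, 0.0])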
+
+# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/random_translation.py
+from keras.src.api_export import keras_export
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer
+from keras.src.random.seed_generator import SeedGenerator
+
+@keras_export('keras.layers.RandomTranslation')
+class RandomTranslation(BaseImagePreprocessingLayer):
+    _USE_BASE_FACTOR = False
+    _FACTOR_VALIDATION_ERROR = 'The `factor` argument should be a number (or a list of two numbers) in the range [-1.0, 1.0]. '
+    _SUPPORTED_FILL_MODE = ('reflect', 'wrap', 'constant', 'nearest')
+    _SUPPORTED_INTERPOLATION = ('nearest', 'bilinear')
+
+    def __init__(self, height_factor, width_factor, fill_mode='reflect', interpolation='bilinear', seed=None, fill_value=0.0, data_format=None, **kwargs):
+        super().__init__(data_format=data_format, **kwargs)
+        self.height_factor = height_factor
+        (self.height_lower, self.height_upper) = self._set_factor(height_factor, 'height_factor')
+        self.width_factor = width_factor
+        (self.width_lower, self.width_upper) = self._set_factor(width_factor, 'width_factor')
+        if fill_mode not in self._SUPPORTED_FILL_MODE:
+            raise NotImplementedError(f'Unknown `fill_mode` {fill_mode}. Expected one of {self._SUPPORTED_FILL_MODE}.')
+        if interpolation not in self._SUPPORTED_INTERPOLATION:
+            raise NotImplementedError(f'Unknown `interpolation` {interpolation}. Expected one of {self._SUPPORTED_INTERPOLATION}.')
+        self.fill_mode = fill_mode
+        self.fill_value = fill_value
+        self.interpolation = interpolation
+        self.seed = seed
+        self.generator = SeedGenerator(seed)
+        self.supports_jit = False
+
+    def _set_factor(self, factor, factor_name):
+        if isinstance(factor, (tuple, list)):
+            if len(factor) != 2:
+                raise ValueError(self._FACTOR_VALIDATION_ERROR + f'Received: {factor_name}={factor}')
+            self._check_factor_range(factor[0])
+            self._check_factor_range(factor[1])
+            (lower, upper) = sorted(factor)
+        elif isinstance(factor, (int, float)):
+            self._check_factor_range(factor)
+            factor = abs(factor)
+            (lower, upper) = [-factor, factor]
+        else:
+            raise ValueError(self._FACTOR_VALIDATION_ERROR + f'Received: {factor_name}={factor}')
+        return (lower, upper)
+
+    def _check_factor_range(self, input_number):
+        if input_number > 1.0 or input_number < -1.0:
+            raise ValueError(self._FACTOR_VALIDATION_ERROR + f'Received: input_number={input_number}')
+
+    def transform_images(self, images, transformation, training=True):
+        images = self.backend.cast(images, self.compute_dtype)
+        if training:
+            return self._translate_inputs(images, transformation)
+        return images
+
+    def transform_labels(self, labels, transformation, training=True):
+        return labels
+
+    def transform_bounding_boxes(self, bounding_boxes, transformation, training=True):
+        raise NotImplementedError
+
+    def transform_segmentation_masks(self, segmentation_masks, transformation, training=True):
+        return self.transform_images(segmentation_masks, transformation, training=training)
+
+    def get_random_transformation(self, data, training=True, seed=None):
+        if not training:
+            return None
+        if isinstance(data, dict):
+            images = data['images']
+        else:
+            images = data
+        images_shape = self.backend.shape(images)
+        unbatched = len(images_shape) == 3
+        if unbatched:
+            images_shape = self.backend.shape(images)
+            batch_size = 1
+        else:
+            batch_size = images_shape[0]
+        if self.data_format == 'channels_first':
+            height = images_shape[-2]
+            width = images_shape[-1]
+        else:
+            height = images_shape[-3]
+            width = images_shape[-2]
+        if seed is None:
+            seed = self._get_seed_generator(self.backend._backend)
+        height_translate = self.backend.random.uniform(minval=self.height_lower, maxval=self.height_upper, shape=[batch_size, 1], seed=seed)
+        height_translate = self.backend.numpy.multiply(height_translate, height)
+        width_translate = self.backend.random.uniform(minval=self.width_lower, maxval=self.width_upper, shape=[batch_size, 1], seed=seed)
+        width_translate = self.backend.numpy.multiply(width_translate, width)
+        translations = self.backend.cast(self.backend.numpy.concatenate([width_translate, height_translate], axis=1), dtype='float32')
+        return {'translations': translations}
+
+    def _translate_inputs(self, inputs, transformation):
+        if transformation is None:
+            return inputs
+        inputs_shape = self.backend.shape(inputs)
+        unbatched = len(inputs_shape) == 3
+        if unbatched:
+            inputs = self.backend.numpy.expand_dims(inputs, axis=0)
+        translations = transformation['translations']
+        outputs = self.backend.image.affine_transform(inputs, transform=self._get_translation_matrix(translations), interpolation=self.interpolation, fill_mode=self.fill_mode, fill_value=self.fill_value, data_format=self.data_format)
+        if unbatched:
+            outputs = self.backend.numpy.squeeze(outputs, axis=0)
+        return outputs
+
+    def _get_translation_matrix(self, translations):
+        num_translations = self.backend.shape(translations)[0]
+        return self.backend.numpy.concatenate([self.backend.numpy.ones((num_translations, 1)), self.backend.numpy.zeros((num_translations, 1)), -translations[:, 0:1], self.backend.numpy.zeros((num_translations, 1)), self.backend.numpy.ones((num_translations, 1)), -translations[:, 1:], self.backend.numpy.zeros((num_translations, 2))], axis=1)
+
+    def compute_output_shape(self, input_shape):
+        return input_shape
+
+    def get_config(self):
+        base_config = super().get_config()
+        config = {'height_factor': self.height_factor, 'width_factor': self.width_factor, 'fill_mode': self.fill_mode, 'interpolation': self.interpolation, 'seed': self.seed, 'fill_value': self.fill_value, 'data_format': self.data_format}
+        return {**base_config, **config}
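+
+# [Editor's example -- not part of the Keras dump above.] The factors are
+# fractions of the image size, so `height_factor=0.2` shifts vertically by up
+# to 20% in either direction, while a `(-0.1, 0.1)` tuple bounds the range
+# explicitly. A minimal usage sketch, assuming any installed Keras 3 backend:
+import numpy as np
+import keras
+layer = keras.layers.RandomTranslation(height_factor=0.2, width_factor=(-0.1, 0.1), fill_mode='reflect', seed=42)
+batch = np.random.uniform(size=(2, 32, 32, 3)).astype('float32')
+shifted = layer(batch, training=True)  # same shape, contents translated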
+
+# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/random_zoom.py
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer
+from keras.src.random.seed_generator import SeedGenerator
+
+@keras_export('keras.layers.RandomZoom')
+class RandomZoom(BaseImagePreprocessingLayer):
+    _USE_BASE_FACTOR = False
+    _FACTOR_VALIDATION_ERROR = 'The `height_factor` and `width_factor` arguments should be a number (or a list of two numbers) in the range [-1.0, 1.0]. '
+    _SUPPORTED_FILL_MODE = ('reflect', 'wrap', 'constant', 'nearest')
+    _SUPPORTED_INTERPOLATION = ('nearest', 'bilinear')
+
+    def __init__(self, height_factor, width_factor=None, fill_mode='reflect', interpolation='bilinear', seed=None, fill_value=0.0, data_format=None, **kwargs):
+        super().__init__(**kwargs)
+        self.height_factor = height_factor
+        (self.height_lower, self.height_upper) = self._set_factor(height_factor, 'height_factor')
+        self.width_factor = width_factor
+        if width_factor is not None:
+            (self.width_lower, self.width_upper) = self._set_factor(width_factor, 'width_factor')
+        if fill_mode not in self._SUPPORTED_FILL_MODE:
+            raise NotImplementedError(f'Unknown `fill_mode` {fill_mode}. Expected one of {self._SUPPORTED_FILL_MODE}.')
+        if interpolation not in self._SUPPORTED_INTERPOLATION:
+            raise NotImplementedError(f'Unknown `interpolation` {interpolation}. Expected one of {self._SUPPORTED_INTERPOLATION}.')
+        self.fill_mode = fill_mode
+        self.fill_value = fill_value
+        self.interpolation = interpolation
+        self.seed = seed
+        self.generator = SeedGenerator(seed)
+        self.data_format = backend.standardize_data_format(data_format)
+        self.supports_jit = False
+
+    def _set_factor(self, factor, factor_name):
+        if isinstance(factor, (tuple, list)):
+            if len(factor) != 2:
+                raise ValueError(self._FACTOR_VALIDATION_ERROR + f'Received: {factor_name}={factor}')
+            self._check_factor_range(factor[0])
+            self._check_factor_range(factor[1])
+            (lower, upper) = sorted(factor)
+        elif isinstance(factor, (int, float)):
+            self._check_factor_range(factor)
+            factor = abs(factor)
+            (lower, upper) = [-factor, factor]
+        else:
+            raise ValueError(self._FACTOR_VALIDATION_ERROR + f'Received: {factor_name}={factor}')
+        return (lower, upper)
+
+    def _check_factor_range(self, input_number):
+        if input_number > 1.0 or input_number < -1.0:
+            raise ValueError(self._FACTOR_VALIDATION_ERROR + f'Received: input_number={input_number}')
+
+    def transform_images(self, images, transformation, training=True):
+        images = self.backend.cast(images, self.compute_dtype)
+        if training:
+            return self._zoom_inputs(images, transformation)
+        return images
+
+    def transform_labels(self, labels, transformation, training=True):
+        return labels
+
+    def transform_bounding_boxes(self, bounding_boxes, transformation, training=True):
+        raise NotImplementedError
+
+    def transform_segmentation_masks(self, segmentation_masks, transformation, training=True):
+        return self.transform_images(segmentation_masks, transformation, training=training)
+
+    def get_random_transformation(self, data, training=True, seed=None):
+        if not training:
+            return None
+        if isinstance(data, dict):
+            images = data['images']
+        else:
+            images = data
+        images_shape = self.backend.shape(images)
+        if len(images_shape) == 4:
+            zoom_factor_shape = (images_shape[0], 1)
+        else:
+            zoom_factor_shape = (1, 1)
+        if not training:
+            return {'height_zoom': self.backend.numpy.zeros(zoom_factor_shape), 'width_zoom': self.backend.numpy.zeros(zoom_factor_shape)}
+        if seed is None:
+            seed = self._get_seed_generator(self.backend._backend)
+        height_zoom = self.backend.random.uniform(minval=1.0 + self.height_lower, maxval=1.0 + self.height_upper, shape=zoom_factor_shape, seed=seed)
+        if self.width_factor is not None:
+            width_zoom = self.backend.random.uniform(minval=1.0 + self.width_lower, maxval=1.0 + self.width_upper, shape=zoom_factor_shape, seed=seed)
+        else:
+            width_zoom = height_zoom
+        return {'height_zoom': height_zoom, 'width_zoom': width_zoom}
+
+    def _zoom_inputs(self, inputs, transformation):
+        if transformation is None:
+            return inputs
+        width_zoom = transformation['width_zoom']
+        height_zoom = transformation['height_zoom']
+        zooms = self.backend.cast(self.backend.numpy.concatenate([width_zoom, height_zoom], axis=1), dtype='float32')
+        inputs_shape = self.backend.shape(inputs)
+        unbatched = len(inputs_shape) == 3
+        if unbatched:
+            inputs = self.backend.numpy.expand_dims(inputs, axis=0)
+            inputs_shape = self.backend.shape(inputs)
+        if self.data_format == 'channels_first':
+            height = inputs_shape[-2]
+            width = inputs_shape[-1]
+        else:
+            height = inputs_shape[-3]
+            width = inputs_shape[-2]
+        outputs = self.backend.image.affine_transform(inputs, transform=self._get_zoom_matrix(zooms, height, width), interpolation=self.interpolation, fill_mode=self.fill_mode, fill_value=self.fill_value, data_format=self.data_format)
+        if unbatched:
+            outputs = self.backend.numpy.squeeze(outputs, axis=0)
+        return outputs
+
+    def _get_zoom_matrix(self, zooms, image_height, image_width):
+        num_zooms = self.backend.shape(zooms)[0]
+        x_offset = (self.backend.cast(image_width, 'float32') - 1.0) / 2.0 * (1.0 - zooms[:, 0:1])
+        y_offset = (self.backend.cast(image_height, 'float32') - 1.0) / 2.0 * (1.0 - zooms[:, 1:])
+        return self.backend.numpy.concatenate([zooms[:, 0:1], self.backend.numpy.zeros((num_zooms, 1)), x_offset, self.backend.numpy.zeros((num_zooms, 1)), zooms[:, 1:], y_offset, self.backend.numpy.zeros((num_zooms, 2))], axis=1)
+
+    def compute_output_shape(self, input_shape):
+        return input_shape
+
+    def get_config(self):
+        base_config = super().get_config()
+        config = {'height_factor': self.height_factor, 'width_factor': self.width_factor, 'fill_mode': self.fill_mode, 'interpolation': self.interpolation, 'seed': self.seed, 'fill_value': self.fill_value, 'data_format': self.data_format}
+        return {**base_config, **config}
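+
+# [Editor's example -- not part of the Keras dump above.] The sampled zoom is
+# `1.0 + factor`, and the affine matrix scales output coordinates by it, so a
+# value of 1.0 is identity, values above 1.0 read a wider input window (zoom
+# out) and values below 1.0 zoom in. Minimal usage sketch under those
+# assumptions; `width_factor=None` reuses the height zoom for width:
+import numpy as np
+import keras
+layer = keras.layers.RandomZoom(height_factor=(-0.3, -0.1), seed=7)  # always zooms in 10-30%
+out = layer(np.zeros((1, 64, 64, 3), dtype='float32'), training=True)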
+
+# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/resizing.py
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer
+from keras.src.ops.core import _saturate_cast
+
+@keras_export('keras.layers.Resizing')
+class Resizing(BaseImagePreprocessingLayer):
+    _USE_BASE_FACTOR = False
+
+    def __init__(self, height, width, interpolation='bilinear', crop_to_aspect_ratio=False, pad_to_aspect_ratio=False, fill_mode='constant', fill_value=0.0, data_format=None, **kwargs):
+        super().__init__(**kwargs)
+        self.height = height
+        self.width = width
+        self.interpolation = interpolation
+        self.data_format = backend.standardize_data_format(data_format)
+        self.crop_to_aspect_ratio = crop_to_aspect_ratio
+        self.pad_to_aspect_ratio = pad_to_aspect_ratio
+        self.fill_mode = fill_mode
+        self.fill_value = fill_value
+
+    def transform_images(self, images, transformation=None, training=True):
+        size = (self.height, self.width)
+        resized = self.backend.image.resize(images, size=size, interpolation=self.interpolation, data_format=self.data_format, crop_to_aspect_ratio=self.crop_to_aspect_ratio, pad_to_aspect_ratio=self.pad_to_aspect_ratio, fill_mode=self.fill_mode, fill_value=self.fill_value)
+        if resized.dtype == images.dtype:
+            return resized
+        if backend.is_int_dtype(images.dtype):
+            resized = self.backend.numpy.round(resized)
+        return _saturate_cast(resized, images.dtype, self.backend)
+
+    def transform_segmentation_masks(self, segmentation_masks, transformation=None, training=True):
+        return self.transform_images(segmentation_masks)
+
+    def transform_labels(self, labels, transformation=None, training=True):
+        return labels
+
+    def transform_bounding_boxes(self, bounding_boxes, transformation, training=True):
+        raise NotImplementedError
+
+    def compute_output_shape(self, input_shape):
+        input_shape = list(input_shape)
+        if len(input_shape) == 4:
+            if self.data_format == 'channels_last':
+                input_shape[1] = self.height
+                input_shape[2] = self.width
+            else:
+                input_shape[2] = self.height
+                input_shape[3] = self.width
+        elif self.data_format == 'channels_last':
+            input_shape[0] = self.height
+            input_shape[1] = self.width
+        else:
+            input_shape[1] = self.height
+            input_shape[2] = self.width
+        return tuple(input_shape)
+
+    def get_config(self):
+        base_config = super().get_config()
+        config = {'height': self.height, 'width': self.width, 'interpolation': self.interpolation, 'crop_to_aspect_ratio': self.crop_to_aspect_ratio, 'pad_to_aspect_ratio': self.pad_to_aspect_ratio, 'fill_mode': self.fill_mode, 'fill_value': self.fill_value, 'data_format': self.data_format}
+        return {**base_config, **config}
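+
+# [Editor's example -- not part of the Keras dump above.] For integer inputs,
+# `transform_images` rounds the interpolated values and saturate-casts back to
+# the input dtype, so out-of-range values clamp instead of wrapping. A NumPy
+# sketch of that round-and-saturate step with made-up interpolation outputs:
+import numpy as np
+resized = np.array([-3.7, 10.2, 260.9])  # hypothetical bilinear outputs
+out = np.clip(np.round(resized), 0, 255).astype('uint8')  # -> [0, 10, 255]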
+
+# File: keras-master/keras/src/layers/preprocessing/image_preprocessing/solarization.py
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import BaseImagePreprocessingLayer
+from keras.src.ops.core import _saturate_cast
+from keras.src.random.seed_generator import SeedGenerator
+
+@keras_export('keras.layers.Solarization')
+class Solarization(BaseImagePreprocessingLayer):
+    _USE_BASE_FACTOR = False
+    _VALUE_RANGE_VALIDATION_ERROR = 'The `value_range` argument should be a list of two numbers. '
+    _FACTOR_VALIDATION_ERROR = 'The `addition_factor` and `threshold_factor` arguments should be a number (or a list of two numbers) in the range [0, 1]. '
+
+    def __init__(self, addition_factor=0.0, threshold_factor=0.0, value_range=(0, 255), seed=None, **kwargs):
+        super().__init__(**kwargs)
+        self.seed = seed
+        self.generator = SeedGenerator(seed)
+        self.addition_factor = self._set_factor(addition_factor, 'addition_factor')
+        self.threshold_factor = self._set_factor(threshold_factor, 'threshold_factor')
+        self._set_value_range(value_range)
+
+    def _set_value_range(self, value_range):
+        if not isinstance(value_range, (tuple, list)):
+            raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}')
+        if len(value_range) != 2:
+            raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}')
+        self.value_range = sorted(value_range)
+
+    def _set_factor(self, factor, factor_name):
+        if isinstance(factor, (tuple, list)):
+            if len(factor) != 2:
+                raise ValueError(self._FACTOR_VALIDATION_ERROR + f'Received: {factor_name}={factor}')
+            self._check_factor_range(factor[0])
+            self._check_factor_range(factor[1])
+            (lower, upper) = sorted(factor)
+        elif isinstance(factor, (int, float)):
+            self._check_factor_range(factor)
+            (lower, upper) = [0, factor]
+        else:
+            raise ValueError(self._FACTOR_VALIDATION_ERROR + f'Received: {factor_name}={factor}')
+        return (lower, upper)
+
+    def _check_factor_range(self, input_number):
+        if input_number > 1.0 or input_number < 0:
+            raise ValueError(self._FACTOR_VALIDATION_ERROR + f'Received: input_number={input_number}')
+
+    def get_random_transformation(self, data, training=True, seed=None):
+        if not training:
+            return None
+        if isinstance(data, dict):
+            images = data['images']
+        else:
+            images = data
+        images_shape = self.backend.shape(images)
+        if len(images_shape) == 4:
+            factor_shape = (images_shape[0], 1, 1, 1)
+        else:
+            factor_shape = (1, 1, 1)
+        if seed is None:
+            seed = self._get_seed_generator(self.backend._backend)
+        return {'additions': self.backend.random.uniform(minval=self.addition_factor[0], maxval=self.addition_factor[1] * 255, shape=factor_shape, seed=seed, dtype=self.compute_dtype), 'thresholds': self.backend.random.uniform(minval=self.threshold_factor[0], maxval=self.threshold_factor[1] * 255, shape=factor_shape, seed=seed, dtype=self.compute_dtype)}
+
+    def transform_images(self, images, transformation, training=True):
+        images = self.backend.cast(images, self.compute_dtype)
+        if transformation is None:
+            return images
+        thresholds = transformation['thresholds']
+        additions = transformation['additions']
+        images = self._transform_value_range(images, original_range=self.value_range, target_range=(0, 255), dtype=self.compute_dtype)
+        results = images + additions
+        results = self.backend.numpy.clip(results, 0, 255)
+        results = self.backend.numpy.where(results < thresholds, results, 255 - results)
+        results = self._transform_value_range(results, original_range=(0, 255), target_range=self.value_range, dtype=self.compute_dtype)
+        if results.dtype == images.dtype:
+            return results
+        if backend.is_int_dtype(images.dtype):
+            results = self.backend.numpy.round(results)
+        return _saturate_cast(results, images.dtype, self.backend)
+
+    def transform_labels(self, labels, transformation, training=True):
+        return labels
+
+    def transform_bounding_boxes(self, bounding_boxes, transformation, training=True):
+        return bounding_boxes
+
+    def transform_segmentation_masks(self, segmentation_masks, transformation, training=True):
+        return segmentation_masks
+
+    def get_config(self):
+        base_config = super().get_config()
+        config = {'value_range': self.value_range, 'addition_factor': self.addition_factor, 'threshold_factor': self.threshold_factor, 'seed': self.seed}
+        return {**base_config, **config}
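+
+# [Editor's example -- not part of the Keras dump above.] After rescaling to
+# [0, 255], solarization keeps pixels below the sampled threshold and inverts
+# the rest, exactly the `where(results < thresholds, ...)` line above. A tiny
+# NumPy sketch of that core operation:
+import numpy as np
+pixels = np.array([10.0, 100.0, 200.0])
+threshold = 128.0
+solarized = np.where(pixels < threshold, pixels, 255.0 - pixels)  # -> [10., 100., 55.]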
+
+# File: keras-master/keras/src/layers/preprocessing/index_lookup.py
+import collections
+import numpy as np
+from keras.src import backend
+from keras.src.layers.layer import Layer
+from keras.src.utils import argument_validation
+from keras.src.utils import numerical_utils
+from keras.src.utils import tf_utils
+from keras.src.utils.module_utils import tensorflow as tf
+
+class IndexLookup(Layer):
+
+    def __init__(self, max_tokens, num_oov_indices, mask_token, oov_token, vocabulary_dtype, vocabulary=None, idf_weights=None, invert=False, output_mode='int', sparse=False, pad_to_max_tokens=False, name=None, **kwargs):
+        if max_tokens is not None and max_tokens <= 1:
+            raise ValueError(f'If set, `max_tokens` must be greater than 1. Received: max_tokens={max_tokens}')
+        if pad_to_max_tokens and max_tokens is None:
+            raise ValueError(f'If pad_to_max_tokens is True, must set `max_tokens`. Received: max_tokens={max_tokens}')
+        if num_oov_indices < 0:
+            raise ValueError(f'`num_oov_indices` must be greater than or equal to 0. Received: num_oov_indices={num_oov_indices}')
+        if output_mode == 'binary':
+            output_mode = 'multi_hot'
+        if output_mode == 'tf-idf':
+            output_mode = 'tf_idf'
+        argument_validation.validate_string_arg(output_mode, allowable_strings=('int', 'one_hot', 'multi_hot', 'count', 'tf_idf'), caller_name=self.__class__.__name__, arg_name='output_mode')
+        if invert and output_mode != 'int':
+            raise ValueError(f"`output_mode` must be `'int'` when `invert` is true. Received: output_mode={output_mode}")
+        if sparse and output_mode == 'int':
+            raise ValueError(f"`sparse` may only be true if `output_mode` is `'one_hot'`, `'multi_hot'`, `'count'` or `'tf_idf'`. Received: sparse={sparse} and output_mode={output_mode}")
+        if idf_weights is not None and output_mode != 'tf_idf':
+            raise ValueError(f"`idf_weights` should only be set if `output_mode` is `'tf_idf'`. Received: idf_weights={idf_weights} and output_mode={output_mode}")
+        super().__init__(name=name)
+        self._convert_input_args = False
+        self._allow_non_tensor_positional_args = True
+        self.supports_jit = False
+        self.invert = invert
+        self.max_tokens = max_tokens
+        self.num_oov_indices = num_oov_indices
+        self.mask_token = mask_token
+        self.oov_token = oov_token
+        self.output_mode = output_mode
+        self.sparse = sparse
+        self.pad_to_max_tokens = pad_to_max_tokens
+        self.vocabulary_dtype = tf.as_dtype(vocabulary_dtype).name
+        self._frozen_vocab_size = kwargs.pop('vocabulary_size', None)
+        self.input_vocabulary = vocabulary
+        self.input_idf_weights = idf_weights
+        self._has_input_vocabulary = kwargs.pop('has_input_vocabulary', vocabulary is not None)
+        kwargs.pop('trainable', None)
+        kwargs.pop('dtype', None)
+        if kwargs:
+            raise ValueError(f'Unrecognized keyword argument(s): {kwargs}')
+        if invert:
+            self._key_dtype = 'int64'
+            self._value_dtype = self.vocabulary_dtype
+            mask_key = 0
+            mask_value = mask_token
+            self._default_value = self.oov_token
+        else:
+            self._key_dtype = self.vocabulary_dtype
+            self._value_dtype = 'int64'
+            mask_key = mask_token
+            mask_value = 0 if self.output_mode == 'int' else tf.as_dtype(self._value_dtype).max
+            if self.num_oov_indices == 0:
+                self._default_value = -1
+            elif self.num_oov_indices == 1:
+                self._default_value = self._oov_start_index()
+            else:
+                self._default_value = -1
+        if self.mask_token is not None:
+            self._mask_key = tf.convert_to_tensor(mask_key, self._key_dtype)
+            self._mask_value = tf.convert_to_tensor(mask_value, self._value_dtype)
+        if self.output_mode == 'tf_idf':
+            if self._has_input_vocabulary and idf_weights is None:
+                raise ValueError('When specifying the `vocabulary` argument, in TF-IDF output mode, the `idf_weights` argument must also be provided.')
+            if idf_weights is not None:
+                self.idf_weights = tf.Variable(idf_weights, dtype=backend.floatx(), trainable=False)
+                self.idf_weights_const = self.idf_weights.value()
+        if vocabulary is not None:
+            self.set_vocabulary(vocabulary, idf_weights)
+        else:
+            self.lookup_table = self._uninitialized_lookup_table()
+        if not self._has_input_vocabulary:
+            self.token_counts = tf.lookup.experimental.MutableHashTable(key_dtype=vocabulary_dtype, value_dtype='int64', default_value=0)
+            if self.output_mode == 'tf_idf':
+                self.token_document_counts = tf.lookup.experimental.MutableHashTable(key_dtype=vocabulary_dtype, value_dtype='int64', default_value=0)
+                self.num_documents = tf.Variable(0, dtype='int64', trainable=False)
+
+    def get_vocabulary(self, include_special_tokens=True):
+        if self.lookup_table.size() == 0:
+            (vocab, indices) = ([], [])
+        else:
+            (keys, values) = self.lookup_table.export()
+            (vocab, indices) = (values, keys) if self.invert else (keys, values)
+            (vocab, indices) = (self._tensor_vocab_to_numpy(vocab), indices.numpy())
+        lookup = collections.defaultdict(lambda: self.oov_token, zip(indices, vocab))
+        vocab = [lookup[x] for x in range(self.vocabulary_size())]
+        if self.mask_token is not None and self.output_mode == 'int':
+            vocab[0] = self.mask_token
+        if not include_special_tokens:
+            vocab = vocab[self._token_start_index():]
+        if self.vocabulary_dtype == 'string':
+            return [i.decode('utf-8') if isinstance(i, bytes) else i for i in vocab]
+        else:
+            return vocab
+
+    def vocabulary_size(self):
+        if tf.executing_eagerly():
+            return int(self.lookup_table.size().numpy()) + self._token_start_index()
+        else:
+            return self.lookup_table.size() + self._token_start_index()
+
+    def get_config(self):
+        config = {'invert': self.invert, 'max_tokens': self.max_tokens, 'num_oov_indices': self.num_oov_indices, 'oov_token': self.oov_token, 'mask_token': self.mask_token, 'output_mode': self.output_mode, 'sparse': self.sparse, 'pad_to_max_tokens': self.pad_to_max_tokens, 'vocabulary_dtype': self.vocabulary_dtype, 'idf_weights': listify_tensors(self.input_idf_weights), 'vocabulary': listify_tensors(self.input_vocabulary), 'vocabulary_size': self._frozen_vocab_size}
+        base_config = super().get_config()
+        return dict(list(base_config.items()) + list(config.items()))
+
+    def _record_vocabulary_size(self):
+        self._ensure_vocab_size_unchanged()
+        with tf.init_scope():
+            self._frozen_vocab_size = self.vocabulary_size()
+
+    def set_vocabulary(self, vocabulary, idf_weights=None):
+        if self.output_mode == 'tf_idf':
+            if idf_weights is None:
+                raise ValueError("`idf_weights` must be set if output_mode is 'tf_idf'.")
+        elif idf_weights is not None:
+            raise ValueError(f"`idf_weights` should only be set if output_mode is `'tf_idf'`. Received: output_mode={self.output_mode} and idf_weights={idf_weights}")
+        if isinstance(vocabulary, str):
+            if not tf.io.gfile.exists(vocabulary):
+                raise ValueError(f'Vocabulary file {vocabulary} does not exist.')
+            if self.output_mode == 'tf_idf':
+                raise ValueError("output_mode `'tf_idf'` does not support loading a vocabulary from file.")
+            self.lookup_table = self._lookup_table_from_file(vocabulary)
+            self._record_vocabulary_size()
+            return
+        if not tf.executing_eagerly() and (tf.is_tensor(vocabulary) or tf.is_tensor(idf_weights)):
+            raise RuntimeError(f'Cannot set a tensor vocabulary on layer {self.name} when not executing eagerly. Create this layer or call `set_vocabulary()` outside of any traced function.')
+        if tf.is_tensor(vocabulary):
+            vocabulary = self._tensor_vocab_to_numpy(vocabulary)
+        elif isinstance(vocabulary, (list, tuple)):
+            vocabulary = np.array(vocabulary)
+        if tf.is_tensor(idf_weights):
+            idf_weights = idf_weights.numpy()
+        elif isinstance(idf_weights, (list, tuple)):
+            idf_weights = np.array(idf_weights)
+        if vocabulary.size == 0:
+            raise ValueError(f'Cannot set an empty vocabulary. Received: vocabulary={vocabulary}')
+        oov_start = self._oov_start_index()
+        token_start = self._token_start_index()
+        special_tokens = [self.mask_token] * oov_start + [self.oov_token] * self.num_oov_indices
+        found_special_tokens = np.array_equal(special_tokens, vocabulary[:token_start])
+        if found_special_tokens:
+            tokens = vocabulary[token_start:]
+        else:
+            tokens = vocabulary
+        repeated_tokens = self._find_repeated_tokens(tokens)
+        if repeated_tokens:
+            raise ValueError(f'The passed vocabulary has at least one repeated term. Please uniquify your dataset. The repeated terms are: {repeated_tokens}')
+        if self.mask_token is not None and self.mask_token in tokens:
+            mask_index = np.argwhere(vocabulary == self.mask_token)[-1]
+            raise ValueError(f'Found reserved mask token at unexpected location in `vocabulary`. Note that passed `vocabulary` does not need to include the OOV and mask tokens. Either remove all mask and OOV tokens, or include them only at the start of the vocabulary in precisely this order: {special_tokens}. Received: mask_token={self.mask_token} at vocabulary index {mask_index}')
+        if self.oov_token is not None and self.invert and (self.oov_token in tokens):
+            oov_index = np.argwhere(vocabulary == self.oov_token)[-1]
+            raise ValueError(f'Found reserved OOV token at unexpected location in `vocabulary`. Note that passed `vocabulary` does not need to include the OOV and mask tokens. Either remove all mask and OOV tokens, or include them only at the start of the vocabulary in precisely this order: {special_tokens}. Received: oov_token={self.oov_token} at vocabulary index {oov_index}')
+        new_vocab_size = token_start + len(tokens)
+        if self.max_tokens is not None and new_vocab_size > self.max_tokens:
+            raise ValueError(f'Attempted to set a vocabulary larger than the maximum vocab size. Received vocabulary size is {new_vocab_size}; `max_tokens` is {self.max_tokens}.')
+        self.lookup_table = self._lookup_table_from_tokens(tokens)
+        self._record_vocabulary_size()
+        if self.output_mode == 'tf_idf' and idf_weights is not None:
+            if len(vocabulary) != len(idf_weights):
+                raise ValueError(f'`idf_weights` must be the same length as vocabulary. len(idf_weights) is {len(idf_weights)}; len(vocabulary) is {len(vocabulary)}')
+            idf_weights = self._convert_to_ndarray(idf_weights)
+            if idf_weights.ndim != 1:
+                raise ValueError(f'TF-IDF data must be a 1-index array. Received: type(idf_weights)={type(idf_weights)}')
+            if found_special_tokens:
+                front_padding = 0
+                front_padding_value = 0
+            else:
+                front_padding = token_start
+                front_padding_value = np.average(idf_weights)
+            back_padding_value = 0
+            if self.pad_to_max_tokens and self.max_tokens is not None:
+                back_padding = self.max_tokens - front_padding - len(idf_weights)
+            else:
+                back_padding = 0
+            weights = np.pad(idf_weights, (front_padding, back_padding), 'constant', constant_values=(front_padding_value, back_padding_value))
+            weights = tf.convert_to_tensor(weights, dtype=backend.floatx())
+            self.idf_weights = tf.Variable(weights, trainable=False)
+            self.idf_weights_const = self.idf_weights.value()
+
+    def build(self):
+        self.built = True
+
+    def get_build_config(self):
+        return {}
+
+    def build_from_config(self, config):
+        self.build()
+
+    @property
+    def compute_dtype(self):
+        return self.vocabulary_dtype
+
+    @property
+    def variable_dtype(self):
+        return self.vocabulary_dtype
+
+    def compute_output_shape(self, input_shape):
+        if self.output_mode == 'int':
+            return input_shape
+        depth = self.max_tokens if self.pad_to_max_tokens else self._frozen_vocab_size
+        return (input_shape[0], depth)
+
+    def compute_output_spec(self, inputs):
+        if self.output_mode == 'int':
+            output_dtype = 'int64'
+        else:
+            output_dtype = backend.floatx()
+        output_shape = self.compute_output_shape(inputs.shape)
+        return backend.KerasTensor(output_shape, dtype=output_dtype)
+
+    def adapt(self, data, steps=None):
+        self.reset_state()
+        if isinstance(data, tf.data.Dataset):
+            if steps is not None:
+                data = data.take(steps)
+            for batch in data:
+                self.update_state(batch)
+        else:
+            data = tf_utils.ensure_tensor(data, dtype=self.vocabulary_dtype)
+            if data.shape.rank == 1:
+                data = tf.expand_dims(data, -1)
+            self.update_state(data)
+        self.finalize_state()
+
+    def update_state(self, data):
+        if self._has_input_vocabulary:
+            raise ValueError(f"Cannot adapt layer '{self.name}' after setting a static vocabulary via `vocabulary` argument or `set_vocabulary()` method.")
+        data = tf_utils.ensure_tensor(data, dtype=self.vocabulary_dtype)
+        if data.shape.rank == 0:
+            data = tf.expand_dims(data, 0)
+        if data.shape.rank == 1:
+            data = tf.expand_dims(data, 0)
+        (tokens, counts) = self._num_tokens(data)
+        self.token_counts.insert(tokens, counts + self.token_counts.lookup(tokens))
+        if self.output_mode == 'tf_idf':
+            if isinstance(data, tf.RaggedTensor):
+                deduped_doc_data = tf.map_fn(lambda x: tf.unique(x)[0], data)
+            else:
+                deduped_doc_data = [tf.unique(x)[0] for x in data]
+                deduped_doc_data = tf.concat(deduped_doc_data, axis=0)
+            (tokens, counts) = self._num_tokens(deduped_doc_data)
+            self.token_document_counts.insert(tokens, counts + self.token_document_counts.lookup(tokens))
+            if isinstance(data, tf.RaggedTensor):
+                self.num_documents.assign_add(data.nrows())
+            else:
+                self.num_documents.assign_add(tf.shape(data, out_type='int64')[0])
+
+    def finalize_state(self):
+        if self._has_input_vocabulary or tf.equal(self.token_counts.size(), 0):
+            if self.output_mode == 'tf_idf':
+                self.idf_weights_const = self.idf_weights.value()
+            self._record_vocabulary_size()
+            return
+        if self.mask_token is not None:
+            self.token_counts.remove(tf.convert_to_tensor([self.mask_token], self.vocabulary_dtype))
+        if self.oov_token is not None:
+            self.token_counts.remove(tf.convert_to_tensor([self.oov_token], self.vocabulary_dtype))
+        (tokens, counts) = self.token_counts.export()
+        sorted_indices = np.lexsort((tokens.numpy(), counts.numpy()))[::-1]
+        token_start = self._token_start_index()
+        if self.max_tokens:
+            max_learned_tokens = self.max_tokens - token_start
+            sorted_indices = sorted_indices[:max_learned_tokens]
+        tokens = tf.gather(tokens, sorted_indices)
+        self.lookup_table = self._lookup_table_from_tokens(tokens)
+        if self.output_mode == 'tf_idf':
+            token_document_counts = self.token_document_counts.lookup(tokens)
+            idf_weights = self._inverse_document_frequency(token_document_counts, self.num_documents)
+            idf_weights = tf.cast(idf_weights, backend.floatx())
+            idf_weights = tf.pad(idf_weights, [[self._token_start_index(), 0]], constant_values=tf.reduce_mean(idf_weights))
+            if self.pad_to_max_tokens and self.max_tokens is not None:
+                idf_weights = tf.pad(idf_weights, [[0, self.max_tokens - tf.size(idf_weights)]], constant_values=0)
+            self.idf_weights = tf.Variable(idf_weights, dtype=backend.floatx(), trainable=False)
+            self.idf_weights_const = self.idf_weights.value()
+        self.reset_state()
+        self._record_vocabulary_size()
+
+    def reset_state(self):
+        if self._has_input_vocabulary:
+            return
+        self.token_counts.remove(self.token_counts.export()[0])
+        if self.output_mode == 'tf_idf':
+            self.token_document_counts.remove(self.token_document_counts.export()[0])
+            self.num_documents.assign(0)
+
+    def call(self, inputs):
+        from keras.src.backend import tensorflow as tf_backend
+        self._ensure_known_vocab_size()
+        inputs = tf_utils.ensure_tensor(inputs, dtype=self._key_dtype)
+        original_shape = inputs.shape
+        if inputs.shape.rank == 0:
+            inputs = self._expand_dims(inputs, -1)
+        if isinstance(inputs, tf.SparseTensor):
+            lookups = tf.SparseTensor(inputs.indices, self._lookup_dense(inputs.values), inputs.dense_shape)
+        elif isinstance(inputs, tf.RaggedTensor):
+            lookups = tf.ragged.map_flat_values(self._lookup_dense, inputs)
+        else:
+            lookups = self._lookup_dense(inputs)
+        if self.output_mode == 'int':
+            if original_shape.rank == 0:
+                lookups = tf.squeeze(lookups, -1)
+            return lookups
+        depth = self.max_tokens if self.pad_to_max_tokens else self._frozen_vocab_size
+        idf_weights = self.idf_weights_const if self.output_mode == 'tf_idf' else None
+        output = numerical_utils.encode_categorical_inputs(lookups, output_mode='count' if self.output_mode == 'tf_idf' else self.output_mode, depth=depth, dtype=self._value_dtype, sparse=self.sparse, backend_module=tf_backend)
+        if self.output_mode == 'tf_idf':
+            if idf_weights is None:
+                raise ValueError("When `output_mode` is `'tf_idf'`, `idf_weights` must be provided.")
provided.") + output = tf_backend.numpy.multiply(tf_backend.core.cast(output, idf_weights.dtype), idf_weights) + return output + + def _lookup_dense(self, inputs): + if tf.executing_eagerly() and backend.is_keras_tensor(inputs): + lookups = tf.zeros_like(inputs, dtype=self._value_dtype) + else: + lookups = self.lookup_table.lookup(inputs) + if self.mask_token is not None: + mask_locations = tf.equal(inputs, self._mask_key) + lookups = tf.where(mask_locations, self._mask_value, lookups) + if self.invert: + return lookups + lookup_checks = [] + if self.num_oov_indices == 0: + oov_indices = tf.where(tf.equal(lookups, -1)) + oov_inputs = tf.gather_nd(inputs, oov_indices) + msg = tf.strings.format('When `num_oov_indices=0` all inputs should be in vocabulary, found OOV values {}, consider setting `num_oov_indices=1`.', (oov_inputs,)) + assertion = tf.Assert(tf.equal(tf.size(oov_indices), 0), [msg]) + lookup_checks.append(assertion) + elif self.num_oov_indices > 1: + if tf.as_dtype(self._key_dtype).is_integer: + oov_indices = tf.math.floormod(inputs, self.num_oov_indices) + else: + oov_indices = tf.strings.to_hash_bucket_fast(inputs, num_buckets=self.num_oov_indices) + oov_indices = oov_indices + self._oov_start_index() + oov_locations = tf.equal(lookups, self._default_value) + lookups = tf.where(oov_locations, oov_indices, lookups) + with tf.control_dependencies(lookup_checks): + return tf.identity(lookups) + + def save_own_variables(self, store): + if self.output_mode == 'tf_idf': + store['idf_weights'] = self.idf_weights_const.numpy() + + def load_own_variables(self, store): + if self.output_mode == 'tf_idf': + self.idf_weights.assign(store['idf_weights']) + self.idf_weights_const = self.idf_weights.value() + + def save_assets(self, dir_path): + if self.input_vocabulary is not None: + return + vocabulary = self.get_vocabulary(include_special_tokens=True) + vocabulary_filepath = tf.io.gfile.join(dir_path, 'vocabulary.txt') + with open(vocabulary_filepath, 'w') as f: + f.write('\n'.join([str(w) for w in vocabulary])) + + def load_assets(self, dir_path): + if self.input_vocabulary is not None: + return + vocabulary_filepath = tf.io.gfile.join(dir_path, 'vocabulary.txt') + with open(vocabulary_filepath, 'r') as f: + lines = f.read().split('\n') + if tf.as_dtype(self.vocabulary_dtype) == tf.string: + values = [str(line) for line in lines] + else: + values = [int(line) for line in lines] + if self.output_mode == 'tf_idf': + self.set_vocabulary(values, idf_weights=False) + else: + self.set_vocabulary(values) + + def _uninitialized_lookup_table(self): + with tf.init_scope(): + initializer = get_null_initializer(self._key_dtype, self._value_dtype) + return tf.lookup.StaticHashTable(initializer, self._default_value) + + def _lookup_table_from_tokens(self, tokens): + with tf.init_scope(): + token_start = self._token_start_index() + token_end = token_start + tf.size(tokens) + indices_dtype = self._key_dtype if self.invert else self._value_dtype + indices = tf.range(token_start, token_end, dtype=indices_dtype) + (keys, values) = (indices, tokens) if self.invert else (tokens, indices) + initializer = tf.lookup.KeyValueTensorInitializer(keys, values, self._key_dtype, self._value_dtype) + return tf.lookup.StaticHashTable(initializer, self._default_value) + + def _lookup_table_from_file(self, filename): + if self.invert: + key_index = tf.lookup.TextFileIndex.LINE_NUMBER + value_index = tf.lookup.TextFileIndex.WHOLE_LINE + else: + key_index = tf.lookup.TextFileIndex.WHOLE_LINE + value_index = 
+        with tf.init_scope():
+            initializer = tf.lookup.TextFileInitializer(filename=filename, key_dtype=self._key_dtype, key_index=key_index, value_dtype=self._value_dtype, value_index=value_index, value_index_offset=self._token_start_index())
+            return tf.lookup.StaticHashTable(initializer, self._default_value)
+
+    def _convert_to_ndarray(self, x):
+        return np.array(x) if isinstance(x, (list, tuple)) else x
+
+    def _expand_dims(self, inputs, axis):
+        if isinstance(inputs, tf.SparseTensor):
+            return tf.sparse.expand_dims(inputs, axis)
+        else:
+            return tf.expand_dims(inputs, axis)
+
+    def _oov_start_index(self):
+        return 1 if self.mask_token is not None and self.output_mode == 'int' else 0
+
+    def _token_start_index(self):
+        return self._oov_start_index() + self.num_oov_indices
+
+    def _ensure_known_vocab_size(self):
+        if self.output_mode == 'int' or self.pad_to_max_tokens:
+            return
+        if self._frozen_vocab_size is None:
+            raise RuntimeError(f"When using `output_mode={self.output_mode}` and `pad_to_max_tokens=False`, you must set the layer's vocabulary before calling it. Either pass a `vocabulary` argument to the layer, or call `adapt` with some sample data.")
+
+    def _ensure_vocab_size_unchanged(self):
+        if self.output_mode == 'int' or self.pad_to_max_tokens:
+            return
+        with tf.init_scope():
+            new_vocab_size = self.vocabulary_size()
+            if self._frozen_vocab_size is not None and new_vocab_size != self._frozen_vocab_size:
+                raise RuntimeError(f'When using `output_mode={self.output_mode}` and `pad_to_max_tokens=False`, the vocabulary size cannot be changed after the layer is called. Old vocab size is {self._frozen_vocab_size}, new vocab size is {new_vocab_size}')
+
+    def _find_repeated_tokens(self, vocabulary):
+        vocabulary_set = set(vocabulary)
+        if len(vocabulary) != len(vocabulary_set):
+            return [item for (item, count) in collections.Counter(vocabulary).items() if count > 1]
+        else:
+            return []
+
+    def _num_tokens(self, data):
+        if isinstance(data, tf.SparseTensor):
+            flat_values = data.values
+        elif isinstance(data, tf.RaggedTensor):
+            flat_values = data.flat_values
+        else:
+            flat_values = tf.reshape(data, [-1])
+        (tokens, _, counts) = tf.unique_with_counts(flat_values, out_idx='int64')
+        return (tokens, counts)
+
+    def _inverse_document_frequency(self, token_document_counts, num_documents):
+        return tf.math.log(1 + num_documents / (1 + token_document_counts))
+
+    def _tensor_vocab_to_numpy(self, vocabulary):
+        return vocabulary.numpy()
+
+def get_null_initializer(key_dtype, value_dtype):
+
+    class NullInitializer(tf.lookup.KeyValueTensorInitializer):
+
+        def __init__(self, key_dtype, value_dtype):
+            self._key_dtype = key_dtype
+            self._value_dtype = value_dtype
+
+        @property
+        def key_dtype(self):
+            return self._key_dtype
+
+        @property
+        def value_dtype(self):
+            return self._value_dtype
+
+        def initialize(self, table):
+            pass
+    return NullInitializer(key_dtype, value_dtype)
+
+def listify_tensors(x):
+    if tf.is_tensor(x):
+        x = x.numpy()
+    if isinstance(x, np.ndarray):
+        x = x.tolist()
+    return x
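+
+# [Editor's example -- not part of the Keras dump above.] IndexLookup is the
+# shared engine behind StringLookup/IntegerLookup: adapt() counts tokens,
+# sorts them by frequency, and reserves the leading slots for the mask token
+# (when set) and `num_oov_indices` OOV buckets. A usage sketch via the public
+# StringLookup wrapper (requires the TensorFlow dependency noted above):
+import keras
+lookup = keras.layers.StringLookup(num_oov_indices=1, oov_token='[UNK]')
+lookup.adapt(['apple', 'banana', 'apple', 'cherry'])
+print(lookup.get_vocabulary())   # '[UNK]' first, then tokens by frequency
+print(lookup(['banana', 'durian']))  # 'durian' is OOV, mapped to the OOV slot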
+
+# File: keras-master/keras/src/layers/preprocessing/integer_lookup.py
+import numpy as np
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.layers.preprocessing.index_lookup import IndexLookup
+from keras.src.utils import backend_utils
+from keras.src.utils.module_utils import tensorflow as tf
+
+@keras_export('keras.layers.IntegerLookup')
+class IntegerLookup(IndexLookup):
+
+    def __init__(self, max_tokens=None, num_oov_indices=1, mask_token=None, oov_token=-1, vocabulary=None, vocabulary_dtype='int64', idf_weights=None, invert=False, output_mode='int', sparse=False, pad_to_max_tokens=False, name=None, **kwargs):
+        if not tf.available:
+            raise ImportError('Layer IntegerLookup requires TensorFlow. Install it via `pip install tensorflow`.')
+        if max_tokens is not None and max_tokens <= 1:
+            raise ValueError(f'If `max_tokens` is set for `IntegerLookup`, it must be greater than 1. Received: max_tokens={max_tokens}')
+        if num_oov_indices < 0:
+            raise ValueError(f'The value of the `num_oov_indices` argument for `IntegerLookup` must be >= 0. Received: num_oov_indices={num_oov_indices}')
+        if sparse and backend.backend() != 'tensorflow':
+            raise ValueError('`sparse=True` can only be used with the TensorFlow backend.')
+        if vocabulary_dtype != 'int64':
+            raise ValueError(f"Only `vocabulary_dtype='int64'` is supported at this time. Received: vocabulary_dtype={vocabulary_dtype}")
+        super().__init__(max_tokens=max_tokens, num_oov_indices=num_oov_indices, mask_token=mask_token, oov_token=oov_token, vocabulary=vocabulary, vocabulary_dtype=vocabulary_dtype, idf_weights=idf_weights, invert=invert, output_mode=output_mode, sparse=sparse, pad_to_max_tokens=pad_to_max_tokens, name=name, **kwargs)
+        self._convert_input_args = False
+        self._allow_non_tensor_positional_args = True
+        self.supports_jit = False
+
+    def adapt(self, data, steps=None):
+        super().adapt(data, steps=steps)
+
+    def get_config(self):
+        config = super().get_config()
+        if config['oov_token'] is not None:
+            config['oov_token'] = int(config['oov_token'])
+        if config['mask_token'] is not None:
+            config['mask_token'] = int(config['mask_token'])
+        if config['vocabulary'] is not None:
+            config['vocabulary'] = [int(v) for v in config['vocabulary']]
+        return config
+
+    def call(self, inputs):
+        if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor, np.ndarray, list, tuple)):
+            inputs = tf.convert_to_tensor(backend.convert_to_numpy(inputs))
+        outputs = super().call(inputs)
+        return backend_utils.convert_tf_tensor(outputs)
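+
+# [Editor's example -- not part of the Keras dump above.] With `invert=True`
+# the same vocabulary maps indices back to tokens, which pairs naturally with
+# a forward layer for decoding. Sketch with a fixed vocabulary (index 0 is the
+# single OOV slot, since `mask_token=None` by default):
+import keras
+fwd = keras.layers.IntegerLookup(vocabulary=[12, 36, 1138, 42])
+inv = keras.layers.IntegerLookup(vocabulary=[12, 36, 1138, 42], invert=True)
+ids = fwd([12, 1138, 7])   # 7 is out-of-vocabulary -> index 0
+back = inv(ids)            # -> [12, 1138, -1]; -1 is the default oov_token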
+
+# File: keras-master/keras/src/layers/preprocessing/mel_spectrogram.py
+from keras.src.api_export import keras_export
+from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
+_MEL_BREAK_FREQUENCY_HERTZ = 700.0
+_MEL_HIGH_FREQUENCY_Q = 1127.0
+
+@keras_export('keras.layers.MelSpectrogram')
+class MelSpectrogram(TFDataLayer):
+
+    def __init__(self, fft_length=2048, sequence_stride=512, sequence_length=None, window='hann', sampling_rate=16000, num_mel_bins=128, min_freq=20.0, max_freq=None, power_to_db=True, top_db=80.0, mag_exp=2.0, min_power=1e-10, ref_power=1.0, **kwargs):
+        self.fft_length = fft_length
+        self.sequence_stride = sequence_stride
+        self.sequence_length = sequence_length or fft_length
+        self.window = window
+        self.sampling_rate = sampling_rate
+        self.num_mel_bins = num_mel_bins
+        self.min_freq = min_freq
+        self.max_freq = max_freq or int(sampling_rate / 2)
+        self.power_to_db = power_to_db
+        self.top_db = top_db
+        self.mag_exp = mag_exp
+        self.min_power = min_power
+        self.ref_power = ref_power
+        super().__init__(**kwargs)
+
+    def call(self, inputs):
+        dtype = 'float32' if self.compute_dtype not in ['float32', 'float64'] else self.compute_dtype
+        inputs = self.backend.convert_to_tensor(inputs, dtype=dtype)
+        outputs = self._spectrogram(inputs)
+        outputs = self._melscale(outputs)
+        if self.power_to_db:
+            outputs = self._dbscale(outputs)
+        outputs = self.backend.numpy.swapaxes(outputs, -1, -2)
+        outputs = self.backend.cast(outputs, self.compute_dtype)
+        return outputs
+
+    def _spectrogram(self, inputs):
+        (real, imag) = self.backend.math.stft(inputs, sequence_length=self.sequence_length, sequence_stride=self.sequence_stride, fft_length=self.fft_length, window=self.window, center=True)
+        spec = self.backend.numpy.sqrt(self.backend.numpy.add(self.backend.numpy.square(real), self.backend.numpy.square(imag)))
+        spec = self.backend.numpy.power(spec, self.mag_exp)
+        return spec
+
+    def _melscale(self, inputs):
+        matrix = self.linear_to_mel_weight_matrix(num_mel_bins=self.num_mel_bins, num_spectrogram_bins=self.backend.shape(inputs)[-1], sampling_rate=self.sampling_rate, lower_edge_hertz=self.min_freq, upper_edge_hertz=self.max_freq)
+        return self.backend.numpy.tensordot(inputs, matrix, axes=1)
+
+    def _dbscale(self, inputs):
+        log_spec = 10.0 * self.backend.numpy.log10(self.backend.numpy.maximum(inputs, self.min_power))
+        ref_value = self.backend.numpy.abs(self.backend.convert_to_tensor(self.ref_power))
+        log_spec -= 10.0 * self.backend.numpy.log10(self.backend.numpy.maximum(ref_value, self.min_power))
+        log_spec = self.backend.numpy.maximum(log_spec, self.backend.numpy.max(log_spec) - self.top_db)
+        return log_spec
+
+    def _hertz_to_mel(self, frequencies_hertz):
+        return _MEL_HIGH_FREQUENCY_Q * self.backend.numpy.log(1.0 + frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ)
+
+    def linear_to_mel_weight_matrix(self, num_mel_bins=20, num_spectrogram_bins=129, sampling_rate=8000, lower_edge_hertz=125.0, upper_edge_hertz=3800.0, dtype='float32'):
+        sampling_rate = self.backend.cast(sampling_rate, dtype)
+        lower_edge_hertz = self.backend.convert_to_tensor(lower_edge_hertz, dtype)
+        upper_edge_hertz = self.backend.convert_to_tensor(upper_edge_hertz, dtype)
+        zero = self.backend.convert_to_tensor(0.0, dtype)
+        bands_to_zero = 1
+        nyquist_hertz = sampling_rate / 2.0
+        linear_frequencies = self.backend.numpy.linspace(zero, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:]
+        spectrogram_bins_mel = self.backend.numpy.expand_dims(self._hertz_to_mel(linear_frequencies), 1)
+        band_edges_mel = self.backend.math.extract_sequences(self.backend.numpy.linspace(self._hertz_to_mel(lower_edge_hertz), self._hertz_to_mel(upper_edge_hertz), num_mel_bins + 2), sequence_length=3, sequence_stride=1)
+        (lower_edge_mel, center_mel, upper_edge_mel) = tuple((self.backend.numpy.reshape(t, [1, num_mel_bins]) for t in self.backend.numpy.split(band_edges_mel, 3, axis=1)))
+        lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (center_mel - lower_edge_mel)
+        upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (upper_edge_mel - center_mel)
+        mel_weights_matrix = self.backend.numpy.maximum(zero, self.backend.numpy.minimum(lower_slopes, upper_slopes))
+        return self.backend.numpy.pad(mel_weights_matrix, [[bands_to_zero, 0], [0, 0]])
+
+    def compute_output_shape(self, input_shape):
+        if len(input_shape) == 1:
+            output_shape = [self.num_mel_bins, (input_shape[0] + self.sequence_stride + 1) // self.sequence_stride if input_shape[0] is not None else None]
+        else:
+            output_shape = [input_shape[0], self.num_mel_bins, (input_shape[1] + self.sequence_stride + 1) // self.sequence_stride if input_shape[1] is not None else None]
+        return output_shape
+
+    def get_config(self):
+        config = super().get_config()
+        config.update({'fft_length': self.fft_length, 'sequence_stride': self.sequence_stride, 'sequence_length': self.sequence_length, 'window': self.window, 'sampling_rate': self.sampling_rate, 'num_mel_bins': self.num_mel_bins, 'min_freq': self.min_freq, 'max_freq': self.max_freq, 'power_to_db': self.power_to_db, 'top_db': self.top_db, 'mag_exp': self.mag_exp, 'min_power': self.min_power, 'ref_power': self.ref_power})
+        return config
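+
+# [Editor's example -- not part of the Keras dump above.] The pipeline is
+# STFT magnitude -> mel projection -> optional dB scaling, and the final
+# swapaxes puts mel bins before time. A usage sketch; the exact number of
+# time frames depends on the stride and padding as computed above:
+import numpy as np
+import keras
+layer = keras.layers.MelSpectrogram(num_mel_bins=64, sampling_rate=8000, sequence_stride=256, fft_length=2048)
+audio = np.random.uniform(size=(2, 16000)).astype('float32')  # 2 clips, 2 s at 8 kHz
+spec = layer(audio)  # shape (2, 64, time_frames)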
+
+# File: keras-master/keras/src/layers/preprocessing/normalization.py
+import math
+import numpy as np
+from keras.src import backend
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
+from keras.src.utils.module_utils import tensorflow as tf
+
+@keras_export('keras.layers.Normalization')
+class Normalization(TFDataLayer):
+
+    def __init__(self, axis=-1, mean=None, variance=None, invert=False, **kwargs):
+        super().__init__(**kwargs)
+        if axis is None:
+            axis = ()
+        elif isinstance(axis, int):
+            axis = (axis,)
+        else:
+            axis = tuple(axis)
+        self.axis = axis
+        self.input_mean = mean
+        self.input_variance = variance
+        self.invert = invert
+        self.supports_masking = True
+        self._build_input_shape = None
+        self.mean = None
+        if (mean is not None) != (variance is not None):
+            raise ValueError(f'When setting values directly, both `mean` and `variance` must be set. Received: mean={mean} and variance={variance}')
+
+    def build(self, input_shape):
+        if input_shape is None:
+            return
+        ndim = len(input_shape)
+        self._build_input_shape = input_shape
+        if any((a < -ndim or a >= ndim for a in self.axis)):
+            raise ValueError(f'All `axis` values must be in the range [-ndim, ndim). Received inputs with ndim={ndim}, while axis={self.axis}')
+        self._keep_axis = tuple(sorted([d if d >= 0 else d + ndim for d in self.axis]))
+        for d in self._keep_axis:
+            if input_shape[d] is None:
+                raise ValueError(f'All `axis` values to be kept must have a known shape. Received axis={self.axis}, inputs.shape={input_shape}, with unknown axis at index {d}')
+        self._reduce_axis = tuple((d for d in range(ndim) if d not in self._keep_axis))
+        self._reduce_axis_mask = [0 if d in self._keep_axis else 1 for d in range(ndim)]
+        self._broadcast_shape = [input_shape[d] if d in self._keep_axis else 1 for d in range(ndim)]
+        mean_and_var_shape = tuple((input_shape[d] for d in self._keep_axis))
+        self._mean_and_var_shape = mean_and_var_shape
+        if self.input_mean is None:
+            self.adapt_mean = self.add_weight(name='mean', shape=mean_and_var_shape, initializer='zeros', trainable=False)
+            self.adapt_variance = self.add_weight(name='variance', shape=mean_and_var_shape, initializer='ones', trainable=False)
+            self.count = self.add_weight(name='count', shape=(), dtype='int', initializer='zeros', trainable=False)
+            self.built = True
+            self.finalize_state()
+        else:
+            mean = ops.convert_to_tensor(self.input_mean)
+            variance = ops.convert_to_tensor(self.input_variance)
+            mean = ops.reshape(mean, self._broadcast_shape)
+            variance = ops.reshape(variance, self._broadcast_shape)
+            self.mean = ops.cast(mean, dtype=self.compute_dtype)
+            self.variance = ops.cast(variance, dtype=self.compute_dtype)
+            self.built = True
+
+    def adapt(self, data):
+        if isinstance(data, np.ndarray) or backend.is_tensor(data):
+            input_shape = data.shape
+        elif isinstance(data, tf.data.Dataset):
+            input_shape = tuple(data.element_spec.shape)
+            if len(input_shape) == 1:
+                data = data.batch(128)
+                input_shape = tuple(data.element_spec.shape)
+        if not self.built:
+            self.build(input_shape)
+        else:
+            for d in self._keep_axis:
+                if input_shape[d] != self._build_input_shape[d]:
+                    raise ValueError(f'The layer was built with input_shape={self._build_input_shape}, but adapt() is being called with data with an incompatible shape, data.shape={input_shape}')
with data with an incompatible shape, data.shape={input_shape}') + if isinstance(data, np.ndarray): + total_mean = np.mean(data, axis=self._reduce_axis) + total_var = np.var(data, axis=self._reduce_axis) + elif backend.is_tensor(data): + total_mean = ops.mean(data, axis=self._reduce_axis) + total_var = ops.var(data, axis=self._reduce_axis) + elif isinstance(data, tf.data.Dataset): + total_mean = ops.zeros(self._mean_and_var_shape) + total_var = ops.zeros(self._mean_and_var_shape) + total_count = 0 + for batch in data: + batch = backend.convert_to_tensor(batch, dtype=self.compute_dtype) + batch_mean = ops.mean(batch, axis=self._reduce_axis) + batch_var = ops.var(batch, axis=self._reduce_axis) + if self._reduce_axis: + batch_reduce_shape = (batch.shape[d] for d in self._reduce_axis) + batch_count = math.prod(batch_reduce_shape) + else: + batch_count = 1 + total_count += batch_count + batch_weight = float(batch_count) / total_count + existing_weight = 1.0 - batch_weight + new_total_mean = total_mean * existing_weight + batch_mean * batch_weight + total_var = (total_var + (total_mean - new_total_mean) ** 2) * existing_weight + (batch_var + (batch_mean - new_total_mean) ** 2) * batch_weight + total_mean = new_total_mean + self.adapt_mean.assign(total_mean) + self.adapt_variance.assign(total_var) + self.finalize_state() + + def finalize_state(self): + if self.input_mean is not None or not self.built: + return + self.mean = ops.reshape(self.adapt_mean, self._broadcast_shape) + self.mean = ops.cast(self.mean, self.compute_dtype) + self.variance = ops.reshape(self.adapt_variance, self._broadcast_shape) + self.variance = ops.cast(self.variance, self.compute_dtype) + + def call(self, inputs): + if self.mean is None: + raise ValueError('You must call `.build(input_shape)` on the layer before using it.') + inputs = self.backend.core.convert_to_tensor(inputs, dtype=self.compute_dtype) + mean = self.convert_weight(self.mean) + variance = self.convert_weight(self.variance) + if self.invert: + return self.backend.numpy.add(mean, self.backend.numpy.multiply(inputs, self.backend.numpy.maximum(self.backend.numpy.sqrt(variance), backend.epsilon()))) + else: + return self.backend.numpy.divide(self.backend.numpy.subtract(inputs, mean), self.backend.numpy.maximum(self.backend.numpy.sqrt(variance), backend.epsilon())) + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + config = super().get_config() + config.update({'axis': self.axis, 'invert': self.invert, 'mean': np.array(self.input_mean).tolist(), 'variance': np.array(self.input_variance).tolist()}) + return config + + def load_own_variables(self, store): + super().load_own_variables(store) + self.finalize_state() + + def get_build_config(self): + if self._build_input_shape: + return {'input_shape': self._build_input_shape} + + def build_from_config(self, config): + if config: + self.build(config['input_shape']) + +# File: keras-master/keras/src/layers/preprocessing/rescaling.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src.saving import serialization_lib + +@keras_export('keras.layers.Rescaling') +class Rescaling(TFDataLayer): + + def __init__(self, scale, offset=0.0, **kwargs): + super().__init__(**kwargs) + self.scale = scale + self.offset = offset + self.supports_masking = True + + def call(self, inputs): + dtype = self.compute_dtype + scale = self.backend.cast(self.scale, dtype) + offset = 
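+# Editor's sketch (illustrative, not part of the Keras source): the
+# adapt()/call() contract of Normalization above. adapt() accumulates a
+# per-feature mean and variance (streaming over tf.data datasets), and
+# call() standardizes; invert=True applies the inverse transform. The data
+# below is made up:
+import numpy as np
+import keras
+
+data = np.array([[0.0, 10.0], [2.0, 20.0], [4.0, 30.0]], dtype='float32')
+norm = keras.layers.Normalization(axis=-1)
+norm.adapt(data)
+out = norm(data)  # ~zero mean, unit variance per column
+denorm = keras.layers.Normalization(axis=-1, invert=True)
+denorm.adapt(data)
+roundtrip = denorm(out)  # recovers `data` up to backend.epsilon()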
self.backend.cast(self.offset, dtype) + scale_shape = self.backend.core.shape(scale) + if len(scale_shape) > 0 and backend.image_data_format() == 'channels_first': + scale = self.backend.numpy.reshape(scale, scale_shape + (1,) * (3 - len(scale_shape))) + return self.backend.cast(inputs, dtype) * scale + offset + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + config = super().get_config() + config.update({'scale': serialization_lib.serialize_keras_object(self.scale), 'offset': serialization_lib.serialize_keras_object(self.offset)}) + return config + + @classmethod + def from_config(cls, config, custom_objects=None): + config = config.copy() + config['scale'] = serialization_lib.deserialize_keras_object(config['scale'], custom_objects=custom_objects) + config['offset'] = serialization_lib.deserialize_keras_object(config['offset'], custom_objects=custom_objects) + return cls(**config) + +# File: keras-master/keras/src/layers/preprocessing/string_lookup.py +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.index_lookup import IndexLookup +from keras.src.utils import backend_utils +from keras.src.utils.module_utils import tensorflow as tf + +@keras_export('keras.layers.StringLookup') +class StringLookup(IndexLookup): + + def __init__(self, max_tokens=None, num_oov_indices=1, mask_token=None, oov_token='[UNK]', vocabulary=None, idf_weights=None, invert=False, output_mode='int', pad_to_max_tokens=False, sparse=False, encoding='utf-8', name=None, **kwargs): + if not tf.available: + raise ImportError('Layer StringLookup requires TensorFlow. Install it via `pip install tensorflow`.') + if sparse and backend.backend() != 'tensorflow': + raise ValueError('`sparse=True` can only be used with the TensorFlow backend.') + self.encoding = encoding + super().__init__(max_tokens=max_tokens, num_oov_indices=num_oov_indices, mask_token=mask_token, oov_token=oov_token, vocabulary=vocabulary, idf_weights=idf_weights, invert=invert, output_mode=output_mode, pad_to_max_tokens=pad_to_max_tokens, sparse=sparse, name=name, vocabulary_dtype='string', **kwargs) + self._convert_input_args = False + self._allow_non_tensor_positional_args = True + self.supports_jit = False + + def adapt(self, data, steps=None): + super().adapt(data, steps=steps) + + def _tensor_vocab_to_numpy(self, vocabulary): + vocabulary = vocabulary.numpy() + return np.array([tf.compat.as_text(x, self.encoding) for x in vocabulary]) + + def get_config(self): + config = {'encoding': self.encoding} + base_config = super().get_config() + del base_config['vocabulary_dtype'] + return {**base_config, **config} + + def call(self, inputs): + if isinstance(inputs, (tf.Tensor, tf.RaggedTensor, tf.SparseTensor)): + tf_inputs = True + else: + tf_inputs = False + if not isinstance(inputs, (np.ndarray, list, tuple)): + inputs = tf.convert_to_tensor(backend.convert_to_numpy(inputs)) + outputs = super().call(inputs) + if not tf_inputs: + outputs = backend_utils.convert_tf_tensor(outputs) + return outputs + +# File: keras-master/keras/src/layers/preprocessing/text_vectorization.py +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.layers.preprocessing.index_lookup import listify_tensors +from keras.src.layers.preprocessing.string_lookup import StringLookup +from keras.src.saving import serialization_lib +from keras.src.utils import 
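+# Editor's sketch (illustrative, not part of the Keras source): quick usage
+# of the two layers above. Rescaling computes y = x * scale + offset;
+# StringLookup maps strings to integer ids, reserving index 0 for the OOV
+# token '[UNK]' by default. The vocabulary here is made up:
+import numpy as np
+import keras
+
+rescale = keras.layers.Rescaling(scale=1.0 / 255.0)
+print(rescale(np.array([0.0, 127.5, 255.0])))  # [0.0, 0.5, 1.0]
+
+lookup = keras.layers.StringLookup(vocabulary=['a', 'b', 'c'])
+print(lookup(np.array([['a', 'c', 'z']])))  # [[1, 3, 0]] -- 0 is '[UNK]'
+inverse = keras.layers.StringLookup(vocabulary=['a', 'b', 'c'], invert=True)
+print(inverse(np.array([[1, 3, 0]])))  # [['a', 'c', '[UNK]']]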
argument_validation +from keras.src.utils import backend_utils +from keras.src.utils import tf_utils +from keras.src.utils.module_utils import tensorflow as tf + +@keras_export('keras.layers.TextVectorization') +class TextVectorization(Layer): + + def __init__(self, max_tokens=None, standardize='lower_and_strip_punctuation', split='whitespace', ngrams=None, output_mode='int', output_sequence_length=None, pad_to_max_tokens=False, vocabulary=None, idf_weights=None, sparse=False, ragged=False, encoding='utf-8', name=None, **kwargs): + if not tf.available: + raise ImportError('Layer TextVectorization requires TensorFlow. Install it via `pip install tensorflow`.') + if sparse and backend.backend() != 'tensorflow': + raise ValueError('`sparse=True` can only be used with the TensorFlow backend.') + if ragged and backend.backend() != 'tensorflow': + raise ValueError('`ragged=True` can only be used with the TensorFlow backend.') + argument_validation.validate_string_arg(standardize, allowable_strings=('lower_and_strip_punctuation', 'lower', 'strip_punctuation'), caller_name=self.__class__.__name__, arg_name='standardize', allow_none=True, allow_callables=True) + argument_validation.validate_string_arg(split, allowable_strings=('whitespace', 'character'), caller_name=self.__class__.__name__, arg_name='split', allow_none=True, allow_callables=True) + if output_mode == 'binary': + output_mode = 'multi_hot' + if output_mode == 'tf-idf': + output_mode = 'tf_idf' + argument_validation.validate_string_arg(output_mode, allowable_strings=('int', 'one_hot', 'multi_hot', 'count', 'tf_idf'), caller_name=self.__class__.__name__, arg_name='output_mode') + if not (ngrams is None or isinstance(ngrams, int) or (isinstance(ngrams, tuple) and all((isinstance(item, int) for item in ngrams)))): + raise ValueError(f'`ngrams` must be None, an integer, or a tuple of integers. Received: ngrams={ngrams}') + if output_mode == 'int' and (not (isinstance(output_sequence_length, int) or output_sequence_length is None)): + raise ValueError(f"`output_sequence_length` must be either None or an integer when `output_mode` is 'int'. Received: output_sequence_length={output_sequence_length}") + if output_mode != 'int' and output_sequence_length is not None: + raise ValueError(f"`output_sequence_length` must not be set if `output_mode` is not 'int'. Received output_sequence_length={output_sequence_length}.") + if ragged and output_mode != 'int': + raise ValueError(f"`ragged` must not be true if `output_mode` is `'int'`. 
Received: ragged={ragged} and output_mode={output_mode}") + self._max_tokens = max_tokens + self._standardize = standardize + self._split = split + self._ngrams_arg = ngrams + if isinstance(ngrams, int): + self._ngrams = tuple(range(1, ngrams + 1)) + else: + self._ngrams = ngrams + self._ragged = ragged + self._output_mode = output_mode + self._output_sequence_length = output_sequence_length + self._encoding = encoding + self._has_input_vocabulary = kwargs.pop('has_input_vocabulary', vocabulary is not None) + vocabulary_size = kwargs.pop('vocabulary_size', None) + super().__init__(name=name, **kwargs) + self._lookup_layer = StringLookup(max_tokens=max_tokens, vocabulary=vocabulary, idf_weights=idf_weights, pad_to_max_tokens=pad_to_max_tokens, mask_token='', output_mode=output_mode, sparse=sparse, has_input_vocabulary=self._has_input_vocabulary, encoding=encoding, vocabulary_size=vocabulary_size) + self._convert_input_args = False + self._allow_non_tensor_positional_args = True + self.supports_jit = False + + @property + def compute_dtype(self): + return 'string' + + @property + def variable_dtype(self): + return 'string' + + def build(self, input_shape=None): + pass + + def compute_output_shape(self, input_shape): + if self._output_mode == 'int': + return (input_shape[0], self._output_sequence_length) + if self._split is None: + if len(input_shape) <= 1: + input_shape = tuple(input_shape) + (1,) + else: + input_shape = tuple(input_shape) + (None,) + return self._lookup_layer.compute_output_shape(input_shape) + + def compute_output_spec(self, inputs): + output_shape = self.compute_output_shape(inputs.shape) + if self._output_mode == 'int': + output_dtype = 'int64' + else: + output_dtype = backend.floatx() + return backend.KerasTensor(output_shape, dtype=output_dtype) + + def adapt(self, data, batch_size=None, steps=None): + self.reset_state() + if isinstance(data, tf.data.Dataset): + if steps is not None: + data = data.take(steps) + for batch in data: + self.update_state(batch) + else: + data = tf_utils.ensure_tensor(data, dtype='string') + if data.shape.rank == 1: + data = tf.expand_dims(data, -1) + self.update_state(data) + self.finalize_state() + + def update_state(self, data): + self._lookup_layer.update_state(self._preprocess(data)) + + def finalize_state(self): + self._lookup_layer.finalize_state() + + def reset_state(self): + self._lookup_layer.reset_state() + + def get_vocabulary(self, include_special_tokens=True): + return self._lookup_layer.get_vocabulary(include_special_tokens) + + def vocabulary_size(self): + return self._lookup_layer.vocabulary_size() + + def get_config(self): + config = {'max_tokens': self._lookup_layer.max_tokens, 'standardize': self._standardize, 'split': self._split, 'ngrams': self._ngrams_arg, 'output_mode': self._output_mode, 'output_sequence_length': self._output_sequence_length, 'pad_to_max_tokens': self._lookup_layer.pad_to_max_tokens, 'sparse': self._lookup_layer.sparse, 'ragged': self._ragged, 'vocabulary': listify_tensors(self._lookup_layer.input_vocabulary), 'idf_weights': listify_tensors(self._lookup_layer.input_idf_weights), 'encoding': self._encoding, 'vocabulary_size': self.vocabulary_size()} + base_config = super().get_config() + return {**base_config, **config} + + @classmethod + def from_config(cls, config): + if not isinstance(config['standardize'], str): + config['standardize'] = serialization_lib.deserialize_keras_object(config['standardize']) + if not isinstance(config['split'], str): + config['split'] = 
serialization_lib.deserialize_keras_object(config['split']) + if isinstance(config['ngrams'], list): + config['ngrams'] = tuple(config['ngrams']) + return cls(**config) + + def set_vocabulary(self, vocabulary, idf_weights=None): + self._lookup_layer.set_vocabulary(vocabulary, idf_weights=idf_weights) + + def _preprocess(self, inputs): + inputs = tf_utils.ensure_tensor(inputs, dtype=tf.string) + if self._standardize in ('lower', 'lower_and_strip_punctuation'): + inputs = tf.strings.lower(inputs) + if self._standardize in ('strip_punctuation', 'lower_and_strip_punctuation'): + inputs = tf.strings.regex_replace(inputs, '[!"#$%&()\\*\\+,-\\./:;<=>?@\\[\\\\\\]^_`{|}~\\\']', '') + if callable(self._standardize): + inputs = self._standardize(inputs) + if self._split is not None: + if inputs.shape.rank > 1: + if inputs.shape[-1] != 1: + raise ValueError(f'When using `TextVectorization` to tokenize strings, the input rank must be 1 or the last shape dimension must be 1. Received: inputs.shape={inputs.shape} with rank={inputs.shape.rank}') + else: + inputs = tf.squeeze(inputs, axis=-1) + if self._split == 'whitespace': + inputs = tf.strings.split(inputs) + elif self._split == 'character': + inputs = tf.strings.unicode_split(inputs, 'UTF-8') + elif callable(self._split): + inputs = self._split(inputs) + if self._ngrams is not None: + inputs = tf.strings.ngrams(inputs, ngram_width=self._ngrams, separator=' ') + return inputs + + def call(self, inputs): + if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor, np.ndarray, list, tuple)): + inputs = tf.convert_to_tensor(backend.convert_to_numpy(inputs)) + inputs = self._preprocess(inputs) + if self._output_mode is None: + outputs = inputs + lookup_data = self._lookup_layer.call(inputs) + if self._output_mode != 'int': + return backend_utils.convert_tf_tensor(lookup_data) + if isinstance(lookup_data, tf.RaggedTensor) and (not self._ragged): + shape = lookup_data.shape.as_list() + shape[-1] = self._output_sequence_length + outputs = lookup_data.to_tensor(default_value=0, shape=shape) + else: + outputs = lookup_data + if self._output_sequence_length is not None: + outputs = outputs[..., :self._output_sequence_length] + if not self._ragged: + shape = tf.shape(outputs) + padded_shape = tf.concat((shape[:-1], [self._output_sequence_length]), 0) + (padding, _) = tf.required_space_to_batch_paddings(shape, padded_shape) + outputs = tf.pad(outputs, padding) + return backend_utils.convert_tf_tensor(outputs) + + def save_own_variables(self, store): + self._lookup_layer.save_own_variables(store) + + def load_own_variables(self, store): + self._lookup_layer.load_own_variables(store) + + def save_assets(self, dir_path): + self._lookup_layer.save_assets(dir_path) + + def load_assets(self, dir_path): + self._lookup_layer.load_assets(dir_path) + +# File: keras-master/keras/src/layers/preprocessing/tf_data_layer.py +import keras.src.backend +from keras.src import tree +from keras.src.layers.layer import Layer +from keras.src.random.seed_generator import SeedGenerator +from keras.src.utils import backend_utils +from keras.src.utils import jax_utils +from keras.src.utils import tracking + +class TFDataLayer(Layer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.backend = backend_utils.DynamicBackend() + self._allow_non_tensor_positional_args = True + + def __call__(self, inputs, **kwargs): + sample_input = tree.flatten(inputs)[0] + if not isinstance(sample_input, keras.KerasTensor) and backend_utils.in_tf_graph() and (not 
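+# Editor's sketch (illustrative, not part of the Keras source): the
+# end-to-end TextVectorization flow implemented above is
+# standardize -> split -> (optional ngrams) -> StringLookup -> pad/truncate.
+# Requires the tensorflow package; the corpus is made up:
+import numpy as np
+import keras
+
+corpus = np.array(['The quick brown fox', 'jumped over the lazy dog'])
+vectorizer = keras.layers.TextVectorization(output_mode='int', output_sequence_length=6)
+vectorizer.adapt(corpus)
+print(vectorizer(np.array(['the fox jumped'])))  # shape (1, 6): token ids, zero-padded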
jax_utils.is_in_jax_tracing_scope(sample_input)): + self.backend.set_backend('tensorflow') + inputs = tree.map_structure(lambda x: self.backend.convert_to_tensor(x, dtype=self.compute_dtype), inputs) + switch_convert_input_args = False + if self._convert_input_args: + self._convert_input_args = False + switch_convert_input_args = True + try: + outputs = super().__call__(inputs, **kwargs) + finally: + self.backend.reset() + if switch_convert_input_args: + self._convert_input_args = True + return outputs + return super().__call__(inputs, **kwargs) + + @tracking.no_automatic_dependency_tracking + def _get_seed_generator(self, backend=None): + if backend is None or backend == keras.backend.backend(): + return self.generator + if not hasattr(self, '_backend_generators'): + self._backend_generators = {} + if backend in self._backend_generators: + return self._backend_generators[backend] + seed_generator = SeedGenerator(self.seed, backend=self.backend) + self._backend_generators[backend] = seed_generator + return seed_generator + + def convert_weight(self, weight): + if self.backend.name == keras.backend.backend(): + return weight + else: + weight = keras.ops.convert_to_numpy(weight) + return self.backend.convert_to_tensor(weight) + +# File: keras-master/keras/src/layers/regularization/activity_regularization.py +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.ActivityRegularization') +class ActivityRegularization(Layer): + + def __init__(self, l1=0.0, l2=0.0, **kwargs): + super().__init__(activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs) + self.supports_masking = True + self.l1 = l1 + self.l2 = l2 + self.built = True + + def call(self, inputs): + return inputs + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + base_config = super().get_config() + base_config.pop('activity_regularizer', None) + config = {'l1': self.l1, 'l2': self.l2} + return {**base_config, **config} + +# File: keras-master/keras/src/layers/regularization/alpha_dropout.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.AlphaDropout') +class AlphaDropout(Layer): + + def __init__(self, rate, noise_shape=None, seed=None, **kwargs): + super().__init__(**kwargs) + if not 0 <= rate <= 1: + raise ValueError(f'Invalid value received for argument `rate`. Expected a float value between 0 and 1. 
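+# Editor's sketch (illustrative, not part of the Keras source): the point of
+# TFDataLayer's DynamicBackend above is that a preprocessing layer can run
+# inside a tf.data pipeline with TensorFlow ops even when the global Keras
+# backend is jax or torch. Assumes tensorflow is installed:
+import numpy as np
+import tensorflow as tf
+import keras
+
+rescale = keras.layers.Rescaling(scale=1.0 / 255.0)  # a TFDataLayer subclass
+ds = tf.data.Dataset.from_tensor_slices(np.arange(6, dtype='float32'))
+ds = ds.batch(2).map(lambda x: rescale(x))  # __call__ switches to the TF backend here
+for batch in ds:
+    print(batch)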
Received: rate={rate}') + self.rate = rate + self.seed = seed + self.noise_shape = noise_shape + if rate > 0: + self.seed_generator = backend.random.SeedGenerator(seed) + self.supports_masking = True + self.built = True + + def call(self, inputs, training=False): + if training and self.rate > 0: + noise_shape = self._get_concrete_noise_shape(inputs, self.noise_shape) + alpha = 1.6732632423543772 + scale = 1.0507009873554805 + alpha_p = -alpha * scale + kept_idx = ops.greater_equal(ops.random.uniform(noise_shape, seed=self.seed_generator), self.rate) + kept_idx = ops.cast(kept_idx, inputs.dtype) + a = ((1 - self.rate) * (1 + self.rate * alpha_p ** 2)) ** (-0.5) + b = -a * alpha_p * self.rate + x = inputs * kept_idx + alpha_p * (1 - kept_idx) + return a * x + b + return inputs + + def compute_output_shape(self, input_shape): + return input_shape + + def _get_concrete_noise_shape(self, inputs, noise_shape): + if noise_shape is None: + return ops.shape(inputs) + concrete_inputs_shape = ops.shape(inputs) + concrete_noise_shape = [] + for (i, value) in enumerate(noise_shape): + concrete_noise_shape.append(concrete_inputs_shape[i] if value is None else value) + return concrete_noise_shape + + def get_config(self): + base_config = super().get_config() + config = {'rate': self.rate, 'seed': self.seed, 'noise_shape': self.noise_shape} + return {**base_config, **config} + +# File: keras-master/keras/src/layers/regularization/dropout.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.Dropout') +class Dropout(Layer): + + def __init__(self, rate, noise_shape=None, seed=None, **kwargs): + super().__init__(**kwargs) + if not 0 <= rate <= 1: + raise ValueError(f'Invalid value received for argument `rate`. Expected a float value between 0 and 1. Received: rate={rate}') + self.rate = rate + self.seed = seed + self.noise_shape = noise_shape + if rate > 0: + self.seed_generator = backend.random.SeedGenerator(seed) + self.supports_masking = True + self.built = True + + def call(self, inputs, training=False): + if training and self.rate > 0: + return backend.random.dropout(inputs, self.rate, noise_shape=self.noise_shape, seed=self.seed_generator) + return inputs + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + base_config = super().get_config() + config = {'rate': self.rate, 'seed': self.seed, 'noise_shape': self.noise_shape} + return {**base_config, **config} + +# File: keras-master/keras/src/layers/regularization/gaussian_dropout.py +import math +from keras.src import backend +from keras.src import layers +from keras.src import ops +from keras.src.api_export import keras_export + +@keras_export('keras.layers.GaussianDropout') +class GaussianDropout(layers.Layer): + + def __init__(self, rate, seed=None, **kwargs): + super().__init__(**kwargs) + if not 0 <= rate <= 1: + raise ValueError(f'Invalid value received for argument `rate`. Expected a float value between 0 and 1. 
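+# Editor's sketch (illustrative, not part of the Keras source): the two
+# dropout variants above keep activation statistics stable in different
+# ways. Plain Dropout rescales surviving units by 1/(1 - rate); AlphaDropout
+# sets dropped units to -alpha*scale and applies the affine a*x + b computed
+# in call() so selu activations stay self-normalizing. The data is made up:
+import numpy as np
+import keras
+
+x = np.random.randn(4, 8).astype('float32')
+drop = keras.layers.Dropout(rate=0.5)
+print(drop(x, training=False))  # identity at inference time
+print(drop(x, training=True))   # random zeros; survivors scaled by 2.0
+alpha_drop = keras.layers.AlphaDropout(rate=0.2)
+print(alpha_drop(x, training=True))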
Received: rate={rate}') + self.rate = rate + self.seed = seed + if rate > 0: + self.seed_generator = backend.random.SeedGenerator(seed) + self.supports_masking = True + self.built = True + + def call(self, inputs, training=False): + if training and self.rate > 0: + stddev = math.sqrt(self.rate / (1.0 - self.rate)) + return inputs * backend.random.normal(shape=ops.shape(inputs), mean=1.0, stddev=stddev, dtype=self.compute_dtype, seed=self.seed_generator) + return inputs + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + base_config = super().get_config() + config = {'rate': self.rate, 'seed': self.seed} + return {**base_config, **config} + +# File: keras-master/keras/src/layers/regularization/gaussian_noise.py +from keras.src import backend +from keras.src import layers +from keras.src import ops +from keras.src.api_export import keras_export + +@keras_export('keras.layers.GaussianNoise') +class GaussianNoise(layers.Layer): + + def __init__(self, stddev, seed=None, **kwargs): + super().__init__(**kwargs) + if not 0 <= stddev <= 1: + raise ValueError(f'Invalid value received for argument `stddev`. Expected a float value between 0 and 1. Received: stddev={stddev}') + self.stddev = stddev + self.seed = seed + if stddev > 0: + self.seed_generator = backend.random.SeedGenerator(seed) + self.supports_masking = True + self.built = True + + def call(self, inputs, training=False): + if training and self.stddev > 0: + return inputs + backend.random.normal(shape=ops.shape(inputs), mean=0.0, stddev=self.stddev, dtype=self.compute_dtype, seed=self.seed_generator) + return inputs + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + base_config = super().get_config() + config = {'stddev': self.stddev, 'seed': self.seed} + return {**base_config, **config} + +# File: keras-master/keras/src/layers/regularization/spatial_dropout.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.regularization.dropout import Dropout + +class BaseSpatialDropout(Dropout): + + def __init__(self, rate, seed=None, name=None, dtype=None): + super().__init__(rate, seed=seed, name=name, dtype=dtype) + + def call(self, inputs, training=False): + if training and self.rate > 0: + return backend.random.dropout(inputs, self.rate, noise_shape=self._get_noise_shape(inputs), seed=self.seed_generator) + return inputs + + def get_config(self): + return {'rate': self.rate, 'seed': self.seed, 'name': self.name, 'dtype': self.dtype} + +@keras_export('keras.layers.SpatialDropout1D') +class SpatialDropout1D(BaseSpatialDropout): + + def __init__(self, rate, seed=None, name=None, dtype=None): + super().__init__(rate, seed=seed, name=name, dtype=dtype) + self.input_spec = InputSpec(ndim=3) + + def _get_noise_shape(self, inputs): + input_shape = ops.shape(inputs) + return (input_shape[0], 1, input_shape[2]) + +@keras_export('keras.layers.SpatialDropout2D') +class SpatialDropout2D(BaseSpatialDropout): + + def __init__(self, rate, data_format=None, seed=None, name=None, dtype=None): + super().__init__(rate, seed=seed, name=name, dtype=dtype) + self.data_format = backend.standardize_data_format(data_format) + self.input_spec = InputSpec(ndim=4) + + def _get_noise_shape(self, inputs): + input_shape = ops.shape(inputs) + if self.data_format == 'channels_first': + return (input_shape[0], input_shape[1], 1, 1) + elif self.data_format == 
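+# Editor's note (illustrative, not part of the Keras source): both layers
+# above are train-time-only noise layers. GaussianNoise adds N(0, stddev)
+# noise; GaussianDropout multiplies by N(1, stddev) noise with
+# stddev = sqrt(rate / (1 - rate)), chosen to match regular dropout's
+# variance. Checking that formula with an illustrative rate:
+import math
+rate = 0.2
+print(math.sqrt(rate / (1.0 - rate)))  # 0.5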
'channels_last': + return (input_shape[0], 1, 1, input_shape[3]) + + def get_config(self): + base_config = super().get_config() + config = {'data_format': self.data_format} + return {**base_config, **config} + +@keras_export('keras.layers.SpatialDropout3D') +class SpatialDropout3D(BaseSpatialDropout): + + def __init__(self, rate, data_format=None, seed=None, name=None, dtype=None): + super().__init__(rate, seed=seed, name=name, dtype=dtype) + self.data_format = backend.standardize_data_format(data_format) + self.input_spec = InputSpec(ndim=5) + + def _get_noise_shape(self, inputs): + input_shape = ops.shape(inputs) + if self.data_format == 'channels_first': + return (input_shape[0], input_shape[1], 1, 1, 1) + elif self.data_format == 'channels_last': + return (input_shape[0], 1, 1, 1, input_shape[4]) + + def get_config(self): + base_config = super().get_config() + config = {'data_format': self.data_format} + return {**base_config, **config} + +# File: keras-master/keras/src/layers/reshaping/cropping1d.py +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + +@keras_export('keras.layers.Cropping1D') +class Cropping1D(Layer): + + def __init__(self, cropping=(1, 1), **kwargs): + super().__init__(**kwargs) + self.cropping = argument_validation.standardize_tuple(cropping, 2, 'cropping', allow_zero=True) + self.input_spec = InputSpec(ndim=3) + + def compute_output_shape(self, input_shape): + if input_shape[1] is not None: + length = input_shape[1] - self.cropping[0] - self.cropping[1] + if length <= 0: + raise ValueError(f'`cropping` parameter of `Cropping1D` layer must be smaller than the input length. Received: input_shape={input_shape}, cropping={self.cropping}') + else: + length = None + return (input_shape[0], length, input_shape[2]) + + def call(self, inputs): + if inputs.shape[1] is not None and sum(self.cropping) >= inputs.shape[1]: + raise ValueError(f'`cropping` parameter of `Cropping1D` layer must be smaller than the input length. Received: inputs.shape={inputs.shape}, cropping={self.cropping}') + if self.cropping[1] == 0: + return inputs[:, self.cropping[0]:, :] + else: + return inputs[:, self.cropping[0]:-self.cropping[1], :] + + def get_config(self): + config = {'cropping': self.cropping} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/reshaping/cropping2d.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + +@keras_export('keras.layers.Cropping2D') +class Cropping2D(Layer): + + def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + if isinstance(cropping, int): + if cropping < 0: + raise ValueError(f'`cropping` cannot be negative. Received: cropping={cropping}.') + self.cropping = ((cropping, cropping), (cropping, cropping)) + elif hasattr(cropping, '__len__'): + if len(cropping) != 2: + raise ValueError(f'`cropping` should have two elements. 
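+# Editor's sketch (illustrative, not part of the Keras source): the spatial
+# dropout variants above differ from Dropout only in their noise shape, so
+# whole feature maps are kept or dropped together -- useful when adjacent
+# pixels are strongly correlated. For channels_last 2D inputs the noise
+# shape computed above is (batch, 1, 1, channels):
+import numpy as np
+import keras
+
+x = np.ones((2, 8, 8, 16), dtype='float32')
+sd = keras.layers.SpatialDropout2D(rate=0.5)
+y = keras.ops.convert_to_numpy(sd(x, training=True))
+# Each channel is either all zeros or uniformly scaled by 1/(1 - rate) = 2.0:
+print(np.unique(y[0, :, :, 0]))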
Received: cropping={cropping}.') + height_cropping = argument_validation.standardize_tuple(cropping[0], 2, '1st entry of cropping', allow_zero=True) + width_cropping = argument_validation.standardize_tuple(cropping[1], 2, '2nd entry of cropping', allow_zero=True) + self.cropping = (height_cropping, width_cropping) + else: + raise ValueError(f'`cropping` should be either an int, a tuple of 2 ints (symmetric_height_crop, symmetric_width_crop), or a tuple of 2 tuples of 2 ints ((top_crop, bottom_crop), (left_crop, right_crop)). Received: cropping={cropping}.') + self.input_spec = InputSpec(ndim=4) + + def compute_output_shape(self, input_shape): + if self.data_format == 'channels_first': + if input_shape[2] is not None and sum(self.cropping[0]) >= input_shape[2] or (input_shape[3] is not None and sum(self.cropping[1]) >= input_shape[3]): + raise ValueError(f'Values in `cropping` argument should be smaller than the corresponding spatial dimension of the input. Received: input_shape={input_shape}, cropping={self.cropping}') + return (input_shape[0], input_shape[1], input_shape[2] - self.cropping[0][0] - self.cropping[0][1] if input_shape[2] is not None else None, input_shape[3] - self.cropping[1][0] - self.cropping[1][1] if input_shape[3] is not None else None) + else: + if input_shape[1] is not None and sum(self.cropping[0]) >= input_shape[1] or (input_shape[2] is not None and sum(self.cropping[1]) >= input_shape[2]): + raise ValueError(f'Values in `cropping` argument should be smaller than the corresponding spatial dimension of the input. Received: input_shape={input_shape}, cropping={self.cropping}') + return (input_shape[0], input_shape[1] - self.cropping[0][0] - self.cropping[0][1] if input_shape[1] is not None else None, input_shape[2] - self.cropping[1][0] - self.cropping[1][1] if input_shape[2] is not None else None, input_shape[3]) + + def call(self, inputs): + if self.data_format == 'channels_first': + if inputs.shape[2] is not None and sum(self.cropping[0]) >= inputs.shape[2] or (inputs.shape[3] is not None and sum(self.cropping[1]) >= inputs.shape[3]): + raise ValueError(f'Values in `cropping` argument should be smaller than the corresponding spatial dimension of the input. Received: inputs.shape={inputs.shape}, cropping={self.cropping}') + if self.cropping[0][1] == self.cropping[1][1] == 0: + return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:] + elif self.cropping[0][1] == 0: + return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:-self.cropping[1][1]] + elif self.cropping[1][1] == 0: + return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:] + return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:-self.cropping[1][1]] + else: + if inputs.shape[1] is not None and sum(self.cropping[0]) >= inputs.shape[1] or (inputs.shape[2] is not None and sum(self.cropping[1]) >= inputs.shape[2]): + raise ValueError(f'Values in `cropping` argument should be smaller than the corresponding spatial dimension of the input. 
Received: inputs.shape={inputs.shape}, cropping={self.cropping}') + if self.cropping[0][1] == self.cropping[1][1] == 0: + return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :] + elif self.cropping[0][1] == 0: + return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:-self.cropping[1][1], :] + elif self.cropping[1][1] == 0: + return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:, :] + return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:-self.cropping[1][1], :] + + def get_config(self): + config = {'cropping': self.cropping, 'data_format': self.data_format} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/reshaping/cropping3d.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + +@keras_export('keras.layers.Cropping3D') +class Cropping3D(Layer): + + def __init__(self, cropping=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + if isinstance(cropping, int): + if cropping < 0: + raise ValueError(f'`cropping` cannot be negative. Received: cropping={cropping}.') + self.cropping = ((cropping, cropping), (cropping, cropping), (cropping, cropping)) + elif hasattr(cropping, '__len__'): + if len(cropping) != 3: + raise ValueError(f'`cropping` should have 3 elements. Received: {cropping}.') + dim1_cropping = argument_validation.standardize_tuple(cropping[0], 2, '1st entry of cropping', allow_zero=True) + dim2_cropping = argument_validation.standardize_tuple(cropping[1], 2, '2nd entry of cropping', allow_zero=True) + dim3_cropping = argument_validation.standardize_tuple(cropping[2], 2, '3rd entry of cropping', allow_zero=True) + self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping) + else: + raise ValueError(f'`cropping` should be either an int, a tuple of 3 ints (symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), or a tuple of 3 tuples of 2 ints ((left_dim1_crop, right_dim1_crop), (left_dim2_crop, right_dim2_crop), (left_dim3_crop, right_dim3_crop)). Received: {cropping}.') + self.input_spec = InputSpec(ndim=5) + + def compute_output_shape(self, input_shape): + if self.data_format == 'channels_first': + spatial_dims = list(input_shape[2:5]) + else: + spatial_dims = list(input_shape[1:4]) + for index in range(0, 3): + if spatial_dims[index] is None: + continue + spatial_dims[index] -= sum(self.cropping[index]) + if spatial_dims[index] <= 0: + raise ValueError(f'Values in `cropping` argument should be smaller than the corresponding spatial dimension of the input. Received: input_shape={input_shape}, cropping={self.cropping}') + if self.data_format == 'channels_first': + return (input_shape[0], input_shape[1], *spatial_dims) + else: + return (input_shape[0], *spatial_dims, input_shape[4]) + + def call(self, inputs): + if self.data_format == 'channels_first': + spatial_dims = list(inputs.shape[2:5]) + else: + spatial_dims = list(inputs.shape[1:4]) + for index in range(0, 3): + if spatial_dims[index] is None: + continue + spatial_dims[index] -= sum(self.cropping[index]) + if spatial_dims[index] <= 0: + raise ValueError(f'Values in `cropping` argument should be smaller than the corresponding spatial dimension of the input. 
Received: inputs.shape={inputs.shape}, cropping={self.cropping}') + if self.data_format == 'channels_first': + if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0: + return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]:] + elif self.cropping[0][1] == self.cropping[1][1] == 0: + return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]] + elif self.cropping[1][1] == self.cropping[2][1] == 0: + return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:] + elif self.cropping[0][1] == self.cropping[2][1] == 0: + return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:] + elif self.cropping[0][1] == 0: + return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]] + elif self.cropping[1][1] == 0: + return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]] + elif self.cropping[2][1] == 0: + return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:] + return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]] + else: + if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0: + return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]:, :] + elif self.cropping[0][1] == self.cropping[1][1] == 0: + return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1], :] + elif self.cropping[1][1] == self.cropping[2][1] == 0: + return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:, :] + elif self.cropping[0][1] == self.cropping[2][1] == 0: + return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:, :] + elif self.cropping[0][1] == 0: + return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1], :] + elif self.cropping[1][1] == 0: + return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1], :] + elif self.cropping[2][1] == 0: + return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:, :] + return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1], :] + + def get_config(self): + config = {'cropping': self.cropping, 'data_format': self.data_format} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/reshaping/flatten.py +import math +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.Flatten') +class Flatten(Layer): + + def __init__(self, data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + self.input_spec = InputSpec(min_ndim=1) + self._channels_first = self.data_format == 'channels_first' + + def call(self, inputs): + 
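+# Editor's sketch (illustrative, not part of the Keras source): shape
+# arithmetic for the cropping layers above -- each spatial dimension shrinks
+# by the sum of its (start, end) crop amounts:
+import numpy as np
+import keras
+
+images = np.zeros((1, 28, 28, 3), dtype='float32')
+print(keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(images).shape)  # (1, 24, 20, 3)
+volumes = np.zeros((1, 8, 8, 8, 1), dtype='float32')
+print(keras.layers.Cropping3D(cropping=1)(volumes).shape)  # (1, 6, 6, 6, 1)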
input_shape = inputs.shape + rank = len(input_shape) + if self._channels_first and rank > 1: + inputs = ops.transpose(inputs, axes=(0, *range(2, rank), 1)) + output_shape = tuple((dim if dim is not None else -1 for dim in self.compute_output_shape(input_shape))) + return ops.reshape(inputs, output_shape) + + def compute_output_shape(self, input_shape): + non_batch_dims = input_shape[1:] + if len(non_batch_dims) == 0: + flattened_dim = 1 + elif any((d is None for d in non_batch_dims)): + flattened_dim = None + else: + flattened_dim = math.prod(non_batch_dims) + return (input_shape[0], flattened_dim) + + def compute_output_spec(self, inputs): + output_shape = self.compute_output_shape(inputs.shape) + return KerasTensor(shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse) + + def get_config(self): + config = {'data_format': self.data_format} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/reshaping/permute.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.Permute') +class Permute(Layer): + + def __init__(self, dims, **kwargs): + super().__init__(**kwargs) + self.dims = tuple(dims) + if sorted(dims) != list(range(1, len(dims) + 1)): + raise ValueError(f'Invalid permutation argument `dims` for Permute Layer. The set of indices in `dims` must be consecutive and start from 1. Received dims={dims}') + self.input_spec = InputSpec(ndim=len(self.dims) + 1) + + def compute_output_shape(self, input_shape): + output_shape = [input_shape[0]] + for dim in self.dims: + output_shape.append(input_shape[dim]) + return tuple(output_shape) + + def compute_output_spec(self, inputs): + output_shape = self.compute_output_shape(inputs.shape) + return KerasTensor(shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse) + + def call(self, inputs): + return ops.transpose(inputs, axes=(0,) + self.dims) + + def get_config(self): + config = {'dims': self.dims} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/reshaping/repeat_vector.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.RepeatVector') +class RepeatVector(Layer): + + def __init__(self, n, **kwargs): + super().__init__(**kwargs) + self.n = n + if not isinstance(n, int): + raise TypeError(f'Expected an integer value for `n`, got {type(n)}.') + self.input_spec = InputSpec(ndim=2) + + def compute_output_shape(self, input_shape): + return (input_shape[0], self.n, input_shape[1]) + + def call(self, inputs): + input_shape = ops.shape(inputs) + reshaped = ops.reshape(inputs, (input_shape[0], 1, input_shape[1])) + return ops.repeat(reshaped, self.n, axis=1) + + def get_config(self): + config = {'n': self.n} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/reshaping/reshape.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.layer import Layer +from keras.src.ops import operation_utils + +@keras_export('keras.layers.Reshape') +class Reshape(Layer): + + def __init__(self, target_shape, **kwargs): + 
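+# Editor's sketch (illustrative, not part of the Keras source): shape
+# behavior of the three layers above. Flatten collapses all non-batch dims
+# (transposing first under channels_first), Permute reorders non-batch dims
+# using 1-based indices, and RepeatVector turns (batch, features) into
+# (batch, n, features):
+import numpy as np
+import keras
+
+x = np.zeros((2, 3, 4), dtype='float32')
+print(keras.layers.Flatten()(x).shape)        # (2, 12)
+print(keras.layers.Permute((2, 1))(x).shape)  # (2, 4, 3)
+v = np.zeros((2, 5), dtype='float32')
+print(keras.layers.RepeatVector(3)(v).shape)  # (2, 3, 5)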
super().__init__(**kwargs) + self.target_shape = tuple(target_shape) + + def compute_output_shape(self, input_shape): + return (input_shape[0], *operation_utils.compute_reshape_output_shape(input_shape[1:], self.target_shape, 'target_shape')) + + def compute_output_spec(self, inputs): + output_shape = self.compute_output_shape(inputs.shape) + return KerasTensor(shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse) + + def build(self, input_shape): + sample_output_shape = operation_utils.compute_reshape_output_shape(input_shape[1:], self.target_shape, 'target_shape') + self._resolved_target_shape = tuple((-1 if d is None else d for d in sample_output_shape)) + self.built = True + + def call(self, inputs): + return ops.reshape(inputs, (ops.shape(inputs)[0],) + self._resolved_target_shape) + + def get_config(self): + config = {'target_shape': self.target_shape} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/reshaping/up_sampling1d.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.UpSampling1D') +class UpSampling1D(Layer): + + def __init__(self, size=2, **kwargs): + super().__init__(**kwargs) + self.size = int(size) + self.input_spec = InputSpec(ndim=3) + + def compute_output_shape(self, input_shape): + size = self.size * input_shape[1] if input_shape[1] is not None else None + return [input_shape[0], size, input_shape[2]] + + def call(self, inputs): + return ops.repeat(x=inputs, repeats=self.size, axis=1) + + def get_config(self): + config = {'size': self.size} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/reshaping/up_sampling2d.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + +@keras_export('keras.layers.UpSampling2D') +class UpSampling2D(Layer): + + def __init__(self, size=(2, 2), data_format=None, interpolation='nearest', **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + self.size = argument_validation.standardize_tuple(size, 2, 'size') + self.interpolation = interpolation.lower() + self.input_spec = InputSpec(ndim=4) + + def compute_output_shape(self, input_shape): + if self.data_format == 'channels_first': + height = self.size[0] * input_shape[2] if input_shape[2] is not None else None + width = self.size[1] * input_shape[3] if input_shape[3] is not None else None + return (input_shape[0], input_shape[1], height, width) + else: + height = self.size[0] * input_shape[1] if input_shape[1] is not None else None + width = self.size[1] * input_shape[2] if input_shape[2] is not None else None + return (input_shape[0], height, width, input_shape[3]) + + def call(self, inputs): + return self._resize_images(inputs, self.size[0], self.size[1], self.data_format, interpolation=self.interpolation) + + def get_config(self): + config = {'size': self.size, 'data_format': self.data_format, 'interpolation': self.interpolation} + base_config = super().get_config() + return {**base_config, **config} + + def _resize_images(self, x, height_factor, width_factor, data_format, interpolation='nearest'): + if data_format not in {'channels_last', 'channels_first'}: + raise 
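+# Editor's sketch (illustrative, not part of the Keras source): the Reshape
+# layer above resolves one unknown target dimension at build time --
+# compute_reshape_output_shape infers it from the known input size and
+# build() substitutes -1 for the backend reshape:
+import numpy as np
+import keras
+
+x = np.zeros((2, 12), dtype='float32')
+print(keras.layers.Reshape((3, 4))(x).shape)   # (2, 3, 4)
+print(keras.layers.Reshape((-1, 2))(x).shape)  # (2, 6, 2)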
ValueError(f'Invalid `data_format` argument: {data_format}') + if data_format == 'channels_first': + x = ops.transpose(x, [0, 2, 3, 1]) + if interpolation == 'nearest': + x = ops.repeat(x, height_factor, axis=1) + x = ops.repeat(x, width_factor, axis=2) + else: + shape = ops.shape(x) + new_shape = (shape[1] * height_factor, shape[2] * width_factor) + x = ops.image.resize(x, new_shape, interpolation=interpolation) + if data_format == 'channels_first': + x = ops.transpose(x, [0, 3, 1, 2]) + return x + +# File: keras-master/keras/src/layers/reshaping/up_sampling3d.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + +@keras_export('keras.layers.UpSampling3D') +class UpSampling3D(Layer): + + def __init__(self, size=(2, 2, 2), data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + self.size = argument_validation.standardize_tuple(size, 3, 'size') + self.input_spec = InputSpec(ndim=5) + + def compute_output_shape(self, input_shape): + if self.data_format == 'channels_first': + dim1 = self.size[0] * input_shape[2] if input_shape[2] is not None else None + dim2 = self.size[1] * input_shape[3] if input_shape[3] is not None else None + dim3 = self.size[2] * input_shape[4] if input_shape[4] is not None else None + return (input_shape[0], input_shape[1], dim1, dim2, dim3) + else: + dim1 = self.size[0] * input_shape[1] if input_shape[1] is not None else None + dim2 = self.size[1] * input_shape[2] if input_shape[2] is not None else None + dim3 = self.size[2] * input_shape[3] if input_shape[3] is not None else None + return (input_shape[0], dim1, dim2, dim3, input_shape[4]) + + def call(self, inputs): + return self._resize_volumes(inputs, self.size[0], self.size[1], self.size[2], self.data_format) + + def get_config(self): + config = {'size': self.size, 'data_format': self.data_format} + base_config = super().get_config() + return {**base_config, **config} + + def _resize_volumes(self, x, depth_factor, height_factor, width_factor, data_format): + if data_format == 'channels_first': + output = ops.repeat(x, depth_factor, axis=2) + output = ops.repeat(output, height_factor, axis=3) + output = ops.repeat(output, width_factor, axis=4) + return output + elif data_format == 'channels_last': + output = ops.repeat(x, depth_factor, axis=1) + output = ops.repeat(output, height_factor, axis=2) + output = ops.repeat(output, width_factor, axis=3) + return output + else: + raise ValueError(f'Invalid data_format: {data_format}') + +# File: keras-master/keras/src/layers/reshaping/zero_padding1d.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + +@keras_export('keras.layers.ZeroPadding1D') +class ZeroPadding1D(Layer): + + def __init__(self, padding=1, data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + self.padding = argument_validation.standardize_tuple(padding, 2, 'padding', allow_zero=True) + self.input_spec = InputSpec(ndim=3) + + def compute_output_shape(self, input_shape): + output_shape = list(input_shape) + padding_dim = 2 if self.data_format == 'channels_first' else 1 + if 
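+# Editor's sketch (illustrative, not part of the Keras source): with the
+# default 'nearest' interpolation the upsampling layers above are pure
+# repetition (see _resize_images/_resize_volumes); UpSampling2D can instead
+# delegate to ops.image.resize for 'bilinear' and friends:
+import numpy as np
+import keras
+
+x = np.arange(4, dtype='float32').reshape(1, 2, 2, 1)
+print(keras.layers.UpSampling2D(size=(2, 2))(x).shape)  # (1, 4, 4, 1)
+print(keras.layers.UpSampling2D(size=2, interpolation='bilinear')(x).shape)  # (1, 4, 4, 1)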
output_shape[padding_dim] is not None: + output_shape[padding_dim] += self.padding[0] + self.padding[1] + return tuple(output_shape) + + def call(self, inputs): + if self.data_format == 'channels_first': + all_dims_padding = ((0, 0), (0, 0), self.padding) + else: + all_dims_padding = ((0, 0), self.padding, (0, 0)) + return ops.pad(inputs, all_dims_padding) + + def get_config(self): + config = {'padding': self.padding, 'data_format': self.data_format} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/reshaping/zero_padding2d.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + +@keras_export('keras.layers.ZeroPadding2D') +class ZeroPadding2D(Layer): + + def __init__(self, padding=(1, 1), data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + if isinstance(padding, int): + self.padding = ((padding, padding), (padding, padding)) + elif hasattr(padding, '__len__'): + if len(padding) != 2: + raise ValueError(f'`padding` should have two elements. Received: padding={padding}.') + height_padding = argument_validation.standardize_tuple(padding[0], 2, '1st entry of padding', allow_zero=True) + width_padding = argument_validation.standardize_tuple(padding[1], 2, '2nd entry of padding', allow_zero=True) + self.padding = (height_padding, width_padding) + else: + raise ValueError(f'`padding` should be either an int, a tuple of 2 ints (symmetric_height_pad, symmetric_width_pad), or a tuple of 2 tuples of 2 ints ((top_pad, bottom_pad), (left_pad, right_pad)). Received: padding={padding}.') + self.input_spec = InputSpec(ndim=4) + + def compute_output_shape(self, input_shape): + output_shape = list(input_shape) + spatial_dims_offset = 2 if self.data_format == 'channels_first' else 1 + for index in range(0, 2): + if output_shape[index + spatial_dims_offset] is not None: + output_shape[index + spatial_dims_offset] += self.padding[index][0] + self.padding[index][1] + return tuple(output_shape) + + def call(self, inputs): + if self.data_format == 'channels_first': + all_dims_padding = ((0, 0), (0, 0), *self.padding) + else: + all_dims_padding = ((0, 0), *self.padding, (0, 0)) + return ops.pad(inputs, all_dims_padding) + + def get_config(self): + config = {'padding': self.padding, 'data_format': self.data_format} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/reshaping/zero_padding3d.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + +@keras_export('keras.layers.ZeroPadding3D') +class ZeroPadding3D(Layer): + + def __init__(self, padding=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + if isinstance(padding, int): + self.padding = ((padding, padding), (padding, padding), (padding, padding)) + elif hasattr(padding, '__len__'): + if len(padding) != 3: + raise ValueError(f'`padding` should have 3 elements. 
Received: {padding}.') + dim1_padding = argument_validation.standardize_tuple(padding[0], 2, '1st entry of padding', allow_zero=True) + dim2_padding = argument_validation.standardize_tuple(padding[1], 2, '2nd entry of padding', allow_zero=True) + dim3_padding = argument_validation.standardize_tuple(padding[2], 2, '3rd entry of padding', allow_zero=True) + self.padding = (dim1_padding, dim2_padding, dim3_padding) + else: + raise ValueError(f'`padding` should be either an int, a tuple of 3 ints (symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), or a tuple of 3 tuples of 2 ints ((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad)). Received: padding={padding}.') + self.input_spec = InputSpec(ndim=5) + + def compute_output_shape(self, input_shape): + output_shape = list(input_shape) + spatial_dims_offset = 2 if self.data_format == 'channels_first' else 1 + for index in range(0, 3): + if output_shape[index + spatial_dims_offset] is not None: + output_shape[index + spatial_dims_offset] += self.padding[index][0] + self.padding[index][1] + return tuple(output_shape) + + def call(self, inputs): + if self.data_format == 'channels_first': + all_dims_padding = ((0, 0), (0, 0), *self.padding) + else: + all_dims_padding = ((0, 0), *self.padding, (0, 0)) + return ops.pad(inputs, all_dims_padding) + + def get_config(self): + config = {'padding': self.padding, 'data_format': self.data_format} + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/layers/rnn/bidirectional.py +import copy +from keras.src import ops +from keras.src import utils +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.saving import serialization_lib + +@keras_export('keras.layers.Bidirectional') +class Bidirectional(Layer): + + def __init__(self, layer, merge_mode='concat', weights=None, backward_layer=None, **kwargs): + if not isinstance(layer, Layer): + raise ValueError(f'Please initialize `Bidirectional` layer with a `keras.layers.Layer` instance. Received: {layer}') + if backward_layer is not None and (not isinstance(backward_layer, Layer)): + raise ValueError(f'`backward_layer` needs to be a `keras.layers.Layer` instance. Received: {backward_layer}') + if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]: + raise ValueError(f'Invalid merge mode. Received: {merge_mode}. 
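+# Editor's sketch (illustrative, not part of the Keras source): padding is
+# the mirror image of cropping -- each spatial dimension grows by the sum of
+# its (start, end) pad amounts, zero-filled via ops.pad:
+import numpy as np
+import keras
+
+x = np.ones((1, 2, 2, 1), dtype='float32')
+print(keras.layers.ZeroPadding2D(padding=((1, 0), (2, 2)))(x).shape)  # (1, 3, 6, 1)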
Merge mode should be one of {{"sum", "mul", "ave", "concat", None}}') + super().__init__(**kwargs) + config = serialization_lib.serialize_keras_object(layer) + config['config']['name'] = 'forward_' + utils.removeprefix(layer.name, 'forward_') + self.forward_layer = serialization_lib.deserialize_keras_object(config) + if backward_layer is None: + config = serialization_lib.serialize_keras_object(layer) + config['config']['go_backwards'] = True + config['config']['name'] = 'backward_' + utils.removeprefix(layer.name, 'backward_') + self.backward_layer = serialization_lib.deserialize_keras_object(config) + else: + self.backward_layer = backward_layer + self._verify_layer_config() + + def force_zero_output_for_mask(layer): + if getattr(layer, 'zero_output_for_mask', None) is not None: + layer.zero_output_for_mask = layer.return_sequences + force_zero_output_for_mask(self.forward_layer) + force_zero_output_for_mask(self.backward_layer) + self.merge_mode = merge_mode + if weights: + nw = len(weights) + self.forward_layer.initial_weights = weights[:nw // 2] + self.backward_layer.initial_weights = weights[nw // 2:] + self.stateful = layer.stateful + self.return_sequences = layer.return_sequences + self.return_state = layer.return_state + self.supports_masking = True + self.input_spec = layer.input_spec + + def _verify_layer_config(self): + if self.forward_layer.go_backwards == self.backward_layer.go_backwards: + raise ValueError(f'Forward layer and backward layer should have different `go_backwards` value. Received: forward_layer.go_backwards {self.forward_layer.go_backwards}, backward_layer.go_backwards={self.backward_layer.go_backwards}') + common_attributes = ('stateful', 'return_sequences', 'return_state') + for a in common_attributes: + forward_value = getattr(self.forward_layer, a) + backward_value = getattr(self.backward_layer, a) + if forward_value != backward_value: + raise ValueError(f'Forward layer and backward layer are expected to have the same value for attribute "{a}", got "{forward_value}" for forward layer and "{backward_value}" for backward layer') + + def compute_output_shape(self, sequences_shape, initial_state_shape=None): + output_shape = self.forward_layer.compute_output_shape(sequences_shape) + if self.return_state: + (output_shape, state_shape) = (output_shape[0], output_shape[1:]) + if self.merge_mode == 'concat': + output_shape = list(output_shape) + output_shape[-1] *= 2 + output_shape = tuple(output_shape) + elif self.merge_mode is None: + output_shape = [output_shape, output_shape] + if self.return_state: + if self.merge_mode is None: + return tuple(output_shape) + state_shape + state_shape + return tuple([output_shape]) + state_shape + state_shape + return tuple(output_shape) + + def call(self, sequences, initial_state=None, mask=None, training=None): + kwargs = {} + if self.forward_layer._call_has_training_arg: + kwargs['training'] = training + if self.forward_layer._call_has_mask_arg: + kwargs['mask'] = mask + if initial_state is not None: + (forward_inputs, backward_inputs) = (sequences, sequences) + half = len(initial_state) // 2 + forward_state = initial_state[:half] + backward_state = initial_state[half:] + else: + (forward_inputs, backward_inputs) = (sequences, sequences) + (forward_state, backward_state) = (None, None) + y = self.forward_layer(forward_inputs, initial_state=forward_state, **kwargs) + y_rev = self.backward_layer(backward_inputs, initial_state=backward_state, **kwargs) + if self.return_state: + states = tuple(y[1:] + y_rev[1:]) + y = y[0] + 
+    def call(self, sequences, initial_state=None, mask=None, training=None):
+        kwargs = {}
+        if self.forward_layer._call_has_training_arg:
+            kwargs['training'] = training
+        if self.forward_layer._call_has_mask_arg:
+            kwargs['mask'] = mask
+        if initial_state is not None:
+            (forward_inputs, backward_inputs) = (sequences, sequences)
+            half = len(initial_state) // 2
+            forward_state = initial_state[:half]
+            backward_state = initial_state[half:]
+        else:
+            (forward_inputs, backward_inputs) = (sequences, sequences)
+            (forward_state, backward_state) = (None, None)
+        y = self.forward_layer(forward_inputs, initial_state=forward_state, **kwargs)
+        y_rev = self.backward_layer(backward_inputs, initial_state=backward_state, **kwargs)
+        if self.return_state:
+            states = tuple(y[1:] + y_rev[1:])
+            y = y[0]
+            y_rev = y_rev[0]
+        y = ops.cast(y, self.compute_dtype)
+        y_rev = ops.cast(y_rev, self.compute_dtype)
+        if self.return_sequences:
+            y_rev = ops.flip(y_rev, axis=1)
+        if self.merge_mode == 'concat':
+            output = ops.concatenate([y, y_rev], axis=-1)
+        elif self.merge_mode == 'sum':
+            output = y + y_rev
+        elif self.merge_mode == 'ave':
+            output = (y + y_rev) / 2
+        elif self.merge_mode == 'mul':
+            output = y * y_rev
+        elif self.merge_mode is None:
+            output = (y, y_rev)
+        else:
+            raise ValueError(f'Unrecognized value for `merge_mode`. Received: {self.merge_mode}. Expected one of {{"concat", "sum", "ave", "mul"}}.')
+        if self.return_state:
+            if self.merge_mode is None:
+                return output + states
+            return (output,) + states
+        return output
+
+    def reset_states(self):
+        self.reset_state()
+
+    def reset_state(self):
+        if not self.stateful:
+            raise AttributeError('Layer must be stateful.')
+        self.forward_layer.reset_state()
+        self.backward_layer.reset_state()
+
+    @property
+    def states(self):
+        if self.forward_layer.states and self.backward_layer.states:
+            return tuple(self.forward_layer.states + self.backward_layer.states)
+        return None
+
+    def build(self, sequences_shape, initial_state_shape=None):
+        if not self.forward_layer.built:
+            self.forward_layer.build(sequences_shape)
+        if not self.backward_layer.built:
+            self.backward_layer.build(sequences_shape)
+        self.built = True
+
+    def compute_mask(self, _, mask):
+        if isinstance(mask, list):
+            mask = mask[0]
+        if self.return_sequences:
+            if not self.merge_mode:
+                output_mask = (mask, mask)
+            else:
+                output_mask = mask
+        else:
+            output_mask = (None, None) if not self.merge_mode else None
+        if self.return_state and self.states is not None:
+            # Use a tuple (not a generator) so the mask sequence can be
+            # repeated and concatenated below.
+            state_mask = tuple(None for _ in self.states)
+            if isinstance(output_mask, tuple):
+                return output_mask + state_mask * 2
+            return (output_mask,) + state_mask * 2
+        return output_mask
+
+    def get_config(self):
+        config = {'merge_mode': self.merge_mode}
+        config['layer'] = serialization_lib.serialize_keras_object(self.forward_layer)
+        config['backward_layer'] = serialization_lib.serialize_keras_object(self.backward_layer)
+        base_config = super().get_config()
+        return {**base_config, **config}
+
+    @classmethod
+    def from_config(cls, config, custom_objects=None):
+        config = copy.deepcopy(config)
+        config['layer'] = serialization_lib.deserialize_keras_object(config['layer'], custom_objects=custom_objects)
+        backward_layer_config = config.pop('backward_layer', None)
+        if backward_layer_config is not None:
+            backward_layer = serialization_lib.deserialize_keras_object(backward_layer_config, custom_objects=custom_objects)
+            config['backward_layer'] = backward_layer
+        layer = cls(**config)
+        return layer
+
+# File: keras-master/keras/src/layers/rnn/conv_lstm.py
+from keras.src import activations
+from keras.src import backend
+from keras.src import constraints
+from keras.src import initializers
+from keras.src import ops
+from keras.src import regularizers
+from keras.src import tree
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
+from keras.src.layers.rnn.rnn import RNN
+from keras.src.ops import operation_utils
+from keras.src.utils import argument_validation
+
+class ConvLSTMCell(Layer, DropoutRNNCell):
+
+    def __init__(self, rank, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, seed=None, **kwargs): + super().__init__(**kwargs) + self.seed = seed + self.seed_generator = backend.random.SeedGenerator(seed=seed) + self.rank = rank + if self.rank > 3: + raise ValueError(f'Rank {rank} convolutions are not currently implemented. Received: rank={rank}') + self.filters = filters + self.kernel_size = argument_validation.standardize_tuple(kernel_size, self.rank, 'kernel_size') + self.strides = argument_validation.standardize_tuple(strides, self.rank, 'strides', allow_zero=True) + self.padding = argument_validation.standardize_padding(padding) + self.data_format = backend.standardize_data_format(data_format) + self.dilation_rate = argument_validation.standardize_tuple(dilation_rate, self.rank, 'dilation_rate') + self.activation = activations.get(activation) + self.recurrent_activation = activations.get(recurrent_activation) + self.use_bias = use_bias + self.kernel_initializer = initializers.get(kernel_initializer) + self.recurrent_initializer = initializers.get(recurrent_initializer) + self.bias_initializer = initializers.get(bias_initializer) + self.unit_forget_bias = unit_forget_bias + self.kernel_regularizer = regularizers.get(kernel_regularizer) + self.recurrent_regularizer = regularizers.get(recurrent_regularizer) + self.bias_regularizer = regularizers.get(bias_regularizer) + self.kernel_constraint = constraints.get(kernel_constraint) + self.recurrent_constraint = constraints.get(recurrent_constraint) + self.bias_constraint = constraints.get(bias_constraint) + self.dropout = min(1.0, max(0.0, dropout)) + self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout)) + self.dropout_mask_count = 4 + self.input_spec = InputSpec(ndim=rank + 2) + self.state_size = -1 + + def build(self, inputs_shape, states_shape=None): + if self.data_format == 'channels_first': + channel_axis = 1 + self.spatial_dims = inputs_shape[2:] + else: + channel_axis = -1 + self.spatial_dims = inputs_shape[1:-1] + if None in self.spatial_dims: + raise ValueError(f'ConvLSTM layers only support static input shapes for the spatial dimension. Received invalid input shape: input_shape={inputs_shape}') + if inputs_shape[channel_axis] is None: + raise ValueError(f'The channel dimension of the inputs (last axis) should be defined. Found None. 
Full input shape received: input_shape={inputs_shape}') + self.input_spec = InputSpec(ndim=self.rank + 3, shape=(None,) + inputs_shape[1:]) + input_dim = inputs_shape[channel_axis] + self.input_dim = input_dim + self.kernel_shape = self.kernel_size + (input_dim, self.filters * 4) + recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4) + self.kernel = self.add_weight(shape=self.kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) + self.recurrent_kernel = self.add_weight(shape=recurrent_kernel_shape, initializer=self.recurrent_initializer, name='recurrent_kernel', regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) + if self.use_bias: + if self.unit_forget_bias: + + def bias_initializer(_, *args, **kwargs): + return ops.concatenate([self.bias_initializer((self.filters,), *args, **kwargs), initializers.get('ones')((self.filters,), *args, **kwargs), self.bias_initializer((self.filters * 2,), *args, **kwargs)]) + else: + bias_initializer = self.bias_initializer + self.bias = self.add_weight(shape=(self.filters * 4,), name='bias', initializer=bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) + else: + self.bias = None + self.built = True + + def call(self, inputs, states, training=False): + h_tm1 = states[0] + c_tm1 = states[1] + if training and 0.0 < self.dropout < 1.0: + dp_mask = self.get_dropout_mask(inputs) + inputs_i = inputs * dp_mask[0] + inputs_f = inputs * dp_mask[1] + inputs_c = inputs * dp_mask[2] + inputs_o = inputs * dp_mask[3] + else: + inputs_i = inputs + inputs_f = inputs + inputs_c = inputs + inputs_o = inputs + if training and 0.0 < self.recurrent_dropout < 1.0: + rec_dp_mask = self.get_recurrent_dropout_mask(h_tm1) + h_tm1_i = h_tm1 * rec_dp_mask[0] + h_tm1_f = h_tm1 * rec_dp_mask[1] + h_tm1_c = h_tm1 * rec_dp_mask[2] + h_tm1_o = h_tm1 * rec_dp_mask[3] + else: + h_tm1_i = h_tm1 + h_tm1_f = h_tm1 + h_tm1_c = h_tm1 + h_tm1_o = h_tm1 + (kernel_i, kernel_f, kernel_c, kernel_o) = ops.split(self.kernel, 4, axis=self.rank + 1) + (recurrent_kernel_i, recurrent_kernel_f, recurrent_kernel_c, recurrent_kernel_o) = ops.split(self.recurrent_kernel, 4, axis=self.rank + 1) + if self.use_bias: + (bias_i, bias_f, bias_c, bias_o) = ops.split(self.bias, 4) + else: + (bias_i, bias_f, bias_c, bias_o) = (None, None, None, None) + x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding) + x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding) + x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding) + x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding) + h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i) + h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f) + h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c) + h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o) + i = self.recurrent_activation(x_i + h_i) + f = self.recurrent_activation(x_f + h_f) + c = f * c_tm1 + i * self.activation(x_c + h_c) + o = self.recurrent_activation(x_o + h_o) + h = o * self.activation(c) + return (h, [h, c]) + + def compute_output_shape(self, inputs_shape, states_shape=None): + conv_output_shape = operation_utils.compute_conv_output_shape(inputs_shape, self.filters, self.kernel_size, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate) + return (conv_output_shape, [conv_output_shape, conv_output_shape]) + + def 
get_initial_state(self, batch_size=None): + if self.data_format == 'channels_last': + input_shape = (batch_size,) + self.spatial_dims + (self.input_dim,) + else: + input_shape = (batch_size, self.input_dim) + self.spatial_dims + state_shape = self.compute_output_shape(input_shape)[0] + return [ops.zeros(state_shape, dtype=self.compute_dtype), ops.zeros(state_shape, dtype=self.compute_dtype)] + + def input_conv(self, x, w, b=None, padding='valid'): + conv_out = ops.conv(x, w, strides=self.strides, padding=padding, data_format=self.data_format, dilation_rate=self.dilation_rate) + if b is not None: + if self.data_format == 'channels_last': + bias_shape = (1,) * (self.rank + 1) + (self.filters,) + else: + bias_shape = (1, self.filters) + (1,) * self.rank + bias = ops.reshape(b, bias_shape) + conv_out += bias + return conv_out + + def recurrent_conv(self, x, w): + strides = argument_validation.standardize_tuple(1, self.rank, 'strides', allow_zero=True) + conv_out = ops.conv(x, w, strides=strides, padding='same', data_format=self.data_format) + return conv_out + + def get_config(self): + config = {'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'seed': self.seed} + base_config = super().get_config() + return {**base_config, **config} + +class ConvLSTM(RNN): + + def __init__(self, rank, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, seed=None, return_sequences=False, return_state=False, go_backwards=False, stateful=False, **kwargs): + cell = ConvLSTMCell(rank=rank, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, 
recurrent_dropout=recurrent_dropout, seed=seed, name='conv_lstm_cell', dtype=kwargs.get('dtype')) + super().__init__(cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, **kwargs) + self.input_spec = InputSpec(ndim=rank + 3) + + def call(self, sequences, initial_state=None, mask=None, training=False): + return super().call(sequences, initial_state=initial_state, mask=mask, training=training) + + def compute_output_shape(self, sequences_shape, initial_state_shape=None): + batch_size = sequences_shape[0] + steps = sequences_shape[1] + step_shape = (batch_size,) + sequences_shape[2:] + state_shape = self.cell.compute_output_shape(step_shape)[0][1:] + if self.return_sequences: + output_shape = (batch_size, steps) + state_shape + else: + output_shape = (batch_size,) + state_shape + if self.return_state: + batched_state_shape = (batch_size,) + state_shape + return (output_shape, batched_state_shape, batched_state_shape) + return output_shape + + def compute_mask(self, _, mask): + mask = tree.flatten(mask)[0] + output_mask = mask if self.return_sequences else None + if self.return_state: + state_mask = [None, None] + return [output_mask] + state_mask + else: + return output_mask + + @property + def filters(self): + return self.cell.filters + + @property + def kernel_size(self): + return self.cell.kernel_size + + @property + def strides(self): + return self.cell.strides + + @property + def padding(self): + return self.cell.padding + + @property + def data_format(self): + return self.cell.data_format + + @property + def dilation_rate(self): + return self.cell.dilation_rate + + @property + def activation(self): + return self.cell.activation + + @property + def recurrent_activation(self): + return self.cell.recurrent_activation + + @property + def use_bias(self): + return self.cell.use_bias + + @property + def kernel_initializer(self): + return self.cell.kernel_initializer + + @property + def recurrent_initializer(self): + return self.cell.recurrent_initializer + + @property + def bias_initializer(self): + return self.cell.bias_initializer + + @property + def unit_forget_bias(self): + return self.cell.unit_forget_bias + + @property + def kernel_regularizer(self): + return self.cell.kernel_regularizer + + @property + def recurrent_regularizer(self): + return self.cell.recurrent_regularizer + + @property + def bias_regularizer(self): + return self.cell.bias_regularizer + + @property + def kernel_constraint(self): + return self.cell.kernel_constraint + + @property + def recurrent_constraint(self): + return self.cell.recurrent_constraint + + @property + def bias_constraint(self): + return self.cell.bias_constraint + + @property + def dropout(self): + return self.cell.dropout + + @property + def recurrent_dropout(self): + return self.cell.recurrent_dropout + + def get_config(self): + config = {'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'seed': self.cell.seed} + base_config = super().get_config() + del base_config['cell'] + return {**base_config, **config} + + @classmethod + def from_config(cls, config): + return cls(**config) + +# File: keras-master/keras/src/layers/rnn/conv_lstm1d.py +from keras.src.api_export import keras_export +from keras.src.layers.rnn.conv_lstm import ConvLSTM + +@keras_export('keras.layers.ConvLSTM1D') +class ConvLSTM1D(ConvLSTM): + + def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, seed=None, return_sequences=False, return_state=False, go_backwards=False, stateful=False, **kwargs): + super().__init__(rank=1, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, dropout=dropout, recurrent_dropout=recurrent_dropout, seed=seed, **kwargs) + +# File: keras-master/keras/src/layers/rnn/conv_lstm2d.py +from keras.src.api_export import keras_export +from keras.src.layers.rnn.conv_lstm import ConvLSTM + +@keras_export('keras.layers.ConvLSTM2D') +class ConvLSTM2D(ConvLSTM): + + def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, seed=None, return_sequences=False, return_state=False, go_backwards=False, stateful=False, **kwargs): + super().__init__(rank=2, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, 
bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, dropout=dropout, recurrent_dropout=recurrent_dropout, seed=seed, **kwargs) + +# File: keras-master/keras/src/layers/rnn/conv_lstm3d.py +from keras.src.api_export import keras_export +from keras.src.layers.rnn.conv_lstm import ConvLSTM + +@keras_export('keras.layers.ConvLSTM3D') +class ConvLSTM3D(ConvLSTM): + + def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, seed=None, return_sequences=False, return_state=False, go_backwards=False, stateful=False, **kwargs): + super().__init__(rank=3, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, dropout=dropout, recurrent_dropout=recurrent_dropout, seed=seed, **kwargs) + +# File: keras-master/keras/src/layers/rnn/dropout_rnn_cell.py +from keras.src import backend +from keras.src import ops + +class DropoutRNNCell: + + def _create_dropout_mask(self, step_input, dropout_rate): + count = getattr(self, 'dropout_mask_count', None) + ones = ops.ones_like(step_input) + if count is None: + return backend.random.dropout(ones, rate=dropout_rate, seed=self.seed_generator) + else: + return [backend.random.dropout(ones, rate=dropout_rate, seed=self.seed_generator) for _ in range(count)] + + def get_dropout_mask(self, step_input): + if not hasattr(self, '_dropout_mask'): + self._dropout_mask = None + if self._dropout_mask is None and self.dropout > 0: + self._dropout_mask = self._create_dropout_mask(step_input, self.dropout) + return self._dropout_mask + + def get_recurrent_dropout_mask(self, step_input): + if not hasattr(self, '_recurrent_dropout_mask'): + self._recurrent_dropout_mask = None + if self._recurrent_dropout_mask is None and self.recurrent_dropout > 0: + self._recurrent_dropout_mask = self._create_dropout_mask(step_input, self.recurrent_dropout) + return self._recurrent_dropout_mask + + def reset_dropout_mask(self): + self._dropout_mask = None + + def reset_recurrent_dropout_mask(self): + self._recurrent_dropout_mask = None + +# File: 
keras-master/keras/src/layers/rnn/gru.py +from keras.src import activations +from keras.src import backend +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell +from keras.src.layers.rnn.rnn import RNN + +@keras_export('keras.layers.GRUCell') +class GRUCell(Layer, DropoutRNNCell): + + def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, reset_after=True, seed=None, **kwargs): + if units <= 0: + raise ValueError(f'Received an invalid value for argument `units`, expected a positive integer, got {units}.') + implementation = kwargs.pop('implementation', 2) + super().__init__(**kwargs) + self.implementation = implementation + self.units = units + self.activation = activations.get(activation) + self.recurrent_activation = activations.get(recurrent_activation) + self.use_bias = use_bias + self.kernel_initializer = initializers.get(kernel_initializer) + self.recurrent_initializer = initializers.get(recurrent_initializer) + self.bias_initializer = initializers.get(bias_initializer) + self.kernel_regularizer = regularizers.get(kernel_regularizer) + self.recurrent_regularizer = regularizers.get(recurrent_regularizer) + self.bias_regularizer = regularizers.get(bias_regularizer) + self.kernel_constraint = constraints.get(kernel_constraint) + self.recurrent_constraint = constraints.get(recurrent_constraint) + self.bias_constraint = constraints.get(bias_constraint) + self.dropout = min(1.0, max(0.0, dropout)) + self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout)) + self.seed = seed + self.seed_generator = backend.random.SeedGenerator(seed=seed) + self.reset_after = reset_after + self.state_size = self.units + self.output_size = self.units + + def build(self, input_shape): + super().build(input_shape) + input_dim = input_shape[-1] + self.kernel = self.add_weight(shape=(input_dim, self.units * 3), name='kernel', initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) + self.recurrent_kernel = self.add_weight(shape=(self.units, self.units * 3), name='recurrent_kernel', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) + if self.use_bias: + if not self.reset_after: + bias_shape = (3 * self.units,) + else: + bias_shape = (2, 3 * self.units) + self.bias = self.add_weight(shape=bias_shape, name='bias', initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) + else: + self.bias = None + self.built = True + + def call(self, inputs, states, training=False): + h_tm1 = states[0] if tree.is_nested(states) else states + dp_mask = self.get_dropout_mask(inputs) + rec_dp_mask = self.get_recurrent_dropout_mask(h_tm1) + if self.use_bias: + if not self.reset_after: + (input_bias, recurrent_bias) = (self.bias, None) + else: + (input_bias, recurrent_bias) = (ops.squeeze(e, axis=0) for e in ops.split(self.bias, 
self.bias.shape[0], axis=0)) + if training and 0.0 < self.dropout < 1.0: + inputs = inputs * dp_mask + if training and 0.0 < self.recurrent_dropout < 1.0: + h_tm1 = h_tm1 * rec_dp_mask + if self.implementation == 1: + inputs_z = inputs + inputs_r = inputs + inputs_h = inputs + x_z = ops.matmul(inputs_z, self.kernel[:, :self.units]) + x_r = ops.matmul(inputs_r, self.kernel[:, self.units:self.units * 2]) + x_h = ops.matmul(inputs_h, self.kernel[:, self.units * 2:]) + if self.use_bias: + x_z += input_bias[:self.units] + x_r += input_bias[self.units:self.units * 2] + x_h += input_bias[self.units * 2:] + h_tm1_z = h_tm1 + h_tm1_r = h_tm1 + h_tm1_h = h_tm1 + recurrent_z = ops.matmul(h_tm1_z, self.recurrent_kernel[:, :self.units]) + recurrent_r = ops.matmul(h_tm1_r, self.recurrent_kernel[:, self.units:self.units * 2]) + if self.reset_after and self.use_bias: + recurrent_z += recurrent_bias[:self.units] + recurrent_r += recurrent_bias[self.units:self.units * 2] + z = self.recurrent_activation(x_z + recurrent_z) + r = self.recurrent_activation(x_r + recurrent_r) + if self.reset_after: + recurrent_h = ops.matmul(h_tm1_h, self.recurrent_kernel[:, self.units * 2:]) + if self.use_bias: + recurrent_h += recurrent_bias[self.units * 2:] + recurrent_h = r * recurrent_h + else: + recurrent_h = ops.matmul(r * h_tm1_h, self.recurrent_kernel[:, self.units * 2:]) + hh = self.activation(x_h + recurrent_h) + else: + matrix_x = ops.matmul(inputs, self.kernel) + if self.use_bias: + matrix_x += input_bias + (x_z, x_r, x_h) = ops.split(matrix_x, 3, axis=-1) + if self.reset_after: + matrix_inner = ops.matmul(h_tm1, self.recurrent_kernel) + if self.use_bias: + matrix_inner += recurrent_bias + else: + matrix_inner = ops.matmul(h_tm1, self.recurrent_kernel[:, :2 * self.units]) + recurrent_z = matrix_inner[:, :self.units] + recurrent_r = matrix_inner[:, self.units:self.units * 2] + recurrent_h = matrix_inner[:, self.units * 2:] + z = self.recurrent_activation(x_z + recurrent_z) + r = self.recurrent_activation(x_r + recurrent_r) + if self.reset_after: + recurrent_h = r * recurrent_h + else: + recurrent_h = ops.matmul(r * h_tm1, self.recurrent_kernel[:, 2 * self.units:]) + hh = self.activation(x_h + recurrent_h) + h = z * h_tm1 + (1 - z) * hh + new_state = [h] if tree.is_nested(states) else h + return (h, new_state) + + def get_config(self): + config = {'units': self.units, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'reset_after': self.reset_after, 'seed': self.seed} + base_config = super().get_config() + return {**base_config, **config} + + def get_initial_state(self, batch_size=None): + return [ops.zeros((batch_size, self.state_size), dtype=self.compute_dtype)] + +@keras_export('keras.layers.GRU') +class GRU(RNN): + + 
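+    # Illustrative usage sketch (a minimal example assuming the public
+    # `keras.layers.GRU` export; not part of the library source):
+    #
+    #   import numpy as np
+    #   import keras
+    #
+    #   inputs = np.random.random((32, 10, 8)).astype('float32')
+    #   output = keras.layers.GRU(4)(inputs)  # shape: (32, 4)
+    #   gru = keras.layers.GRU(4, return_sequences=True, return_state=True)
+    #   whole_sequence, final_state = gru(inputs)
+    #   # whole_sequence: (32, 10, 4), final_state: (32, 4)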
+    def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, seed=None, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, reset_after=True, use_cudnn='auto', **kwargs):
+        cell = GRUCell(units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, reset_after=reset_after, dtype=kwargs.get('dtype', None), trainable=kwargs.get('trainable', True), name='gru_cell', seed=seed, implementation=kwargs.pop('implementation', 2))
+        super().__init__(cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, unroll=unroll, activity_regularizer=activity_regularizer, **kwargs)
+        self.input_spec = InputSpec(ndim=3)
+        if use_cudnn not in ('auto', True, False):
+            raise ValueError(f"Invalid value received for argument `use_cudnn`. Expected one of {{'auto', True, False}}. Received: use_cudnn={use_cudnn}")
+        self.use_cudnn = use_cudnn
+        if backend.backend() == 'tensorflow' and backend.cudnn_ok(cell.activation, cell.recurrent_activation, self.unroll, cell.use_bias, reset_after=reset_after) and (use_cudnn in (True, 'auto')):
+            self.supports_jit = False
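+
+    # `inner_loop` below tries the fused backend implementation (cuDNN on
+    # the TensorFlow backend) whenever `use_cudnn` is 'auto' or True and
+    # there is no recurrent dropout; input dropout is emulated by
+    # broadcasting one per-feature mask across all timesteps before the
+    # fused call. If the backend raises NotImplementedError, the layer
+    # falls back to the generic per-step loop in `RNN.inner_loop`.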
+    def inner_loop(self, sequences, initial_state, mask, training=False):
+        if tree.is_nested(initial_state):
+            initial_state = initial_state[0]
+        if tree.is_nested(mask):
+            mask = mask[0]
+        if self.use_cudnn in ('auto', True):
+            if not self.recurrent_dropout:
+                try:
+                    if self.dropout:
+                        dp_mask = self.cell.get_dropout_mask(sequences[:, 0, :])
+                        dp_mask = ops.expand_dims(dp_mask, axis=1)
+                        dp_mask = ops.broadcast_to(dp_mask, ops.shape(sequences))
+                        dp_sequences = sequences * dp_mask
+                    else:
+                        dp_sequences = sequences
+                    out = backend.gru(dp_sequences, initial_state, mask, kernel=self.cell.kernel, recurrent_kernel=self.cell.recurrent_kernel, bias=self.cell.bias, activation=self.cell.activation, recurrent_activation=self.cell.recurrent_activation, return_sequences=self.return_sequences, go_backwards=self.go_backwards, unroll=self.unroll, reset_after=self.cell.reset_after)
+                    if backend.backend() == 'tensorflow':
+                        self.supports_jit = False
+                    return out
+                except NotImplementedError:
+                    pass
+        if self.use_cudnn is True:
+            raise ValueError("use_cudnn=True was specified, but cuDNN is not supported for this layer configuration with this backend. Pass use_cudnn='auto' to fall back to a non-cuDNN implementation.")
+        return super().inner_loop(sequences, initial_state, mask=mask, training=training)
+
+    def call(self, sequences, initial_state=None, mask=None, training=False):
+        return super().call(sequences, mask=mask, training=training, initial_state=initial_state)
+
+    @property
+    def units(self):
+        return self.cell.units
+
+    @property
+    def activation(self):
+        return self.cell.activation
+
+    @property
+    def recurrent_activation(self):
+        return self.cell.recurrent_activation
+
+    @property
+    def use_bias(self):
+        return self.cell.use_bias
+
+    @property
+    def kernel_initializer(self):
+        return self.cell.kernel_initializer
+
+    @property
+    def recurrent_initializer(self):
+        return self.cell.recurrent_initializer
+
+    @property
+    def bias_initializer(self):
+        return self.cell.bias_initializer
+
+    @property
+    def kernel_regularizer(self):
+        return self.cell.kernel_regularizer
+
+    @property
+    def recurrent_regularizer(self):
+        return self.cell.recurrent_regularizer
+
+    @property
+    def bias_regularizer(self):
+        return self.cell.bias_regularizer
+
+    @property
+    def kernel_constraint(self):
+        return self.cell.kernel_constraint
+
+    @property
+    def recurrent_constraint(self):
+        return self.cell.recurrent_constraint
+
+    @property
+    def bias_constraint(self):
+        return self.cell.bias_constraint
+
+    @property
+    def dropout(self):
+        return self.cell.dropout
+
+    @property
+    def recurrent_dropout(self):
+        return self.cell.recurrent_dropout
+
+    @property
+    def reset_after(self):
+        return self.cell.reset_after
+
+    def get_config(self):
+        config = {'units': self.units, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'reset_after': self.reset_after, 'seed': self.cell.seed}
+        base_config = super().get_config()
+        del base_config['cell']
+        return {**base_config, **config}
+
+    @classmethod
+    def from_config(cls, config):
+        return cls(**config)
+
+# File: keras-master/keras/src/layers/rnn/lstm.py
+from keras.src import activations
+from keras.src import backend
+from keras.src import constraints
+from keras.src import initializers
+from keras.src import ops
+from keras.src import regularizers
+from keras.src import tree
+from keras.src.api_export import keras_export
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
+from keras.src.layers.rnn.rnn import RNN
+
+@keras_export('keras.layers.LSTMCell')
+class LSTMCell(Layer, DropoutRNNCell):
+
+    def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, seed=None, **kwargs): + if units <= 0: + raise ValueError(f'Received an invalid value for argument `units`, expected a positive integer, got {units}.') + implementation = kwargs.pop('implementation', 2) + super().__init__(**kwargs) + self.units = units + self.activation = activations.get(activation) + self.recurrent_activation = activations.get(recurrent_activation) + self.use_bias = use_bias + self.kernel_initializer = initializers.get(kernel_initializer) + self.recurrent_initializer = initializers.get(recurrent_initializer) + self.bias_initializer = initializers.get(bias_initializer) + self.kernel_regularizer = regularizers.get(kernel_regularizer) + self.recurrent_regularizer = regularizers.get(recurrent_regularizer) + self.bias_regularizer = regularizers.get(bias_regularizer) + self.kernel_constraint = constraints.get(kernel_constraint) + self.recurrent_constraint = constraints.get(recurrent_constraint) + self.bias_constraint = constraints.get(bias_constraint) + self.dropout = min(1.0, max(0.0, dropout)) + self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout)) + self.seed = seed + self.seed_generator = backend.random.SeedGenerator(seed=seed) + self.unit_forget_bias = unit_forget_bias + self.state_size = [self.units, self.units] + self.output_size = self.units + self.implementation = implementation + + def build(self, input_shape): + super().build(input_shape) + input_dim = input_shape[-1] + self.kernel = self.add_weight(shape=(input_dim, self.units * 4), name='kernel', initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) + self.recurrent_kernel = self.add_weight(shape=(self.units, self.units * 4), name='recurrent_kernel', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) + if self.use_bias: + if self.unit_forget_bias: + + def bias_initializer(_, *args, **kwargs): + return ops.concatenate([self.bias_initializer((self.units,), *args, **kwargs), initializers.get('ones')((self.units,), *args, **kwargs), self.bias_initializer((self.units * 2,), *args, **kwargs)]) + else: + bias_initializer = self.bias_initializer + self.bias = self.add_weight(shape=(self.units * 4,), name='bias', initializer=bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) + else: + self.bias = None + self.built = True + + def _compute_carry_and_output(self, x, h_tm1, c_tm1): + (x_i, x_f, x_c, x_o) = x + (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o) = h_tm1 + i = self.recurrent_activation(x_i + ops.matmul(h_tm1_i, self.recurrent_kernel[:, :self.units])) + f = self.recurrent_activation(x_f + ops.matmul(h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2])) + c = f * c_tm1 + i * self.activation(x_c + ops.matmul(h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3])) + o = self.recurrent_activation(x_o + ops.matmul(h_tm1_o, self.recurrent_kernel[:, self.units * 3:])) + return (c, o) + + def _compute_carry_and_output_fused(self, z, c_tm1): + (z0, z1, z2, z3) = z + i = self.recurrent_activation(z0) + f = self.recurrent_activation(z1) + c = f * c_tm1 + i * self.activation(z2) + o = self.recurrent_activation(z3) + return (c, o) + + def call(self, inputs, states, training=False): + h_tm1 = 
states[0] + c_tm1 = states[1] + dp_mask = self.get_dropout_mask(inputs) + rec_dp_mask = self.get_recurrent_dropout_mask(h_tm1) + if training and 0.0 < self.dropout < 1.0: + inputs = inputs * dp_mask + if training and 0.0 < self.recurrent_dropout < 1.0: + h_tm1 = h_tm1 * rec_dp_mask + if self.implementation == 1: + inputs_i = inputs + inputs_f = inputs + inputs_c = inputs + inputs_o = inputs + (k_i, k_f, k_c, k_o) = ops.split(self.kernel, 4, axis=1) + x_i = ops.matmul(inputs_i, k_i) + x_f = ops.matmul(inputs_f, k_f) + x_c = ops.matmul(inputs_c, k_c) + x_o = ops.matmul(inputs_o, k_o) + if self.use_bias: + (b_i, b_f, b_c, b_o) = ops.split(self.bias, 4, axis=0) + x_i += b_i + x_f += b_f + x_c += b_c + x_o += b_o + h_tm1_i = h_tm1 + h_tm1_f = h_tm1 + h_tm1_c = h_tm1 + h_tm1_o = h_tm1 + x = (x_i, x_f, x_c, x_o) + h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o) + (c, o) = self._compute_carry_and_output(x, h_tm1, c_tm1) + else: + z = ops.matmul(inputs, self.kernel) + z += ops.matmul(h_tm1, self.recurrent_kernel) + if self.use_bias: + z += self.bias + z = ops.split(z, 4, axis=1) + (c, o) = self._compute_carry_and_output_fused(z, c_tm1) + h = o * self.activation(c) + return (h, [h, c]) + + def get_config(self): + config = {'units': self.units, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'unit_forget_bias': self.unit_forget_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'seed': self.seed} + base_config = super().get_config() + return {**base_config, **config} + + def get_initial_state(self, batch_size=None): + return [ops.zeros((batch_size, d), dtype=self.compute_dtype) for d in self.state_size] + +@keras_export('keras.layers.LSTM') +class LSTM(RNN): + + def __init__(self, units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, seed=None, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, use_cudnn='auto', **kwargs): + cell = LSTMCell(units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, unit_forget_bias=unit_forget_bias, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, 
dtype=kwargs.get('dtype', None), trainable=kwargs.get('trainable', True), name='lstm_cell', seed=seed, implementation=kwargs.pop('implementation', 2))
+        super().__init__(cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, unroll=unroll, activity_regularizer=activity_regularizer, **kwargs)
+        self.input_spec = InputSpec(ndim=3)
+        if use_cudnn not in ('auto', True, False):
+            raise ValueError(f"Invalid value received for argument `use_cudnn`. Expected one of {{'auto', True, False}}. Received: use_cudnn={use_cudnn}")
+        self.use_cudnn = use_cudnn
+        if backend.backend() == 'tensorflow' and backend.cudnn_ok(cell.activation, cell.recurrent_activation, self.unroll, cell.use_bias) and (use_cudnn in (True, 'auto')):
+            self.supports_jit = False
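+
+    # Illustrative usage sketch (a minimal example assuming the public
+    # `keras.layers.LSTM` export; not part of the library source):
+    #
+    #   import numpy as np
+    #   import keras
+    #
+    #   inputs = np.random.random((32, 10, 8)).astype('float32')
+    #   output = keras.layers.LSTM(4)(inputs)  # shape: (32, 4)
+    #   lstm = keras.layers.LSTM(4, return_sequences=True, return_state=True)
+    #   whole_seq_output, final_h, final_c = lstm(inputs)
+    #   # whole_seq_output: (32, 10, 4); final_h, final_c: (32, 4)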
+    def inner_loop(self, sequences, initial_state, mask, training=False):
+        if tree.is_nested(mask):
+            mask = mask[0]
+        if self.use_cudnn in ('auto', True):
+            if not self.recurrent_dropout:
+                try:
+                    if self.dropout:
+                        dp_mask = self.cell.get_dropout_mask(sequences[:, 0, :])
+                        dp_mask = ops.expand_dims(dp_mask, axis=1)
+                        dp_mask = ops.broadcast_to(dp_mask, ops.shape(sequences))
+                        dp_sequences = sequences * dp_mask
+                    else:
+                        dp_sequences = sequences
+                    out = backend.lstm(dp_sequences, initial_state[0], initial_state[1], mask, kernel=self.cell.kernel, recurrent_kernel=self.cell.recurrent_kernel, bias=self.cell.bias, activation=self.cell.activation, recurrent_activation=self.cell.recurrent_activation, return_sequences=self.return_sequences, go_backwards=self.go_backwards, unroll=self.unroll)
+                    if backend.backend() == 'tensorflow':
+                        self.supports_jit = False
+                    return out
+                except NotImplementedError:
+                    pass
+        if self.use_cudnn is True:
+            raise ValueError("use_cudnn=True was specified, but cuDNN is not supported for this layer configuration with this backend. Pass use_cudnn='auto' to fall back to a non-cuDNN implementation.")
+        return super().inner_loop(sequences, initial_state, mask=mask, training=training)
+
+    def call(self, sequences, initial_state=None, mask=None, training=False):
+        return super().call(sequences, mask=mask, training=training, initial_state=initial_state)
+
+    @property
+    def units(self):
+        return self.cell.units
+
+    @property
+    def activation(self):
+        return self.cell.activation
+
+    @property
+    def recurrent_activation(self):
+        return self.cell.recurrent_activation
+
+    @property
+    def use_bias(self):
+        return self.cell.use_bias
+
+    @property
+    def unit_forget_bias(self):
+        return self.cell.unit_forget_bias
+
+    @property
+    def kernel_initializer(self):
+        return self.cell.kernel_initializer
+
+    @property
+    def recurrent_initializer(self):
+        return self.cell.recurrent_initializer
+
+    @property
+    def bias_initializer(self):
+        return self.cell.bias_initializer
+
+    @property
+    def kernel_regularizer(self):
+        return self.cell.kernel_regularizer
+
+    @property
+    def recurrent_regularizer(self):
+        return self.cell.recurrent_regularizer
+
+    @property
+    def bias_regularizer(self):
+        return self.cell.bias_regularizer
+
+    @property
+    def kernel_constraint(self):
+        return self.cell.kernel_constraint
+
+    @property
+    def recurrent_constraint(self):
+        return self.cell.recurrent_constraint
+
+    @property
+    def bias_constraint(self):
+        return self.cell.bias_constraint
+
+    @property
+    def dropout(self):
+        return self.cell.dropout
+
+    @property
+    def recurrent_dropout(self):
+        return self.cell.recurrent_dropout
+
+    def get_config(self):
+        config = {'units': self.units, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'seed': self.cell.seed}
+        base_config = super().get_config()
+        del base_config['cell']
+        return {**base_config, **config}
+
+    @classmethod
+    def from_config(cls, config):
+        return cls(**config)
+
+# File: keras-master/keras/src/layers/rnn/rnn.py
+from keras.src import backend
+from keras.src import ops
+from keras.src import tree
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer
+from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
+from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells
+from keras.src.saving import serialization_lib
+from keras.src.utils import tracking
+
+@keras_export('keras.layers.RNN')
+class RNN(Layer):
+
+    def __init__(self, cell, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, zero_output_for_mask=False, **kwargs):
+        if isinstance(cell, (list, tuple)):
+            cell = StackedRNNCells(cell)
+        if 'call' not in dir(cell):
+            raise ValueError(f'Argument `cell` should have a `call` method. Received: cell={cell}')
+        if 'state_size' not in dir(cell):
+            raise ValueError(f'The RNN cell should have a `state_size` attribute (single integer or list of integers, one integer per RNN state). Received: cell={cell}')
+        super().__init__(**kwargs)
+        self.zero_output_for_mask = zero_output_for_mask
+        self.cell = cell
+        self.return_sequences = return_sequences
+        self.return_state = return_state
+        self.go_backwards = go_backwards
+        self.stateful = stateful
+        self.unroll = unroll
+        self.supports_masking = True
+        self.input_spec = None
+        self.states = None
+        state_size = getattr(self.cell, 'state_size', None)
+        if state_size is None:
+            raise ValueError('state_size must be specified as property on the RNN cell.')
+        if not isinstance(state_size, (list, tuple, int)):
+            raise ValueError('state_size must be an integer, or a list/tuple of integers (one for each state tensor).')
+        if isinstance(state_size, int):
+            self.state_size = [state_size]
+            self.single_state = True
+        else:
+            self.state_size = list(state_size)
+            self.single_state = False
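+
+    # Illustrative usage sketch with a custom cell (a minimal example
+    # assuming the public `keras.layers.RNN`, `keras.layers.Layer`, and
+    # `keras.ops` exports; not part of the library source):
+    #
+    #   import numpy as np
+    #   import keras
+    #   from keras import ops
+    #
+    #   class MinimalRNNCell(keras.layers.Layer):
+    #       def __init__(self, units, **kwargs):
+    #           super().__init__(**kwargs)
+    #           self.units = units
+    #           self.state_size = units
+    #
+    #       def build(self, input_shape):
+    #           self.kernel = self.add_weight(shape=(input_shape[-1], self.units), name='kernel')
+    #           self.recurrent_kernel = self.add_weight(shape=(self.units, self.units), name='recurrent_kernel')
+    #
+    #       def call(self, inputs, states):
+    #           prev_output = states[0]
+    #           h = ops.matmul(inputs, self.kernel)
+    #           output = h + ops.matmul(prev_output, self.recurrent_kernel)
+    #           return output, [output]
+    #
+    #   layer = keras.layers.RNN(MinimalRNNCell(32))
+    #   y = layer(np.random.random((16, 10, 8)))  # shape: (16, 32)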
+    def compute_output_shape(self, sequences_shape, initial_state_shape=None):
+        batch_size = sequences_shape[0]
+        length = sequences_shape[1]
+        states_shape = []
+        for state_size in self.state_size:
+            if isinstance(state_size, int):
+                states_shape.append((batch_size, state_size))
+            elif isinstance(state_size, (list, tuple)):
+                states_shape.append([(batch_size, s) for s in state_size])
+        output_size = getattr(self.cell, 'output_size', None)
+        if output_size is None:
+            output_size = self.state_size[0]
+        if not isinstance(output_size, int):
+            raise ValueError('output_size must be an integer.')
+        if self.return_sequences:
+            output_shape = (batch_size, length, output_size)
+        else:
+            output_shape = (batch_size, output_size)
+        if self.return_state:
+            return (output_shape, *states_shape)
+        return output_shape
+
+    def compute_mask(self, _, mask):
+        mask = tree.flatten(mask)[0]
+        output_mask = mask if self.return_sequences else None
+        if self.return_state:
+            state_mask = [None for _ in self.state_size]
+            return [output_mask] + state_mask
+        else:
+            return output_mask
+
+    def build(self, sequences_shape, initial_state_shape=None):
+        step_input_shape = (sequences_shape[0],) + tuple(sequences_shape[2:])
+        if isinstance(self.cell, Layer) and (not self.cell.built):
+            self.cell.build(step_input_shape)
+            self.cell.built = True
+        if self.stateful:
+            if self.states is not None:
+                self.reset_state()
+            else:
+                if sequences_shape[0] is None:
+                    raise ValueError(f'When using `stateful=True` in an RNN, the batch size must be static. Found dynamic batch size: sequences.shape={sequences_shape}')
+                self._create_state_variables(sequences_shape[0])
+        self.built = True
+
+    @tracking.no_automatic_dependency_tracking
+    def _create_state_variables(self, batch_size):
+        with backend.name_scope(self.name, caller=self):
+            self.states = tree.map_structure(lambda value: backend.Variable(value, trainable=False, dtype=self.variable_dtype, name='rnn_state'), self.get_initial_state(batch_size))
+
+    def get_initial_state(self, batch_size):
+        get_initial_state_fn = getattr(self.cell, 'get_initial_state', None)
+        if get_initial_state_fn:
+            init_state = get_initial_state_fn(batch_size=batch_size)
+        else:
+            return [ops.zeros((batch_size, d), dtype=self.cell.compute_dtype) for d in self.state_size]
+        if not tree.is_nested(init_state):
+            init_state = [init_state]
+        return list(init_state)
+
+    def reset_states(self):
+        self.reset_state()
+
+    def reset_state(self):
+        if self.states is not None:
+            for v in self.states:
+                v.assign(ops.zeros_like(v))
+
+    def inner_loop(self, sequences, initial_state, mask, training=False):
+        cell_kwargs = {}
+        if isinstance(self.cell, Layer) and self.cell._call_has_training_arg:
+            cell_kwargs['training'] = training
+
+        def step(inputs, states):
+            (output, new_states) = self.cell(inputs, states, **cell_kwargs)
+            if not tree.is_nested(new_states):
+                new_states = [new_states]
+            return (output, new_states)
+        if not tree.is_nested(initial_state):
+            initial_state = [initial_state]
+        return backend.rnn(step, sequences, initial_state, go_backwards=self.go_backwards, mask=mask, unroll=self.unroll, input_length=sequences.shape[1], zero_output_for_mask=self.zero_output_for_mask, return_all_outputs=self.return_sequences)
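+
+    # `call` below resolves the initial state (explicit argument, stored
+    # `stateful` variables, or zeros from `get_initial_state`), configures
+    # any dropout masks on the cell, runs `inner_loop`, casts outputs and
+    # states to the layer's compute dtype, and, when `stateful=True`,
+    # assigns the new states back into the state variables.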
+    def call(self, sequences, initial_state=None, mask=None, training=False):
+        timesteps = sequences.shape[1]
+        if self.unroll and timesteps is None:
+            raise ValueError('Cannot unroll an RNN if the time dimension is undefined. \n- If using a Sequential model, specify the time dimension by passing an `Input()` as your first layer.\n- If using the functional API, specify the time dimension by passing a `shape` or `batch_shape` argument to your `Input()`.')
+        if initial_state is None:
+            if self.stateful:
+                initial_state = self.states
+            else:
+                initial_state = self.get_initial_state(batch_size=ops.shape(sequences)[0])
+        if not tree.is_nested(initial_state):
+            initial_state = [initial_state]
+        initial_state = list(initial_state)
+        initial_state = tree.map_structure(lambda x: backend.convert_to_tensor(x, dtype=self.cell.compute_dtype), initial_state)
+        self._maybe_config_dropout_masks(self.cell, sequences[:, 0, :], initial_state)
+        (last_output, outputs, states) = self.inner_loop(sequences=sequences, initial_state=initial_state, mask=mask, training=training)
+        last_output = ops.cast(last_output, self.compute_dtype)
+        outputs = ops.cast(outputs, self.compute_dtype)
+        states = tree.map_structure(lambda x: ops.cast(x, dtype=self.compute_dtype), states)
+        self._maybe_reset_dropout_masks(self.cell)
+        if self.stateful:
+            for (self_state, state) in zip(tree.flatten(self.states), tree.flatten(states)):
+                self_state.assign(state)
+        if self.return_sequences:
+            output = outputs
+        else:
+            output = last_output
+        if self.return_state:
+            return (output, *states)
+        return output
+
+    def _maybe_config_dropout_masks(self, cell, input_sequence, input_state):
+        state = input_state[0] if isinstance(input_state, (list, tuple)) else input_state
+        if isinstance(cell, DropoutRNNCell):
+            cell.get_dropout_mask(input_sequence)
+            cell.get_recurrent_dropout_mask(state)
+        if isinstance(cell, StackedRNNCells):
+            for (c, s) in zip(cell.cells, input_state):
+                self._maybe_config_dropout_masks(c, input_sequence, s)
+                s = list(s) if tree.is_nested(s) else [s]
+                cell_call_fn = c.__call__ if callable(c) else c.call
+                (input_sequence, _) = cell_call_fn(input_sequence, s)
+
+    def _maybe_reset_dropout_masks(self, cell):
+        if isinstance(cell, DropoutRNNCell):
+            cell.reset_dropout_mask()
+            cell.reset_recurrent_dropout_mask()
+        if isinstance(cell, StackedRNNCells):
+            for c in cell.cells:
+                self._maybe_reset_dropout_masks(c)
+
+    def get_config(self):
+        config = {'return_sequences': self.return_sequences, 'return_state': self.return_state, 'go_backwards': self.go_backwards, 'stateful': self.stateful, 'unroll': self.unroll, 'zero_output_for_mask': self.zero_output_for_mask}
+        config['cell'] = serialization_lib.serialize_keras_object(self.cell)
+        base_config = super().get_config()
+        return {**base_config, **config}
+
+    @classmethod
+    def from_config(cls, config, custom_objects=None):
+        cell = serialization_lib.deserialize_keras_object(config.pop('cell'), custom_objects=custom_objects)
+        layer = cls(cell, **config)
+        return layer
+
+# File: keras-master/keras/src/layers/rnn/simple_rnn.py
+from keras.src import activations
+from keras.src import backend
+from keras.src import constraints
+from keras.src import initializers
+from keras.src import ops
+from keras.src import regularizers
+from keras.src.api_export import keras_export
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
+from keras.src.layers.rnn.rnn import RNN
+
+@keras_export('keras.layers.SimpleRNNCell')
+class SimpleRNNCell(Layer, DropoutRNNCell):
+
+    def __init__(self, units, activation='tanh', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal',
bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, seed=None, **kwargs): + if units <= 0: + raise ValueError(f'Received an invalid value for argument `units`, expected a positive integer, got {units}.') + super().__init__(**kwargs) + self.seed = seed + self.seed_generator = backend.random.SeedGenerator(seed) + self.units = units + self.activation = activations.get(activation) + self.use_bias = use_bias + self.kernel_initializer = initializers.get(kernel_initializer) + self.recurrent_initializer = initializers.get(recurrent_initializer) + self.bias_initializer = initializers.get(bias_initializer) + self.kernel_regularizer = regularizers.get(kernel_regularizer) + self.recurrent_regularizer = regularizers.get(recurrent_regularizer) + self.bias_regularizer = regularizers.get(bias_regularizer) + self.kernel_constraint = constraints.get(kernel_constraint) + self.recurrent_constraint = constraints.get(recurrent_constraint) + self.bias_constraint = constraints.get(bias_constraint) + self.dropout = min(1.0, max(0.0, dropout)) + self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout)) + self.state_size = self.units + self.output_size = self.units + + def build(self, input_shape): + self.kernel = self.add_weight(shape=(input_shape[-1], self.units), name='kernel', initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) + self.recurrent_kernel = self.add_weight(shape=(self.units, self.units), name='recurrent_kernel', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) + if self.use_bias: + self.bias = self.add_weight(shape=(self.units,), name='bias', initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) + else: + self.bias = None + self.built = True + + def call(self, sequence, states, training=False): + prev_output = states[0] if isinstance(states, (list, tuple)) else states + dp_mask = self.get_dropout_mask(sequence) + rec_dp_mask = self.get_recurrent_dropout_mask(prev_output) + if training and dp_mask is not None: + sequence = sequence * dp_mask + h = ops.matmul(sequence, self.kernel) + if self.bias is not None: + h += self.bias + if training and rec_dp_mask is not None: + prev_output = prev_output * rec_dp_mask + output = h + ops.matmul(prev_output, self.recurrent_kernel) + if self.activation is not None: + output = self.activation(output) + new_state = [output] if isinstance(states, (list, tuple)) else output + return (output, new_state) + + def get_initial_state(self, batch_size=None): + return [ops.zeros((batch_size, self.state_size), dtype=self.compute_dtype)] + + def get_config(self): + config = {'units': self.units, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 
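+ # remaining entries (bias constraint, dropout rates, seed) let cls(**config) rebuild an identical cell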
'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'seed': self.seed} + base_config = super().get_config() + return {**base_config, **config} + +@keras_export('keras.layers.SimpleRNN') +class SimpleRNN(RNN): + + def __init__(self, units, activation='tanh', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, seed=None, **kwargs): + cell = SimpleRNNCell(units, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, seed=seed, dtype=kwargs.get('dtype', None), trainable=kwargs.get('trainable', True), name='simple_rnn_cell') + super().__init__(cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, unroll=unroll, **kwargs) + self.input_spec = [InputSpec(ndim=3)] + + def call(self, sequences, initial_state=None, mask=None, training=False): + return super().call(sequences, mask=mask, training=training, initial_state=initial_state) + + @property + def units(self): + return self.cell.units + + @property + def activation(self): + return self.cell.activation + + @property + def use_bias(self): + return self.cell.use_bias + + @property + def kernel_initializer(self): + return self.cell.kernel_initializer + + @property + def recurrent_initializer(self): + return self.cell.recurrent_initializer + + @property + def bias_initializer(self): + return self.cell.bias_initializer + + @property + def kernel_regularizer(self): + return self.cell.kernel_regularizer + + @property + def recurrent_regularizer(self): + return self.cell.recurrent_regularizer + + @property + def bias_regularizer(self): + return self.cell.bias_regularizer + + @property + def kernel_constraint(self): + return self.cell.kernel_constraint + + @property + def recurrent_constraint(self): + return self.cell.recurrent_constraint + + @property + def bias_constraint(self): + return self.cell.bias_constraint + + @property + def dropout(self): + return self.cell.dropout + + @property + def recurrent_dropout(self): + return self.cell.recurrent_dropout + + def get_config(self): + config = {'units': self.units, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 
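+ # the wrapper serializes the cell's hyperparameters flat; the nested 'cell' entry is dropped from the base config below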
'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout} + base_config = super().get_config() + del base_config['cell'] + return {**base_config, **config} + + @classmethod + def from_config(cls, config): + return cls(**config) + +# File: keras-master/keras/src/layers/rnn/stacked_rnn_cells.py +from keras.src import ops +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.saving import serialization_lib + +@keras_export('keras.layers.StackedRNNCells') +class StackedRNNCells(Layer): + + def __init__(self, cells, **kwargs): + super().__init__(**kwargs) + for cell in cells: + if 'call' not in dir(cell): + raise ValueError(f'All cells must have a `call` method. Received cell without a `call` method: {cell}') + if 'state_size' not in dir(cell): + raise ValueError(f'All cells must have a `state_size` attribute. Received cell without a `state_size`: {cell}') + self.cells = cells + + @property + def state_size(self): + return [c.state_size for c in self.cells] + + @property + def output_size(self): + if getattr(self.cells[-1], 'output_size', None) is not None: + return self.cells[-1].output_size + elif isinstance(self.cells[-1].state_size, (list, tuple)): + return self.cells[-1].state_size[0] + else: + return self.cells[-1].state_size + + def get_initial_state(self, batch_size=None): + initial_states = [] + for cell in self.cells: + get_initial_state_fn = getattr(cell, 'get_initial_state', None) + if get_initial_state_fn: + initial_states.append(get_initial_state_fn(batch_size=batch_size)) + elif isinstance(cell.state_size, int): + initial_states.append(ops.zeros((batch_size, cell.state_size), dtype=self.compute_dtype)) + else: + initial_states.append([ops.zeros((batch_size, d), dtype=self.compute_dtype) for d in cell.state_size]) + return initial_states + + def call(self, inputs, states, training=False, **kwargs): + new_states = [] + for (cell, states) in zip(self.cells, states): + state_is_list = tree.is_nested(states) + states = list(states) if tree.is_nested(states) else [states] + if isinstance(cell, Layer) and cell._call_has_training_arg: + kwargs['training'] = training + else: + kwargs.pop('training', None) + cell_call_fn = cell.__call__ if callable(cell) else cell.call + (inputs, states) = cell_call_fn(inputs, states, **kwargs) + if len(states) == 1 and (not state_is_list): + states = states[0] + new_states.append(states) + if len(new_states) == 1: + new_states = new_states[0] + return (inputs, new_states) + + def build(self, input_shape): + for cell in self.cells: + if isinstance(cell, Layer) and (not cell.built): + cell.build(input_shape) + cell.built = True + if getattr(cell, 'output_size', None) is not None: + output_dim = cell.output_size + elif isinstance(cell.state_size, (list, tuple)): + output_dim = cell.state_size[0] + else: + output_dim = cell.state_size + batch_size = tree.flatten(input_shape)[0] + input_shape = (batch_size, output_dim) + self.built = True + + def get_config(self): + cells = [] + for cell in self.cells: + cells.append(serialization_lib.serialize_keras_object(cell)) + config = {'cells': cells} + base_config = super().get_config() + return {**base_config, **config} + + @classmethod + def from_config(cls, config, custom_objects=None): + cells = [] + for cell_config in config.pop('cells'): + 
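+ # rebuild each stacked cell from its serialized config, resolving custom cell classes via custom_objects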
cells.append(serialization_lib.deserialize_keras_object(cell_config, custom_objects=custom_objects)) + return cls(cells, **config) + +# File: keras-master/keras/src/layers/rnn/time_distributed.py +"""""" +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.core.wrapper import Wrapper +from keras.src.layers.layer import Layer + +@keras_export('keras.layers.TimeDistributed') +class TimeDistributed(Wrapper): + + def __init__(self, layer, **kwargs): + if not isinstance(layer, Layer): + raise ValueError(f'Please initialize `TimeDistributed` layer with a `keras.layers.Layer` instance. Received: {layer}') + super().__init__(layer, **kwargs) + self.supports_masking = True + + def _get_child_input_shape(self, input_shape): + if not isinstance(input_shape, (tuple, list)) or len(input_shape) < 3: + raise ValueError(f'`TimeDistributed` Layer should be passed an `input_shape` with at least 3 dimensions, received: {input_shape}') + return (input_shape[0], *input_shape[2:]) + + def compute_output_shape(self, input_shape): + child_input_shape = self._get_child_input_shape(input_shape) + child_output_shape = self.layer.compute_output_shape(child_input_shape) + return (child_output_shape[0], input_shape[1], *child_output_shape[1:]) + + def build(self, input_shape): + child_input_shape = self._get_child_input_shape(input_shape) + super().build(child_input_shape) + self.built = True + + def call(self, inputs, training=None, mask=None): + input_shape = ops.shape(inputs) + mask_shape = None if mask is None else ops.shape(mask) + batch_size = input_shape[0] + timesteps = input_shape[1] + if mask_shape is not None and mask_shape[:2] != (batch_size, timesteps): + raise ValueError(f'`TimeDistributed` Layer should be passed a `mask` of shape ({batch_size}, {timesteps}, ...), received: mask.shape={mask_shape}') + + def time_distributed_transpose(data): + axes = [1, 0, *range(2, len(data.shape))] + return ops.transpose(data, axes=axes) + inputs = time_distributed_transpose(inputs) + if mask is not None: + mask = time_distributed_transpose(mask) + + def step_function(i): + kwargs = {} + if self.layer._call_has_mask_arg and mask is not None: + kwargs['mask'] = mask[i] + if self.layer._call_has_training_arg: + kwargs['training'] = training + return self.layer.call(inputs[i], **kwargs) + if inputs.shape[0] is not None: + outputs = ops.stack([step_function(i) for i in range(inputs.shape[0])]) + return time_distributed_transpose(outputs) + outputs = backend.vectorized_map(step_function, ops.arange(timesteps)) + return time_distributed_transpose(outputs) + +# File: keras-master/keras/src/legacy/backend.py +"""""" +import itertools +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.utils.module_utils import tensorflow as tf +py_any = any +py_all = all + +@keras_export('keras._legacy.backend.abs') +def abs(x): + return tf.abs(x) + +@keras_export('keras._legacy.backend.all') +def all(x, axis=None, keepdims=False): + x = tf.cast(x, tf.bool) + return tf.reduce_all(x, axis, keepdims) + +@keras_export('keras._legacy.backend.any') +def any(x, axis=None, keepdims=False): + x = tf.cast(x, tf.bool) + return tf.reduce_any(x, axis, keepdims) + +@keras_export('keras._legacy.backend.argmax') +def argmax(x, axis=-1): + return tf.argmax(x, axis) + +@keras_export('keras._legacy.backend.argmin') +def argmin(x, axis=-1): + return tf.argmin(x, axis) + +@keras_export('keras._legacy.backend.arange') +def 
arange(start, stop=None, step=1, dtype='int32'): + if stop is None and start < 0: + start = 0 + result = tf.range(start, limit=stop, delta=step, name='arange') + if dtype != 'int32': + result = tf.cast(result, dtype) + return result + +@keras_export('keras._legacy.backend.batch_dot') +def batch_dot(x, y, axes=None): + x_shape = x.shape + y_shape = y.shape + x_ndim = len(x_shape) + y_ndim = len(y_shape) + if x_ndim < 2 or y_ndim < 2: + raise ValueError('Cannot do batch_dot on inputs with rank < 2. Received inputs with tf.shapes ' + str(x_shape) + ' and ' + str(y_shape) + '.') + x_batch_size = x_shape[0] + y_batch_size = y_shape[0] + if x_batch_size is not None and y_batch_size is not None: + if x_batch_size != y_batch_size: + raise ValueError('Cannot do batch_dot on inputs with different batch sizes. Received inputs with tf.shapes ' + str(x_shape) + ' and ' + str(y_shape) + '.') + if isinstance(axes, int): + axes = [axes, axes] + if axes is None: + if y_ndim == 2: + axes = [x_ndim - 1, y_ndim - 1] + else: + axes = [x_ndim - 1, y_ndim - 2] + if py_any((isinstance(a, (list, tuple)) for a in axes)): + raise ValueError('Multiple target dimensions are not supported. ' + 'Expected: None, int, (int, int), ' + 'Provided: ' + str(axes)) + axes = list(axes) + if axes[0] < 0: + axes[0] += x_ndim + if axes[1] < 0: + axes[1] += y_ndim + if 0 in axes: + raise ValueError('Cannot perform batch_dot over axis 0. If your inputs are not batched, add a dummy batch dimension to your inputs using K.expand_dims(x, 0)') + (a0, a1) = axes + d1 = x_shape[a0] + d2 = y_shape[a1] + if d1 is not None and d2 is not None and (d1 != d2): + raise ValueError('Cannot do batch_dot on inputs with tf.shapes ' + str(x_shape) + ' and ' + str(y_shape) + ' with axes=' + str(axes) + '. x.shape[%d] != y.shape[%d] (%d != %d).' 
% (axes[0], axes[1], d1, d2)) + orig_x_ndim = x_ndim + orig_y_ndim = y_ndim + if x_ndim == 2: + x = tf.expand_dims(x, 1) + a0 += 1 + x_ndim += 1 + if y_ndim == 2: + y = tf.expand_dims(y, 2) + y_ndim += 1 + if a0 != x_ndim - 1: + pattern = list(range(x_ndim)) + for i in range(a0, x_ndim - 1): + pattern[i] = pattern[i + 1] + pattern[-1] = a0 + x = tf.transpose(x, pattern) + if a1 != 1: + pattern = list(range(y_ndim)) + for i in range(a1, 1, -1): + pattern[i] = pattern[i - 1] + pattern[1] = a1 + y = tf.transpose(y, pattern) + if x_ndim > 3: + x_shape = tf.shape(x) + x_mid_dims = x_shape[1:-1] + x_squashed_shape = tf.stack([x_shape[0], -1, x_shape[-1]]) + x = tf.reshape(x, x_squashed_shape) + x_squashed = True + else: + x_squashed = False + if y_ndim > 3: + y_shape = tf.shape(y) + y_trail_dims = y_shape[2:] + y_squashed_shape = tf.stack([y_shape[0], y_shape[1], -1]) + y = tf.reshape(y, y_squashed_shape) + y_squashed = True + else: + y_squashed = False + result = tf.matmul(x, y) + output_shape = tf.shape(result) + do_reshape = False + if x_squashed: + output_shape = tf.concat([output_shape[:1], x_mid_dims, output_shape[-1:]], 0) + do_reshape = True + if y_squashed: + output_shape = tf.concat([output_shape[:-1], y_trail_dims], 0) + do_reshape = True + if do_reshape: + result = tf.reshape(result, output_shape) + if orig_x_ndim == 2: + result = tf.squeeze(result, 1) + elif orig_y_ndim == 2: + result = tf.squeeze(result, -1) + return result + +@keras_export('keras._legacy.backend.batch_flatten') +def batch_flatten(x): + x = tf.reshape(x, tf.stack([-1, prod(tf.shape(x)[1:])])) + return x + +@keras_export('keras._legacy.backend.batch_get_value') +def batch_get_value(tensors): + return [x.numpy() for x in tensors] + +@keras_export('keras._legacy.backend.batch_set_value') +def batch_set_value(tuples): + if tf.executing_eagerly() or tf.inside_function(): + for (x, value) in tuples: + value = np.asarray(value, dtype=x.dtype.name) + x.assign(value) + +@keras_export('keras._legacy.backend.batch_normalization') +def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=0.001): + return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon) + +@keras_export('keras._legacy.backend.bias_add') +def bias_add(x, bias, data_format=None): + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f'Unknown data_format: {data_format}') + bias_shape = bias.shape + if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1: + raise ValueError(f'Unexpected bias dimensions {len(bias_shape)}. 
Expected it to be 1 or {ndim(x) - 1} dimensions') + if len(bias_shape) == 1: + if data_format == 'channels_first': + return tf.nn.bias_add(x, bias, data_format='NCHW') + return tf.nn.bias_add(x, bias, data_format='NHWC') + if ndim(x) in (3, 4, 5): + if data_format == 'channels_first': + bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1] + return x + reshape(bias, bias_reshape_axis) + return x + reshape(bias, (1,) + bias_shape) + return tf.nn.bias_add(x, bias) + +@keras_export('keras._legacy.backend.binary_crossentropy') +def binary_crossentropy(target, output, from_logits=False): + target = tf.convert_to_tensor(target) + output = tf.convert_to_tensor(output) + if from_logits: + return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output) + epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype) + output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_) + bce = target * tf.math.log(output + backend.epsilon()) + bce += (1 - target) * tf.math.log(1 - output + backend.epsilon()) + return -bce + +@keras_export('keras._legacy.backend.binary_focal_crossentropy') +def binary_focal_crossentropy(target, output, apply_class_balancing=False, alpha=0.25, gamma=2.0, from_logits=False): + sigmoidal = tf.sigmoid(output) if from_logits else output + p_t = target * sigmoidal + (1 - target) * (1 - sigmoidal) + focal_factor = tf.pow(1.0 - p_t, gamma) + bce = binary_crossentropy(target=target, output=output, from_logits=from_logits) + focal_bce = focal_factor * bce + if apply_class_balancing: + weight = target * alpha + (1 - target) * (1 - alpha) + focal_bce = weight * focal_bce + return focal_bce + +@keras_export('keras._legacy.backend.cast') +def cast(x, dtype): + return tf.cast(x, dtype) + +@keras_export('keras._legacy.backend.cast_to_floatx') +def cast_to_floatx(x): + if isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor)): + return tf.cast(x, dtype=backend.floatx()) + return np.asarray(x, dtype=backend.floatx()) + +@keras_export('keras._legacy.backend.categorical_crossentropy') +def categorical_crossentropy(target, output, from_logits=False, axis=-1): + target = tf.convert_to_tensor(target) + output = tf.convert_to_tensor(output) + target.shape.assert_is_compatible_with(output.shape) + if from_logits: + return tf.nn.softmax_cross_entropy_with_logits(labels=target, logits=output, axis=axis) + output = output / tf.reduce_sum(output, axis, True) + epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype) + output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_) + return -tf.reduce_sum(target * tf.math.log(output), axis) + +@keras_export('keras._legacy.backend.categorical_focal_crossentropy') +def categorical_focal_crossentropy(target, output, alpha=0.25, gamma=2.0, from_logits=False, axis=-1): + target = tf.convert_to_tensor(target) + output = tf.convert_to_tensor(output) + target.shape.assert_is_compatible_with(output.shape) + if from_logits: + output = tf.nn.softmax(output, axis=axis) + output = output / tf.reduce_sum(output, axis=axis, keepdims=True) + epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype) + output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_) + cce = -target * tf.math.log(output) + modulating_factor = tf.pow(1.0 - output, gamma) + weighting_factor = tf.multiply(modulating_factor, alpha) + focal_cce = tf.multiply(weighting_factor, cce) + focal_cce = tf.reduce_sum(focal_cce, axis=axis) + return focal_cce + +@keras_export('keras._legacy.backend.clip') +def clip(x, min_value, max_value): + if isinstance(min_value, (int, 
float)) and isinstance(max_value, (int, float)): + if max_value < min_value: + max_value = min_value + if min_value is None: + min_value = -np.inf + if max_value is None: + max_value = np.inf + return tf.clip_by_value(x, min_value, max_value) + +@keras_export('keras._legacy.backend.concatenate') +def concatenate(tensors, axis=-1): + if axis < 0: + rank = ndim(tensors[0]) + if rank: + axis %= rank + else: + axis = 0 + if py_all((is_sparse(x) for x in tensors)): + return tf.compat.v1.sparse_concat(axis, tensors) + elif py_all((isinstance(x, tf.RaggedTensor) for x in tensors)): + return tf.concat(tensors, axis) + else: + return tf.concat([to_dense(x) for x in tensors], axis) + +@keras_export('keras._legacy.backend.constant') +def constant(value, dtype=None, shape=None, name=None): + if dtype is None: + dtype = backend.floatx() + return tf.constant(value, dtype=dtype, shape=shape, name=name) + +def _preprocess_conv1d_input(x, data_format): + tf_data_format = 'NWC' + if data_format == 'channels_first': + tf_data_format = 'NCW' + return (x, tf_data_format) + +def _preprocess_conv2d_input(x, data_format, force_transpose=False): + tf_data_format = 'NHWC' + if data_format == 'channels_first': + if force_transpose: + x = tf.transpose(x, (0, 2, 3, 1)) + else: + tf_data_format = 'NCHW' + return (x, tf_data_format) + +def _preprocess_conv3d_input(x, data_format): + tf_data_format = 'NDHWC' + if data_format == 'channels_first': + tf_data_format = 'NCDHW' + return (x, tf_data_format) + +def _preprocess_padding(padding): + if padding == 'same': + padding = 'SAME' + elif padding == 'valid': + padding = 'VALID' + else: + raise ValueError(f'Invalid padding: {padding}') + return padding + +@keras_export('keras._legacy.backend.conv1d') +def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f'Unknown data_format: {data_format}') + kernel_shape = kernel.shape.as_list() + if padding == 'causal': + left_pad = dilation_rate * (kernel_shape[0] - 1) + x = temporal_padding(x, (left_pad, 0)) + padding = 'valid' + padding = _preprocess_padding(padding) + (x, tf_data_format) = _preprocess_conv1d_input(x, data_format) + x = tf.compat.v1.nn.convolution(input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format) + if data_format == 'channels_first' and tf_data_format == 'NWC': + x = tf.transpose(x, (0, 2, 1)) + return x + +@keras_export('keras._legacy.backend.conv2d') +def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f'Unknown data_format: {data_format}') + (x, tf_data_format) = _preprocess_conv2d_input(x, data_format) + padding = _preprocess_padding(padding) + x = tf.compat.v1.nn.convolution(input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format) + if data_format == 'channels_first' and tf_data_format == 'NHWC': + x = tf.transpose(x, (0, 3, 1, 2)) + return x + +@keras_export('keras._legacy.backend.conv2d_transpose') +def conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): + if data_format is None: + data_format = backend.image_data_format() + if data_format not in 
{'channels_first', 'channels_last'}: + raise ValueError(f'Unknown data_format: {data_format}') + if data_format == 'channels_first' and dilation_rate != (1, 1): + force_transpose = True + else: + force_transpose = False + (x, tf_data_format) = _preprocess_conv2d_input(x, data_format, force_transpose) + if data_format == 'channels_first' and tf_data_format == 'NHWC': + output_shape = (output_shape[0], output_shape[2], output_shape[3], output_shape[1]) + if output_shape[0] is None: + output_shape = (tf.shape(x)[0],) + tuple(output_shape[1:]) + if isinstance(output_shape, (tuple, list)): + output_shape = tf.stack(list(output_shape)) + padding = _preprocess_padding(padding) + if tf_data_format == 'NHWC': + strides = (1,) + strides + (1,) + else: + strides = (1, 1) + strides + if dilation_rate == (1, 1): + x = tf.compat.v1.nn.conv2d_transpose(x, kernel, output_shape, strides, padding=padding, data_format=tf_data_format) + else: + if dilation_rate[0] != dilation_rate[1]: + raise ValueError(f'Expected the 2 dimensions of the `dilation_rate` argument to be equal to each other. Received: dilation_rate={dilation_rate}') + x = tf.nn.atrous_conv2d_transpose(x, kernel, output_shape, rate=dilation_rate[0], padding=padding) + if data_format == 'channels_first' and tf_data_format == 'NHWC': + x = tf.transpose(x, (0, 3, 1, 2)) + return x + +@keras_export('keras._legacy.backend.conv3d') +def conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)): + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f'Unknown data_format: {data_format}') + (x, tf_data_format) = _preprocess_conv3d_input(x, data_format) + padding = _preprocess_padding(padding) + x = tf.compat.v1.nn.convolution(input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format) + if data_format == 'channels_first' and tf_data_format == 'NDHWC': + x = tf.transpose(x, (0, 4, 1, 2, 3)) + return x + +@keras_export('keras._legacy.backend.cos') +def cos(x): + return tf.cos(x) + +@keras_export('keras._legacy.backend.count_params') +def count_params(x): + return np.prod(x.shape.as_list()) + +@keras_export('keras._legacy.backend.ctc_batch_cost') +def ctc_batch_cost(y_true, y_pred, input_length, label_length): + label_length = tf.cast(tf.squeeze(label_length, axis=-1), tf.int32) + input_length = tf.cast(tf.squeeze(input_length, axis=-1), tf.int32) + sparse_labels = tf.cast(ctc_label_dense_to_sparse(y_true, label_length), tf.int32) + y_pred = tf.math.log(tf.transpose(y_pred, perm=[1, 0, 2]) + backend.epsilon()) + return tf.expand_dims(tf.compat.v1.nn.ctc_loss(inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1) + +@keras_export('keras._legacy.backend.ctc_label_dense_to_sparse') +def ctc_label_dense_to_sparse(labels, label_lengths): + label_shape = tf.shape(labels) + num_batches_tns = tf.stack([label_shape[0]]) + max_num_labels_tns = tf.stack([label_shape[1]]) + + def range_less_than(old_input, current_input): + return tf.expand_dims(tf.range(tf.shape(old_input)[1]), 0) < tf.fill(max_num_labels_tns, current_input) + init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool) + dense_mask = tf.compat.v1.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1) + dense_mask = dense_mask[:, 0, :] + label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape) + label_ind = tf.compat.v1.boolean_mask(label_array, 
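+ # dense_mask keeps only the label positions that fall within each sequence's label_length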
dense_mask) + batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), reverse(label_shape, 0))) + batch_ind = tf.compat.v1.boolean_mask(batch_array, dense_mask) + indices = tf.transpose(tf.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1])) + vals_sparse = tf.compat.v1.gather_nd(labels, indices) + return tf.SparseTensor(tf.cast(indices, tf.int64), vals_sparse, tf.cast(label_shape, tf.int64)) + +@keras_export('keras._legacy.backend.ctc_decode') +def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): + input_shape = tf.shape(y_pred) + (num_samples, num_steps) = (input_shape[0], input_shape[1]) + y_pred = tf.math.log(tf.transpose(y_pred, perm=[1, 0, 2]) + backend.epsilon()) + input_length = tf.cast(input_length, tf.int32) + if greedy: + (decoded, log_prob) = tf.nn.ctc_greedy_decoder(inputs=y_pred, sequence_length=input_length) + else: + (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder(inputs=y_pred, sequence_length=input_length, beam_width=beam_width, top_paths=top_paths) + decoded_dense = [] + for st in decoded: + st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps)) + decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1)) + return (decoded_dense, log_prob) + +@keras_export('keras._legacy.backend.cumsum') +def cumsum(x, axis=0): + return tf.cumsum(x, axis=axis) + +@keras_export('keras._legacy.backend.cumprod') +def cumprod(x, axis=0): + return tf.math.cumprod(x, axis=axis) + +@keras_export('keras._legacy.backend.depthwise_conv2d') +def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f'Unknown data_format: {data_format}') + (x, tf_data_format) = _preprocess_conv2d_input(x, data_format) + padding = _preprocess_padding(padding) + if tf_data_format == 'NHWC': + strides = (1,) + strides + (1,) + else: + strides = (1, 1) + strides + x = tf.nn.depthwise_conv2d(x, depthwise_kernel, strides=strides, padding=padding, dilations=dilation_rate, data_format=tf_data_format) + if data_format == 'channels_first' and tf_data_format == 'NHWC': + x = tf.transpose(x, (0, 3, 1, 2)) + return x + +@keras_export('keras._legacy.backend.dot') +def dot(x, y): + if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2): + x_shape = [] + for (i, s) in zip(x.shape, tf.unstack(tf.shape(x))): + if i is not None: + x_shape.append(i) + else: + x_shape.append(s) + x_shape = tuple(x_shape) + y_shape = [] + for (i, s) in zip(y.shape, tf.unstack(tf.shape(y))): + if i is not None: + y_shape.append(i) + else: + y_shape.append(s) + y_shape = tuple(y_shape) + y_permute_dim = list(range(ndim(y))) + y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim + xt = tf.reshape(x, [-1, x_shape[-1]]) + yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1]) + return tf.reshape(tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:]) + if is_sparse(x): + out = tf.sparse.sparse_dense_matmul(x, y) + else: + out = tf.matmul(x, y) + return out + +@keras_export('keras._legacy.backend.dropout') +def dropout(x, level, noise_shape=None, seed=None): + if seed is None: + seed = np.random.randint(10000000.0) + return tf.nn.dropout(x, rate=level, noise_shape=noise_shape, seed=seed) + +@keras_export('keras._legacy.backend.dtype') +def dtype(x): + return x.dtype.base_dtype.name + 
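+# --- A minimal usage sketch for the legacy `dot` helper above (added for
+# illustration, not part of the original file; assumes TensorFlow is installed
+# and eager execution is enabled). For a rank-3 `x` and a rank-2 `y`, `dot`
+# flattens `x`, matmuls against `y`, then restores the leading batch shape:
+#     import tensorflow as tf
+#     x = tf.ones((2, 3, 4))
+#     y = tf.ones((4, 5))
+#     out = dot(x, y)  # contracts x's last axis with y's first
+#     assert tuple(out.shape) == (2, 3, 5)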
+@keras_export('keras._legacy.backend.elu') +def elu(x, alpha=1.0): + res = tf.nn.elu(x) + if alpha == 1: + return res + else: + return tf.where(x > 0, res, alpha * res) + +@keras_export('keras._legacy.backend.equal') +def equal(x, y): + return tf.equal(x, y) + +@keras_export('keras._legacy.backend.eval') +def eval(x): + return get_value(to_dense(x)) + +@keras_export('keras._legacy.backend.exp') +def exp(x): + return tf.exp(x) + +@keras_export('keras._legacy.backend.expand_dims') +def expand_dims(x, axis=-1): + return tf.expand_dims(x, axis) + +@keras_export('keras._legacy.backend.eye') +def eye(size, dtype=None, name=None): + if dtype is None: + dtype = backend.floatx() + tf_dtype = tf.as_dtype(dtype) + return variable(tf.eye(size, dtype=tf_dtype), dtype, name) + +@keras_export('keras._legacy.backend.flatten') +def flatten(x): + return tf.reshape(x, [-1]) + +@keras_export('keras._legacy.backend.foldl') +def foldl(fn, elems, initializer=None, name=None): + return tf.compat.v1.foldl(fn, elems, initializer=initializer, name=name) + +@keras_export('keras._legacy.backend.foldr') +def foldr(fn, elems, initializer=None, name=None): + return tf.compat.v1.foldr(fn, elems, initializer=initializer, name=name) + +@keras_export('keras._legacy.backend.gather') +def gather(reference, indices): + return tf.compat.v1.gather(reference, indices) + +@keras_export('keras._legacy.backend.get_value') +def get_value(x): + if not tf.is_tensor(x): + return x + if tf.executing_eagerly() or isinstance(x, tf.__internal__.EagerTensor): + return x.numpy() + if not getattr(x, '_in_graph_mode', True): + with tf.__internal__.eager_context.eager_mode(): + return x.numpy() + with tf.init_scope(): + return x.numpy() + +@keras_export('keras._legacy.backend.gradients') +def gradients(loss, variables): + return tf.compat.v1.gradients(loss, variables, colocate_gradients_with_ops=True) + +@keras_export('keras._legacy.backend.greater') +def greater(x, y): + return tf.greater(x, y) + +@keras_export('keras._legacy.backend.greater_equal') +def greater_equal(x, y): + return tf.greater_equal(x, y) + +@keras_export('keras._legacy.backend.hard_sigmoid') +def hard_sigmoid(x): + point_two = tf.convert_to_tensor(0.2, dtype=x.dtype) + point_five = tf.convert_to_tensor(0.5, dtype=x.dtype) + x = tf.multiply(x, point_two) + x = tf.add(x, point_five) + x = tf.clip_by_value(x, 0.0, 1.0) + return x + +@keras_export('keras._legacy.backend.in_top_k') +def in_top_k(predictions, targets, k): + return tf.compat.v1.math.in_top_k(predictions, targets, k) + +@keras_export('keras._legacy.backend.int_shape') +def int_shape(x): + try: + shape = x.shape + if not isinstance(shape, tuple): + shape = tuple(shape.as_list()) + return shape + except ValueError: + return None + +@keras_export('keras._legacy.backend.is_sparse') +def is_sparse(tensor): + spec = getattr(tensor, '_type_spec', None) + if spec is not None: + return isinstance(spec, tf.SparseTensorSpec) + return isinstance(tensor, tf.SparseTensor) + +@keras_export('keras._legacy.backend.l2_normalize') +def l2_normalize(x, axis=None): + return tf.linalg.l2_normalize(x, axis=axis) + +@keras_export('keras._legacy.backend.less') +def less(x, y): + return tf.less(x, y) + +@keras_export('keras._legacy.backend.less_equal') +def less_equal(x, y): + return tf.less_equal(x, y) + +@keras_export('keras._legacy.backend.log') +def log(x): + return tf.math.log(x) + +@keras_export('keras._legacy.backend.map_fn') +def map_fn(fn, elems, name=None, dtype=None): + return tf.compat.v1.map_fn(fn, elems, name=name, dtype=dtype) 
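+# --- A hedged example for the legacy `hard_sigmoid` above (illustration only,
+# not in the original source). It computes clip(0.2 * x + 0.5, 0, 1), a cheap
+# piecewise-linear stand-in for the sigmoid that saturates outside [-2.5, 2.5]:
+#     import tensorflow as tf
+#     y = hard_sigmoid(tf.constant([-3.0, 0.0, 3.0]))  # -> [0.0, 0.5, 1.0]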
+ +@keras_export('keras._legacy.backend.max') +def max(x, axis=None, keepdims=False): + return tf.reduce_max(x, axis, keepdims) + +@keras_export('keras._legacy.backend.maximum') +def maximum(x, y): + return tf.maximum(x, y) + +@keras_export('keras._legacy.backend.mean') +def mean(x, axis=None, keepdims=False): + if x.dtype.base_dtype == tf.bool: + x = tf.cast(x, backend.floatx()) + return tf.reduce_mean(x, axis, keepdims) + +@keras_export('keras._legacy.backend.min') +def min(x, axis=None, keepdims=False): + return tf.reduce_min(x, axis, keepdims) + +@keras_export('keras._legacy.backend.minimum') +def minimum(x, y): + return tf.minimum(x, y) + +@keras_export('keras._legacy.backend.moving_average_update') +def moving_average_update(x, value, momentum): + momentum = tf.cast(momentum, x.dtype) + value = tf.cast(value, x.dtype) + return x.assign_sub((x - value) * (1 - momentum)) + +@keras_export('keras._legacy.backend.name_scope') +def name_scope(name): + return tf.name_scope(name) + +@keras_export('keras._legacy.backend.ndim') +def ndim(x): + return x.shape.rank + +@keras_export('keras._legacy.backend.not_equal') +def not_equal(x, y): + return tf.not_equal(x, y) + +@keras_export('keras._legacy.backend.one_hot') +def one_hot(indices, num_classes): + return tf.one_hot(indices, depth=num_classes, axis=-1) + +@keras_export('keras._legacy.backend.ones') +def ones(shape, dtype=None, name=None): + with tf.init_scope(): + if dtype is None: + dtype = backend.floatx() + tf_dtype = tf.as_dtype(dtype) + v = tf.ones(shape=shape, dtype=tf_dtype, name=name) + if py_all(v.shape.as_list()): + return variable(v, dtype=dtype, name=name) + return v + +@keras_export('keras._legacy.backend.ones_like') +def ones_like(x, dtype=None, name=None): + return tf.ones_like(x, dtype=dtype, name=name) + +@keras_export('keras._legacy.backend.permute_dimensions') +def permute_dimensions(x, pattern): + return tf.transpose(x, perm=pattern) + +@keras_export('keras._legacy.backend.pool2d') +def pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max'): + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f'Unknown data_format: {data_format}') + if len(pool_size) != 2: + raise ValueError('`pool_size` must be a tuple of 2 integers.') + if len(strides) != 2: + raise ValueError('`strides` must be a tuple of 2 integers.') + (x, tf_data_format) = _preprocess_conv2d_input(x, data_format) + padding = _preprocess_padding(padding) + if tf_data_format == 'NHWC': + strides = (1,) + strides + (1,) + pool_size = (1,) + pool_size + (1,) + else: + strides = (1, 1) + strides + pool_size = (1, 1) + pool_size + if pool_mode == 'max': + x = tf.compat.v1.nn.max_pool(x, pool_size, strides, padding=padding, data_format=tf_data_format) + elif pool_mode == 'avg': + x = tf.compat.v1.nn.avg_pool(x, pool_size, strides, padding=padding, data_format=tf_data_format) + else: + raise ValueError('Invalid pooling mode: ' + str(pool_mode)) + if data_format == 'channels_first' and tf_data_format == 'NHWC': + x = tf.transpose(x, (0, 3, 1, 2)) + return x + +@keras_export('keras._legacy.backend.pool3d') +def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'): + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f'Unknown data_format: {data_format}') + (x, tf_data_format) = _preprocess_conv3d_input(x, 
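+ # translates 'channels_first'/'channels_last' into TF's native 'NCDHW'/'NDHWC' layout string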
data_format) + padding = _preprocess_padding(padding) + if tf_data_format == 'NDHWC': + strides = (1,) + strides + (1,) + pool_size = (1,) + pool_size + (1,) + else: + strides = (1, 1) + strides + pool_size = (1, 1) + pool_size + if pool_mode == 'max': + x = tf.nn.max_pool3d(x, pool_size, strides, padding=padding, data_format=tf_data_format) + elif pool_mode == 'avg': + x = tf.nn.avg_pool3d(x, pool_size, strides, padding=padding, data_format=tf_data_format) + else: + raise ValueError('Invalid pooling mode: ' + str(pool_mode)) + if data_format == 'channels_first' and tf_data_format == 'NDHWC': + x = tf.transpose(x, (0, 4, 1, 2, 3)) + return x + +@keras_export('keras._legacy.backend.pow') +def pow(x, a): + return tf.pow(x, a) + +@keras_export('keras._legacy.backend.prod') +def prod(x, axis=None, keepdims=False): + return tf.reduce_prod(x, axis, keepdims) + +@keras_export('keras._legacy.backend.random_bernoulli') +def random_bernoulli(shape, p=0.0, dtype=None, seed=None): + if dtype is None: + dtype = backend.floatx() + if seed is None: + seed = np.random.randint(10000000.0) + return tf.where(tf.random.uniform(shape, dtype=dtype, seed=seed) <= p, tf.ones(shape, dtype=dtype), tf.zeros(shape, dtype=dtype)) + +@keras_export('keras._legacy.backend.random_normal') +def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + if dtype is None: + dtype = backend.floatx() + if seed is None: + seed = np.random.randint(10000000.0) + return tf.random.normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed) + +@keras_export('keras._legacy.backend.random_normal_variable') +def random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None): + if dtype is None: + dtype = backend.floatx() + tf_dtype = tf.as_dtype(dtype) + if seed is None: + seed = np.random.randint(1000000000.0) + value = tf.compat.v1.random_normal_initializer(mean, scale, dtype=tf_dtype, seed=seed)(shape) + return variable(value, dtype=dtype, name=name) + +@keras_export('keras._legacy.backend.random_uniform') +def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): + if dtype is None: + dtype = backend.floatx() + if seed is None: + seed = np.random.randint(10000000.0) + return tf.random.uniform(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed) + +@keras_export('keras._legacy.backend.random_uniform_variable') +def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): + if dtype is None: + dtype = backend.floatx() + tf_dtype = tf.as_dtype(dtype) + if seed is None: + seed = np.random.randint(1000000000.0) + value = tf.compat.v1.random_uniform_initializer(low, high, dtype=tf_dtype, seed=seed)(shape) + return variable(value, dtype=dtype, name=name) + +@keras_export('keras._legacy.backend.reshape') +def reshape(x, shape): + return tf.reshape(x, shape) + +@keras_export('keras._legacy.backend.relu') +def relu(x, alpha=0.0, max_value=None, threshold=0.0): + dtype = getattr(x, 'dtype', backend.floatx()) + if alpha != 0.0: + if max_value is None and threshold == 0: + return tf.nn.leaky_relu(x, alpha=alpha) + if threshold != 0: + negative_part = tf.nn.relu(-x + threshold) + else: + negative_part = tf.nn.relu(-x) + clip_max = max_value is not None + if threshold != 0: + x = x * tf.cast(tf.greater(x, threshold), dtype=dtype) + elif max_value == 6: + x = tf.nn.relu6(x) + clip_max = False + else: + x = tf.nn.relu(x) + if clip_max: + max_value = tf.convert_to_tensor(max_value, dtype=x.dtype) + zero = tf.convert_to_tensor(0, dtype=x.dtype) + x = tf.clip_by_value(x, zero, 
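+ # cap activations at max_value (the relu6-style upper bound)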
max_value) + if alpha != 0.0: + alpha = tf.convert_to_tensor(alpha, dtype=x.dtype) + x -= alpha * negative_part + return x + +@keras_export('keras._legacy.backend.repeat') +def repeat(x, n): + assert ndim(x) == 2 + x = tf.expand_dims(x, 1) + pattern = tf.stack([1, n, 1]) + return tf.tile(x, pattern) + +@keras_export('keras._legacy.backend.repeat_elements') +def repeat_elements(x, rep, axis): + x_shape = x.shape.as_list() + if x_shape[axis] is not None: + splits = tf.split(value=x, num_or_size_splits=x_shape[axis], axis=axis) + x_rep = [s for s in splits for _ in range(rep)] + return concatenate(x_rep, axis) + auxiliary_axis = axis + 1 + x_shape = tf.shape(x) + x_rep = tf.expand_dims(x, axis=auxiliary_axis) + reps = np.ones(len(x.shape) + 1) + reps[auxiliary_axis] = rep + x_rep = tf.tile(x_rep, reps) + reps = np.delete(reps, auxiliary_axis) + reps[axis] = rep + reps = tf.constant(reps, dtype='int32') + x_shape *= reps + x_rep = tf.reshape(x_rep, x_shape) + x_shape = x.shape.as_list() + x_rep.set_shape(x_shape) + return x_rep + +@keras_export('keras._legacy.backend.resize_images') +def resize_images(x, height_factor, width_factor, data_format, interpolation='nearest'): + if data_format == 'channels_first': + (rows, cols) = (2, 3) + elif data_format == 'channels_last': + (rows, cols) = (1, 2) + else: + raise ValueError(f'Invalid `data_format` argument: {data_format}') + new_shape = x.shape[rows:cols + 1] + if new_shape.is_fully_defined(): + new_shape = tf.constant(new_shape.as_list(), dtype='int32') + else: + new_shape = tf.shape(x)[rows:cols + 1] + new_shape *= tf.constant(np.array([height_factor, width_factor], dtype='int32')) + if data_format == 'channels_first': + x = permute_dimensions(x, [0, 2, 3, 1]) + interpolations = {'area': tf.image.ResizeMethod.AREA, 'bicubic': tf.image.ResizeMethod.BICUBIC, 'bilinear': tf.image.ResizeMethod.BILINEAR, 'gaussian': tf.image.ResizeMethod.GAUSSIAN, 'lanczos3': tf.image.ResizeMethod.LANCZOS3, 'lanczos5': tf.image.ResizeMethod.LANCZOS5, 'mitchellcubic': tf.image.ResizeMethod.MITCHELLCUBIC, 'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR} + interpolations_list = '"' + '", "'.join(interpolations.keys()) + '"' + if interpolation in interpolations: + x = tf.image.resize(x, new_shape, method=interpolations[interpolation]) + else: + raise ValueError(f'`interpolation` argument should be one of: {interpolations_list}. 
Received: "{interpolation}".') + if data_format == 'channels_first': + x = permute_dimensions(x, [0, 3, 1, 2]) + return x + +@keras_export('keras._legacy.backend.resize_volumes') +def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): + if data_format == 'channels_first': + output = repeat_elements(x, depth_factor, axis=2) + output = repeat_elements(output, height_factor, axis=3) + output = repeat_elements(output, width_factor, axis=4) + return output + elif data_format == 'channels_last': + output = repeat_elements(x, depth_factor, axis=1) + output = repeat_elements(output, height_factor, axis=2) + output = repeat_elements(output, width_factor, axis=3) + return output + else: + raise ValueError(f'Invalid data_format: {data_format}') + +@keras_export('keras._legacy.backend.reverse') +def reverse(x, axes): + if isinstance(axes, int): + axes = [axes] + return tf.reverse(x, axes) + +@keras_export('keras._legacy.backend.rnn') +def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None, time_major=False, zero_output_for_mask=False, return_all_outputs=True): + if not tf.__internal__.tf2.enabled(): + return_all_outputs = True + + def swap_batch_timestep(input_t): + axes = list(range(len(input_t.shape))) + (axes[0], axes[1]) = (1, 0) + return tf.transpose(input_t, axes) + if not time_major: + inputs = tf.nest.map_structure(swap_batch_timestep, inputs) + flatted_inputs = tf.nest.flatten(inputs) + time_steps = flatted_inputs[0].shape[0] + batch = flatted_inputs[0].shape[1] + time_steps_t = tf.shape(flatted_inputs[0])[0] + for input_ in flatted_inputs: + input_.shape.with_rank_at_least(3) + if mask is not None: + if mask.dtype != tf.bool: + mask = tf.cast(mask, tf.bool) + if len(mask.shape) == 2: + mask = expand_dims(mask) + if not time_major: + mask = swap_batch_timestep(mask) + if constants is None: + constants = [] + + def _expand_mask(mask_t, input_t, fixed_dim=1): + if tf.nest.is_nested(mask_t): + raise ValueError(f'mask_t is expected to be tensor, but got {mask_t}') + if tf.nest.is_nested(input_t): + raise ValueError(f'input_t is expected to be tensor, but got {input_t}') + rank_diff = len(input_t.shape) - len(mask_t.shape) + for _ in range(rank_diff): + mask_t = tf.expand_dims(mask_t, -1) + multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:] + return tf.tile(mask_t, multiples) + if unroll: + if not time_steps: + raise ValueError('Unrolling requires a fixed number of timesteps.') + states = tuple(initial_states) + successive_states = [] + successive_outputs = [] + + def _process_single_input_t(input_t): + input_t = tf.unstack(input_t) + if go_backwards: + input_t.reverse() + return input_t + if tf.nest.is_nested(inputs): + processed_input = tf.nest.map_structure(_process_single_input_t, inputs) + else: + processed_input = (_process_single_input_t(inputs),) + + def _get_input_tensor(time): + inp = [t_[time] for t_ in processed_input] + return tf.nest.pack_sequence_as(inputs, inp) + if mask is not None: + mask_list = tf.unstack(mask) + if go_backwards: + mask_list.reverse() + for i in range(time_steps): + inp = _get_input_tensor(i) + mask_t = mask_list[i] + (output, new_states) = step_function(inp, tuple(states) + tuple(constants)) + tiled_mask_t = _expand_mask(mask_t, output) + if not successive_outputs: + prev_output = zeros_like(output) + else: + prev_output = successive_outputs[-1] + output = tf.where(tiled_mask_t, output, prev_output) + flat_states = tf.nest.flatten(states) + 
flat_new_states = tf.nest.flatten(new_states) + tiled_mask_t = tuple((_expand_mask(mask_t, s) for s in flat_states)) + flat_final_states = tuple((tf.where(m, s, ps) for (m, s, ps) in zip(tiled_mask_t, flat_new_states, flat_states))) + states = tf.nest.pack_sequence_as(states, flat_final_states) + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: + successive_outputs = [output] + successive_states = [states] + last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = tf.stack(successive_outputs) + if zero_output_for_mask: + last_output = tf.where(_expand_mask(mask_list[-1], last_output), last_output, zeros_like(last_output)) + outputs = tf.where(_expand_mask(mask, outputs, fixed_dim=2), outputs, zeros_like(outputs)) + else: + for i in range(time_steps): + inp = _get_input_tensor(i) + (output, states) = step_function(inp, tuple(states) + tuple(constants)) + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: + successive_outputs = [output] + successive_states = [states] + last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = tf.stack(successive_outputs) + else: + states = tuple(initial_states) + input_ta = tuple((tf.TensorArray(dtype=inp.dtype, size=time_steps_t, tensor_array_name=f'input_ta_{i}') for (i, inp) in enumerate(flatted_inputs))) + input_ta = tuple((ta.unstack(input_) if not go_backwards else ta.unstack(reverse(input_, 0)) for (ta, input_) in zip(input_ta, flatted_inputs))) + input_time_zero = tf.nest.pack_sequence_as(inputs, [inp[0] for inp in flatted_inputs]) + (output_time_zero, _) = step_function(input_time_zero, tuple(initial_states) + tuple(constants)) + output_ta_size = time_steps_t if return_all_outputs else 1 + output_ta = tuple((tf.TensorArray(dtype=out.dtype, size=output_ta_size, element_shape=out.shape, tensor_array_name=f'output_ta_{i}') for (i, out) in enumerate(tf.nest.flatten(output_time_zero)))) + time = tf.constant(0, dtype='int32', name='time') + if input_length is None: + max_iterations = time_steps_t + else: + max_iterations = tf.reduce_max(input_length) + while_loop_kwargs = {'cond': lambda time, *_: time < time_steps_t, 'maximum_iterations': max_iterations, 'parallel_iterations': 32, 'swap_memory': True} + if mask is not None: + if go_backwards: + mask = reverse(mask, 0) + mask_ta = tf.TensorArray(dtype=tf.bool, size=time_steps_t, tensor_array_name='mask_ta') + mask_ta = mask_ta.unstack(mask) + + def masking_fn(time): + return mask_ta.read(time) + + def compute_masked_output(mask_t, flat_out, flat_mask): + tiled_mask_t = tuple((_expand_mask(mask_t, o, fixed_dim=len(mask_t.shape)) for o in flat_out)) + return tuple((tf.where(m, o, fm) for (m, o, fm) in zip(tiled_mask_t, flat_out, flat_mask))) + elif isinstance(input_length, tf.Tensor): + if go_backwards: + max_len = tf.reduce_max(input_length, axis=0) + rev_input_length = tf.subtract(max_len - 1, input_length) + + def masking_fn(time): + return tf.less(rev_input_length, time) + else: + + def masking_fn(time): + return tf.greater(input_length, time) + + def compute_masked_output(mask_t, flat_out, flat_mask): + return tuple((tf.compat.v1.where(mask_t, o, zo) for (o, zo) in zip(flat_out, flat_mask))) + else: + masking_fn = None + if masking_fn is not None: + flat_zero_output = tuple((tf.zeros_like(o) for o in tf.nest.flatten(output_time_zero))) + + def _step(time, output_ta_t, prev_output, *states): + current_input = tuple((ta.read(time) for ta in 
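+ # read this timestep's input slice out of each TensorArray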
input_ta)) + current_input = tf.nest.pack_sequence_as(inputs, current_input) + mask_t = masking_fn(time) + (output, new_states) = step_function(current_input, tuple(states) + tuple(constants)) + flat_output = tf.nest.flatten(output) + flat_mask_output = flat_zero_output if zero_output_for_mask else tf.nest.flatten(prev_output) + flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output) + flat_state = tf.nest.flatten(states) + flat_new_state = tf.nest.flatten(new_states) + for (state, new_state) in zip(flat_state, flat_new_state): + if isinstance(new_state, tf.Tensor): + new_state.set_shape(state.shape) + flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state) + new_states = tf.nest.pack_sequence_as(new_states, flat_final_state) + ta_index_to_write = time if return_all_outputs else 0 + output_ta_t = tuple((ta.write(ta_index_to_write, out) for (ta, out) in zip(output_ta_t, flat_new_output))) + return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states) + final_outputs = tf.compat.v1.while_loop(body=_step, loop_vars=(time, output_ta, flat_zero_output) + states, **while_loop_kwargs) + new_states = final_outputs[3:] + else: + + def _step(time, output_ta_t, *states): + current_input = tuple((ta.read(time) for ta in input_ta)) + current_input = tf.nest.pack_sequence_as(inputs, current_input) + (output, new_states) = step_function(current_input, tuple(states) + tuple(constants)) + flat_state = tf.nest.flatten(states) + flat_new_state = tf.nest.flatten(new_states) + for (state, new_state) in zip(flat_state, flat_new_state): + if isinstance(new_state, tf.Tensor): + new_state.set_shape(state.shape) + flat_output = tf.nest.flatten(output) + ta_index_to_write = time if return_all_outputs else 0 + output_ta_t = tuple((ta.write(ta_index_to_write, out) for (ta, out) in zip(output_ta_t, flat_output))) + new_states = tf.nest.pack_sequence_as(initial_states, flat_new_state) + return (time + 1, output_ta_t) + tuple(new_states) + final_outputs = tf.compat.v1.while_loop(body=_step, loop_vars=(time, output_ta) + states, **while_loop_kwargs) + new_states = final_outputs[2:] + output_ta = final_outputs[1] + outputs = tuple((o.stack() for o in output_ta)) + last_output = tuple((o[-1] for o in outputs)) + outputs = tf.nest.pack_sequence_as(output_time_zero, outputs) + last_output = tf.nest.pack_sequence_as(output_time_zero, last_output) + + def set_shape(output_): + if isinstance(output_, tf.Tensor): + shape = output_.shape.as_list() + if return_all_outputs: + shape[0] = time_steps + else: + shape[0] = 1 + shape[1] = batch + output_.set_shape(shape) + return output_ + outputs = tf.nest.map_structure(set_shape, outputs) + if not time_major: + outputs = tf.nest.map_structure(swap_batch_timestep, outputs) + return (last_output, outputs, new_states) + +@keras_export('keras._legacy.backend.round') +def round(x): + return tf.round(x) + +@keras_export('keras._legacy.backend.separable_conv2d') +def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f'Unknown data_format: {data_format}') + if len(strides) != 2: + raise ValueError('`strides` must be a tuple of 2 integers.') + (x, tf_data_format) = _preprocess_conv2d_input(x, data_format) + padding = _preprocess_padding(padding) + if not isinstance(strides, tuple): + strides = tuple(strides) + if 
tf_data_format == 'NHWC': + strides = (1,) + strides + (1,) + else: + strides = (1, 1) + strides + x = tf.nn.separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=strides, padding=padding, dilations=dilation_rate, data_format=tf_data_format) + if data_format == 'channels_first' and tf_data_format == 'NHWC': + x = tf.transpose(x, (0, 3, 1, 2)) + return x + +@keras_export('keras._legacy.backend.set_value') +def set_value(x, value): + value = np.asarray(value, dtype=x.dtype.name) + x.assign(value) + +@keras_export('keras._legacy.backend.shape') +def shape(x): + return tf.shape(x) + +@keras_export('keras._legacy.backend.sigmoid') +def sigmoid(x): + output = tf.sigmoid(x) + return output + +@keras_export('keras._legacy.backend.sign') +def sign(x): + return tf.sign(x) + +@keras_export('keras._legacy.backend.sin') +def sin(x): + return tf.sin(x) + +@keras_export('keras._legacy.backend.softmax') +def softmax(x, axis=-1): + if x.shape.rank <= 1: + raise ValueError(f'Cannot apply softmax to a tensor that is 1D. Received input: {x}') + if isinstance(axis, int): + output = tf.nn.softmax(x, axis=axis) + else: + numerator = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True)) + denominator = tf.reduce_sum(numerator, axis=axis, keepdims=True) + output = numerator / denominator + output._keras_logits = x + return output + +@keras_export('keras._legacy.backend.softplus') +def softplus(x): + return tf.math.softplus(x) + +@keras_export('keras._legacy.backend.softsign') +def softsign(x): + return tf.math.softsign(x) + +@keras_export('keras._legacy.backend.sparse_categorical_crossentropy') +def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1, ignore_class=None): + target = tf.convert_to_tensor(target) + output = tf.convert_to_tensor(output) + target = cast(target, 'int64') + if not from_logits: + epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype) + output = tf.clip_by_value(output, epsilon_, 1 - epsilon_) + output = tf.math.log(output) + if isinstance(output.shape, (tuple, list)): + output_rank = len(output.shape) + else: + output_rank = output.shape.ndims + if output_rank is not None: + axis %= output_rank + if axis != output_rank - 1: + permutation = list(itertools.chain(range(axis), range(axis + 1, output_rank), [axis])) + output = tf.transpose(output, perm=permutation) + elif axis != -1: + raise ValueError('Cannot compute sparse categorical crossentropy with `axis={}` on an output tensor with unknown rank'.format(axis)) + output_shape = tf.shape(output) + target_rank = target.shape.ndims + update_shape = target_rank is not None and output_rank is not None and (target_rank != output_rank - 1) + if update_shape: + target = flatten(target) + output = tf.reshape(output, [-1, output_shape[-1]]) + if ignore_class is not None: + valid_mask = tf.not_equal(target, cast(ignore_class, target.dtype)) + target = target[valid_mask] + output = output[valid_mask] + res = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output) + if ignore_class is not None: + res_shape = cast(output_shape[:-1], 'int64') + valid_mask = tf.reshape(valid_mask, res_shape) + res = tf.scatter_nd(tf.where(valid_mask), res, res_shape) + res._keras_mask = valid_mask + return res + if update_shape and output_rank >= 3: + res = tf.reshape(res, output_shape[:-1]) + return res + +@keras_export('keras._legacy.backend.spatial_2d_padding') +def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): + assert len(padding) == 2 + assert len(padding[0]) == 2 + assert 
len(padding[1]) == 2 + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f'Unknown data_format: {data_format}') + if data_format == 'channels_first': + pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])] + else: + pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]] + return tf.compat.v1.pad(x, pattern) + +@keras_export('keras._legacy.backend.spatial_3d_padding') +def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): + assert len(padding) == 3 + assert len(padding[0]) == 2 + assert len(padding[1]) == 2 + assert len(padding[2]) == 2 + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError(f'Unknown data_format: {data_format}') + if data_format == 'channels_first': + pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]], [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]] + else: + pattern = [[0, 0], [padding[0][0], padding[0][1]], [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]], [0, 0]] + return tf.compat.v1.pad(x, pattern) + +@keras_export('keras._legacy.backend.sqrt') +def sqrt(x): + zero = tf.convert_to_tensor(0.0, x.dtype) + x = tf.maximum(x, zero) + return tf.sqrt(x) + +@keras_export('keras._legacy.backend.square') +def square(x): + return tf.square(x) + +@keras_export('keras._legacy.backend.squeeze') +def squeeze(x, axis): + return tf.squeeze(x, [axis]) + +@keras_export('keras._legacy.backend.stack') +def stack(x, axis=0): + return tf.stack(x, axis=axis) + +@keras_export('keras._legacy.backend.std') +def std(x, axis=None, keepdims=False): + if x.dtype.base_dtype == tf.bool: + x = tf.cast(x, backend.floatx()) + return tf.math.reduce_std(x, axis=axis, keepdims=keepdims) + +@keras_export('keras._legacy.backend.stop_gradient') +def stop_gradient(variables): + if isinstance(variables, (list, tuple)): + return map(tf.stop_gradient, variables) + return tf.stop_gradient(variables) + +@keras_export('keras._legacy.backend.sum') +def sum(x, axis=None, keepdims=False): + return tf.reduce_sum(x, axis, keepdims) + +@keras_export('keras._legacy.backend.switch') +def switch(condition, then_expression, else_expression): + if condition.dtype != tf.bool: + condition = tf.cast(condition, 'bool') + cond_ndim = ndim(condition) + if not cond_ndim: + if not callable(then_expression): + + def then_expression_fn(): + return then_expression + else: + then_expression_fn = then_expression + if not callable(else_expression): + + def else_expression_fn(): + return else_expression + else: + else_expression_fn = else_expression + x = tf.compat.v1.cond(condition, then_expression_fn, else_expression_fn) + else: + if callable(then_expression): + then_expression = then_expression() + if callable(else_expression): + else_expression = else_expression() + expr_ndim = ndim(then_expression) + if cond_ndim > expr_ndim: + raise ValueError('Rank of `condition` should be less than or equal to rank of `then_expression` and `else_expression`. 
ndim(condition)=' + str(cond_ndim) + ', ndim(then_expression)=' + str(expr_ndim)) + if cond_ndim > 1: + ndim_diff = expr_ndim - cond_ndim + cond_shape = tf.concat([tf.shape(condition), [1] * ndim_diff], axis=0) + condition = tf.reshape(condition, cond_shape) + expr_shape = tf.shape(then_expression) + shape_diff = expr_shape - cond_shape + tile_shape = tf.where(shape_diff > 0, expr_shape, tf.ones_like(expr_shape)) + condition = tf.tile(condition, tile_shape) + x = tf.where(condition, then_expression, else_expression) + return x + +@keras_export('keras._legacy.backend.tanh') +def tanh(x): + return tf.tanh(x) + +@keras_export('keras._legacy.backend.temporal_padding') +def temporal_padding(x, padding=(1, 1)): + assert len(padding) == 2 + pattern = [[0, 0], [padding[0], padding[1]], [0, 0]] + return tf.compat.v1.pad(x, pattern) + +@keras_export('keras._legacy.backend.tile') +def tile(x, n): + if isinstance(n, int): + n = [n] + return tf.tile(x, n) + +@keras_export('keras._legacy.backend.to_dense') +def to_dense(tensor): + if is_sparse(tensor): + return tf.sparse.to_dense(tensor) + else: + return tensor + +@keras_export('keras._legacy.backend.transpose') +def transpose(x): + return tf.transpose(x) + +@keras_export('keras._legacy.backend.truncated_normal') +def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + if dtype is None: + dtype = backend.floatx() + if seed is None: + seed = np.random.randint(10000000.0) + return tf.random.truncated_normal(shape, mean, stddev, dtype=dtype, seed=seed) + +@keras_export('keras._legacy.backend.update') +def update(x, new_x): + return tf.compat.v1.assign(x, new_x) + +@keras_export('keras._legacy.backend.update_add') +def update_add(x, increment): + return tf.compat.v1.assign_add(x, increment) + +@keras_export('keras._legacy.backend.update_sub') +def update_sub(x, decrement): + return tf.compat.v1.assign_sub(x, decrement) + +@keras_export('keras._legacy.backend.var') +def var(x, axis=None, keepdims=False): + if x.dtype.base_dtype == tf.bool: + x = tf.cast(x, backend.floatx()) + return tf.math.reduce_variance(x, axis=axis, keepdims=keepdims) + +@keras_export('keras._legacy.backend.variable') +def variable(value, dtype=None, name=None, constraint=None): + if dtype is None: + dtype = backend.floatx() + if hasattr(value, 'tocoo'): + sparse_coo = value.tocoo() + indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1)), 1) + v = tf.SparseTensor(indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape) + v._keras_shape = sparse_coo.shape + return v + v = tf.Variable(value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint) + return v + +@keras_export('keras._legacy.backend.zeros') +def zeros(shape, dtype=None, name=None): + with tf.init_scope(): + if dtype is None: + dtype = backend.floatx() + tf_dtype = tf.as_dtype(dtype) + v = tf.zeros(shape=shape, dtype=tf_dtype, name=name) + if py_all(v.shape.as_list()): + return variable(v, dtype=dtype, name=name) + return v + +@keras_export('keras._legacy.backend.zeros_like') +def zeros_like(x, dtype=None, name=None): + return tf.zeros_like(x, dtype=dtype, name=name) + +# File: keras-master/keras/src/legacy/layers.py +"""""" +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.utils.module_utils import tensorflow as tf + +@keras_export('keras._legacy.layers.AlphaDropout') +class AlphaDropout(Layer): + + def __init__(self, rate, noise_shape=None, seed=None, **kwargs): 
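+ # A legacy reimplementation of Alpha Dropout (Klambauer et al., 2017,
+ # "Self-Normalizing Neural Networks"): dropped inputs are set to the
+ # SELU saturation value alpha' = -alpha * scale rather than to zero,
+ # and the affine correction a * x + b applied in `call` below restores
+ # the input mean and variance, so SELU networks stay self-normalizing.
+ # `rate` is the drop probability; `seed` feeds a SeedGenerator.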
+ super().__init__(**kwargs) + self.rate = rate + self.seed = seed + self.noise_shape = noise_shape + self.seed_generator = backend.random.SeedGenerator(seed) + self.supports_masking = True + self.built = True + + def call(self, inputs, training=False): + if training and self.rate > 0: + alpha = 1.6732632423543772 + scale = 1.0507009873554805 + alpha_p = -alpha * scale + if self.noise_shape is None: + noise_shape = tf.shape(inputs) + else: + noise_shape = self.noise_shape + kept_idx = tf.greater_equal(backend.random.uniform(noise_shape, seed=self.seed_generator), self.rate) + kept_idx = tf.cast(kept_idx, inputs.dtype) + a = ((1 - self.rate) * (1 + self.rate * alpha_p ** 2)) ** (-0.5) + b = -a * alpha_p * self.rate + x = inputs * kept_idx + alpha_p * (1 - kept_idx) + return a * x + b + return inputs + + def get_config(self): + config = {'rate': self.rate, 'seed': self.seed} + base_config = super().get_config() + return {**base_config, **config} + + def compute_output_shape(self, input_shape): + return input_shape + +@keras_export('keras._legacy.layers.RandomHeight') +class RandomHeight(Layer): + + def __init__(self, factor, interpolation='bilinear', seed=None, **kwargs): + super().__init__(**kwargs) + self.seed_generator = backend.random.SeedGenerator(seed) + self.factor = factor + if isinstance(factor, (tuple, list)): + self.height_lower = factor[0] + self.height_upper = factor[1] + else: + self.height_lower = -factor + self.height_upper = factor + if self.height_upper < self.height_lower: + raise ValueError(f'`factor` argument cannot have an upper bound less than the lower bound. Received: factor={factor}') + if self.height_lower < -1.0 or self.height_upper < -1.0: + raise ValueError(f'`factor` argument must have values larger than -1. Received: factor={factor}') + self.interpolation = interpolation + self.seed = seed + + def call(self, inputs, training=True): + inputs = tf.convert_to_tensor(inputs, dtype=self.compute_dtype) + + def random_height_inputs(inputs): + inputs_shape = tf.shape(inputs) + img_hd = tf.cast(inputs_shape[-3], tf.float32) + img_wd = inputs_shape[-2] + height_factor = backend.random.uniform(shape=[], minval=1.0 + self.height_lower, maxval=1.0 + self.height_upper, seed=self.seed_generator) + adjusted_height = tf.cast(height_factor * img_hd, tf.int32) + adjusted_size = tf.stack([adjusted_height, img_wd]) + output = tf.image.resize(images=inputs, size=adjusted_size, method=self.interpolation) + output = tf.cast(output, self.compute_dtype) + output_shape = inputs.shape.as_list() + output_shape[-3] = None + output.set_shape(output_shape) + return output + if training: + return random_height_inputs(inputs) + else: + return inputs + + def compute_output_shape(self, input_shape): + input_shape = list(input_shape) + input_shape[-3] = None + return tuple(input_shape) + + def get_config(self): + config = {'factor': self.factor, 'interpolation': self.interpolation, 'seed': self.seed} + base_config = super().get_config() + return {**base_config, **config} + +@keras_export('keras._legacy.layers.RandomWidth') +class RandomWidth(Layer): + + def __init__(self, factor, interpolation='bilinear', seed=None, **kwargs): + super().__init__(**kwargs) + self.seed_generator = backend.random.SeedGenerator(seed) + self.factor = factor + if isinstance(factor, (tuple, list)): + self.width_lower = factor[0] + self.width_upper = factor[1] + else: + self.width_lower = -factor + self.width_upper = factor + if self.width_upper < self.width_lower: + raise ValueError(f'`factor` argument cannot have an 
upper bound less than the lower bound. Received: factor={factor}') + if self.width_lower < -1.0 or self.width_upper < -1.0: + raise ValueError(f'`factor` argument must have values larger than -1. Received: factor={factor}') + self.interpolation = interpolation + self.seed = seed + + def call(self, inputs, training=True): + inputs = tf.convert_to_tensor(inputs, dtype=self.compute_dtype) + + def random_width_inputs(inputs): + inputs_shape = tf.shape(inputs) + img_hd = inputs_shape[-3] + img_wd = tf.cast(inputs_shape[-2], tf.float32) + width_factor = backend.random.uniform(shape=[], minval=1.0 + self.width_lower, maxval=1.0 + self.width_upper, seed=self.seed_generator) + adjusted_width = tf.cast(width_factor * img_wd, tf.int32) + adjusted_size = tf.stack([img_hd, adjusted_width]) + output = tf.image.resize(images=inputs, size=adjusted_size, method=self.interpolation) + output = tf.cast(output, self.compute_dtype) + output_shape = inputs.shape.as_list() + output_shape[-2] = None + output.set_shape(output_shape) + return output + if training: + return random_width_inputs(inputs) + else: + return inputs + + def compute_output_shape(self, input_shape): + input_shape = list(input_shape) + input_shape[-2] = None + return tuple(input_shape) + + def get_config(self): + config = {'factor': self.factor, 'interpolation': self.interpolation, 'seed': self.seed} + base_config = super().get_config() + return {**base_config, **config} + +@keras_export('keras._legacy.layers.ThresholdedReLU') +class ThresholdedReLU(Layer): + + def __init__(self, theta=1.0, **kwargs): + super().__init__(**kwargs) + if theta is None: + raise ValueError(f'Theta of a Thresholded ReLU layer cannot be None, expecting a float. Received: {theta}') + if theta < 0: + raise ValueError(f'The theta value of a Thresholded ReLU layer should be >=0. Received: {theta}') + self.supports_masking = True + self.theta = tf.convert_to_tensor(theta, dtype=self.compute_dtype) + + def call(self, inputs): + dtype = self.compute_dtype + return inputs * tf.cast(tf.greater(inputs, self.theta), dtype) + + def get_config(self): + config = {'theta': float(self.theta)} + base_config = super().get_config() + return {**base_config, **config} + + def compute_output_shape(self, input_shape): + return input_shape + +# File: keras-master/keras/src/legacy/losses.py +from keras.src.api_export import keras_export + +@keras_export('keras._legacy.losses.Reduction') +class Reduction: + AUTO = 'auto' + NONE = 'none' + SUM = 'sum' + SUM_OVER_BATCH_SIZE = 'sum_over_batch_size' + + @classmethod + def all(cls): + return (cls.AUTO, cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE) + + @classmethod + def validate(cls, key): + if key not in cls.all(): + raise ValueError(f'Invalid Reduction Key: {key}. 
Expected keys are "{cls.all()}"') + +# File: keras-master/keras/src/legacy/preprocessing/image.py +"""""" +import collections +import multiprocessing +import os +import threading +import warnings +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset +from keras.src.utils import image_utils +from keras.src.utils import io_utils +from keras.src.utils.module_utils import scipy + +@keras_export('keras._legacy.preprocessing.image.Iterator') +class Iterator(PyDataset): + white_list_formats = ('png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff') + + def __init__(self, n, batch_size, shuffle, seed): + self.n = n + self.batch_size = batch_size + self.seed = seed + self.shuffle = shuffle + self.batch_index = 0 + self.total_batches_seen = 0 + self.lock = threading.Lock() + self.index_array = None + self.index_generator = self._flow_index() + + def _set_index_array(self): + self.index_array = np.arange(self.n) + if self.shuffle: + self.index_array = np.random.permutation(self.n) + + def __getitem__(self, idx): + if idx >= len(self): + raise ValueError('Asked to retrieve element {idx}, but the Sequence has length {length}'.format(idx=idx, length=len(self))) + if self.seed is not None: + np.random.seed(self.seed + self.total_batches_seen) + self.total_batches_seen += 1 + if self.index_array is None: + self._set_index_array() + index_array = self.index_array[self.batch_size * idx:self.batch_size * (idx + 1)] + return self._get_batches_of_transformed_samples(index_array) + + def __len__(self): + return (self.n + self.batch_size - 1) // self.batch_size + + def on_epoch_end(self): + self._set_index_array() + + def reset(self): + self.batch_index = 0 + + def _flow_index(self): + self.reset() + while 1: + if self.seed is not None: + np.random.seed(self.seed + self.total_batches_seen) + if self.batch_index == 0: + self._set_index_array() + if self.n == 0: + current_index = 0 + else: + current_index = self.batch_index * self.batch_size % self.n + if self.n > current_index + self.batch_size: + self.batch_index += 1 + else: + self.batch_index = 0 + self.total_batches_seen += 1 + yield self.index_array[current_index:current_index + self.batch_size] + + def __iter__(self): + return self + + def __next__(self): + with self.lock: + index_array = next(self.index_generator) + return self._get_batches_of_transformed_samples(index_array) + + def _get_batches_of_transformed_samples(self, index_array): + raise NotImplementedError + +def _iter_valid_files(directory, white_list_formats, follow_links): + + def _recursive_list(subpath): + return sorted(os.walk(subpath, followlinks=follow_links), key=lambda x: x[0]) + for (root, _, files) in _recursive_list(directory): + for fname in sorted(files): + if fname.lower().endswith('.tiff'): + warnings.warn('Using ".tiff" files with multiple bands will cause distortion. 
Please verify your output.') + if fname.lower().endswith(white_list_formats): + yield (root, fname) + +def _list_valid_filenames_in_directory(directory, white_list_formats, split, class_indices, follow_links): + dirname = os.path.basename(directory) + if split: + all_files = list(_iter_valid_files(directory, white_list_formats, follow_links)) + num_files = len(all_files) + (start, stop) = (int(split[0] * num_files), int(split[1] * num_files)) + valid_files = all_files[start:stop] + else: + valid_files = _iter_valid_files(directory, white_list_formats, follow_links) + classes = [] + filenames = [] + for (root, fname) in valid_files: + classes.append(class_indices[dirname]) + absolute_path = os.path.join(root, fname) + relative_path = os.path.join(dirname, os.path.relpath(absolute_path, directory)) + filenames.append(relative_path) + return (classes, filenames) + +class BatchFromFilesMixin: + + def set_processing_attrs(self, image_data_generator, target_size, color_mode, data_format, save_to_dir, save_prefix, save_format, subset, interpolation, keep_aspect_ratio): + self.image_data_generator = image_data_generator + self.target_size = tuple(target_size) + self.keep_aspect_ratio = keep_aspect_ratio + if color_mode not in {'rgb', 'rgba', 'grayscale'}: + raise ValueError(f'Invalid color mode: {color_mode}; expected "rgb", "rgba", or "grayscale".') + self.color_mode = color_mode + self.data_format = data_format + if self.color_mode == 'rgba': + if self.data_format == 'channels_last': + self.image_shape = self.target_size + (4,) + else: + self.image_shape = (4,) + self.target_size + elif self.color_mode == 'rgb': + if self.data_format == 'channels_last': + self.image_shape = self.target_size + (3,) + else: + self.image_shape = (3,) + self.target_size + elif self.data_format == 'channels_last': + self.image_shape = self.target_size + (1,) + else: + self.image_shape = (1,) + self.target_size + self.save_to_dir = save_to_dir + self.save_prefix = save_prefix + self.save_format = save_format + self.interpolation = interpolation + if subset is not None: + validation_split = self.image_data_generator._validation_split + if subset == 'validation': + split = (0, validation_split) + elif subset == 'training': + split = (validation_split, 1) + else: + raise ValueError(f'Invalid subset name: {subset};expected "training" or "validation"') + else: + split = None + self.split = split + self.subset = subset + + def _get_batches_of_transformed_samples(self, index_array): + batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=self.dtype) + filepaths = self.filepaths + for (i, j) in enumerate(index_array): + img = image_utils.load_img(filepaths[j], color_mode=self.color_mode, target_size=self.target_size, interpolation=self.interpolation, keep_aspect_ratio=self.keep_aspect_ratio) + x = image_utils.img_to_array(img, data_format=self.data_format) + if hasattr(img, 'close'): + img.close() + if self.image_data_generator: + params = self.image_data_generator.get_random_transform(x.shape) + x = self.image_data_generator.apply_transform(x, params) + x = self.image_data_generator.standardize(x) + batch_x[i] = x + if self.save_to_dir: + for (i, j) in enumerate(index_array): + img = image_utils.array_to_img(batch_x[i], self.data_format, scale=True) + fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix, index=j, hash=np.random.randint(10000000.0), format=self.save_format) + img.save(os.path.join(self.save_to_dir, fname)) + if self.class_mode == 'input': + batch_y = batch_x.copy() + elif 
self.class_mode in {'binary', 'sparse'}: + batch_y = np.empty(len(batch_x), dtype=self.dtype) + for (i, n_observation) in enumerate(index_array): + batch_y[i] = self.classes[n_observation] + elif self.class_mode == 'categorical': + batch_y = np.zeros((len(batch_x), len(self.class_indices)), dtype=self.dtype) + for (i, n_observation) in enumerate(index_array): + batch_y[i, self.classes[n_observation]] = 1.0 + elif self.class_mode == 'multi_output': + batch_y = [output[index_array] for output in self.labels] + elif self.class_mode == 'raw': + batch_y = self.labels[index_array] + else: + return batch_x + if self.sample_weight is None: + return (batch_x, batch_y) + else: + return (batch_x, batch_y, self.sample_weight[index_array]) + + @property + def filepaths(self): + raise NotImplementedError('`filepaths` property method has not been implemented in {}.'.format(type(self).__name__)) + + @property + def labels(self): + raise NotImplementedError('`labels` property method has not been implemented in {}.'.format(type(self).__name__)) + + @property + def sample_weight(self): + raise NotImplementedError('`sample_weight` property method has not been implemented in {}.'.format(type(self).__name__)) + +@keras_export('keras._legacy.preprocessing.image.DirectoryIterator') +class DirectoryIterator(BatchFromFilesMixin, Iterator): + allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None} + + def __init__(self, directory, image_data_generator, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, data_format=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', keep_aspect_ratio=False, dtype=None): + if data_format is None: + data_format = backend.image_data_format() + if dtype is None: + dtype = backend.floatx() + super().set_processing_attrs(image_data_generator, target_size, color_mode, data_format, save_to_dir, save_prefix, save_format, subset, interpolation, keep_aspect_ratio) + self.directory = directory + self.classes = classes + if class_mode not in self.allowed_class_modes: + raise ValueError('Invalid class_mode: {}; expected one of: {}'.format(class_mode, self.allowed_class_modes)) + self.class_mode = class_mode + self.dtype = dtype + self.samples = 0 + if not classes: + classes = [] + for subdir in sorted(os.listdir(directory)): + if os.path.isdir(os.path.join(directory, subdir)): + classes.append(subdir) + self.num_classes = len(classes) + self.class_indices = dict(zip(classes, range(len(classes)))) + pool = multiprocessing.pool.ThreadPool() + results = [] + self.filenames = [] + i = 0 + for dirpath in (os.path.join(directory, subdir) for subdir in classes): + results.append(pool.apply_async(_list_valid_filenames_in_directory, (dirpath, self.white_list_formats, self.split, self.class_indices, follow_links))) + classes_list = [] + for res in results: + (classes, filenames) = res.get() + classes_list.append(classes) + self.filenames += filenames + self.samples = len(self.filenames) + self.classes = np.zeros((self.samples,), dtype='int32') + for classes in classes_list: + self.classes[i:i + len(classes)] = classes + i += len(classes) + io_utils.print_msg(f'Found {self.samples} images belonging to {self.num_classes} classes.') + pool.close() + pool.join() + self._filepaths = [os.path.join(self.directory, fname) for fname in self.filenames] + super().__init__(self.samples, batch_size, shuffle, seed) + + @property + def filepaths(self): + return 
self._filepaths + + @property + def labels(self): + return self.classes + + @property + def sample_weight(self): + return None + +@keras_export('keras._legacy.preprocessing.image.NumpyArrayIterator') +class NumpyArrayIterator(Iterator): + + def __init__(self, x, y, image_data_generator, batch_size=32, shuffle=False, sample_weight=None, seed=None, data_format=None, save_to_dir=None, save_prefix='', save_format='png', subset=None, ignore_class_split=False, dtype=None): + if data_format is None: + data_format = backend.image_data_format() + if dtype is None: + dtype = backend.floatx() + self.dtype = dtype + if isinstance(x, tuple) or isinstance(x, list): + if not isinstance(x[1], list): + x_misc = [np.asarray(x[1])] + else: + x_misc = [np.asarray(xx) for xx in x[1]] + x = x[0] + for xx in x_misc: + if len(x) != len(xx): + raise ValueError(f'All of the arrays in `x` should have the same length. Found a pair with: len(x[0]) = {len(x)}, len(x[?]) = {len(xx)}') + else: + x_misc = [] + if y is not None and len(x) != len(y): + raise ValueError(f'`x` (images tensor) and `y` (labels) should have the same length. Found: x.shape = {np.asarray(x).shape}, y.shape = {np.asarray(y).shape}') + if sample_weight is not None and len(x) != len(sample_weight): + raise ValueError(f'`x` (images tensor) and `sample_weight` should have the same length. Found: x.shape = {np.asarray(x).shape}, sample_weight.shape = {np.asarray(sample_weight).shape}') + if subset is not None: + if subset not in {'training', 'validation'}: + raise ValueError(f'Invalid subset name: {subset}; expected "training" or "validation".') + split_idx = int(len(x) * image_data_generator._validation_split) + if y is not None and (not ignore_class_split) and (not np.array_equal(np.unique(y[:split_idx]), np.unique(y[split_idx:]))): + raise ValueError('Training and validation subsets have different number of classes after the split. If your numpy arrays are sorted by the label, you might want to shuffle them.') + if subset == 'validation': + x = x[:split_idx] + x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc] + if y is not None: + y = y[:split_idx] + else: + x = x[split_idx:] + x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc] + if y is not None: + y = y[split_idx:] + self.x = np.asarray(x, dtype=self.dtype) + self.x_misc = x_misc + if self.x.ndim != 4: + raise ValueError(f'Input data in `NumpyArrayIterator` should have rank 4. You passed an array with shape {self.x.shape}') + channels_axis = 3 if data_format == 'channels_last' else 1 + if self.x.shape[channels_axis] not in {1, 3, 4}: + warnings.warn('NumpyArrayIterator is set to use the data format convention "' + data_format + '" (channels on axis ' + str(channels_axis) + '), i.e. expected either 1, 3, or 4 channels on axis ' + str(channels_axis) + '. 
However, it was passed an array with shape ' + str(self.x.shape) + ' (' + str(self.x.shape[channels_axis]) + ' channels).') + if y is not None: + self.y = np.asarray(y) + else: + self.y = None + if sample_weight is not None: + self.sample_weight = np.asarray(sample_weight) + else: + self.sample_weight = None + self.image_data_generator = image_data_generator + self.data_format = data_format + self.save_to_dir = save_to_dir + self.save_prefix = save_prefix + self.save_format = save_format + super().__init__(x.shape[0], batch_size, shuffle, seed) + + def _get_batches_of_transformed_samples(self, index_array): + batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]), dtype=self.dtype) + for (i, j) in enumerate(index_array): + x = self.x[j] + params = self.image_data_generator.get_random_transform(x.shape) + x = self.image_data_generator.apply_transform(x.astype(self.dtype), params) + x = self.image_data_generator.standardize(x) + batch_x[i] = x + if self.save_to_dir: + for (i, j) in enumerate(index_array): + img = image_utils.array_to_img(batch_x[i], self.data_format, scale=True) + fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix, index=j, hash=np.random.randint(10000.0), format=self.save_format) + img.save(os.path.join(self.save_to_dir, fname)) + batch_x_miscs = [xx[index_array] for xx in self.x_misc] + output = (batch_x if not batch_x_miscs else [batch_x] + batch_x_miscs,) + if self.y is None: + return output[0] + output += (self.y[index_array],) + if self.sample_weight is not None: + output += (self.sample_weight[index_array],) + return output + +def validate_filename(filename, white_list_formats): + return filename.lower().endswith(white_list_formats) and os.path.isfile(filename) + +class DataFrameIterator(BatchFromFilesMixin, Iterator): + allowed_class_modes = {'binary', 'categorical', 'input', 'multi_output', 'raw', 'sparse', None} + + def __init__(self, dataframe, directory=None, image_data_generator=None, x_col='filename', y_col='class', weight_col=None, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, data_format='channels_last', save_to_dir=None, save_prefix='', save_format='png', subset=None, interpolation='nearest', keep_aspect_ratio=False, dtype='float32', validate_filenames=True): + super().set_processing_attrs(image_data_generator, target_size, color_mode, data_format, save_to_dir, save_prefix, save_format, subset, interpolation, keep_aspect_ratio) + df = dataframe.copy() + self.directory = directory or '' + self.class_mode = class_mode + self.dtype = dtype + self._check_params(df, x_col, y_col, weight_col, classes) + if validate_filenames: + df = self._filter_valid_filepaths(df, x_col) + if class_mode not in ['input', 'multi_output', 'raw', None]: + (df, classes) = self._filter_classes(df, y_col, classes) + num_classes = len(classes) + self.class_indices = dict(zip(classes, range(len(classes)))) + if self.split: + num_files = len(df) + start = int(self.split[0] * num_files) + stop = int(self.split[1] * num_files) + df = df.iloc[start:stop, :] + if class_mode not in ['input', 'multi_output', 'raw', None]: + self.classes = self.get_classes(df, y_col) + self.filenames = df[x_col].tolist() + self._sample_weight = df[weight_col].values if weight_col else None + if class_mode == 'multi_output': + self._targets = [np.array(df[col].tolist()) for col in y_col] + if class_mode == 'raw': + self._targets = df[y_col].values + self.samples = len(self.filenames) + validated_string 
= 'validated' if validate_filenames else 'non-validated' + if class_mode in ['input', 'multi_output', 'raw', None]: + io_utils.print_msg(f'Found {self.samples} {validated_string} image filenames.') + else: + io_utils.print_msg(f'Found {self.samples} {validated_string} image filenames belonging to {num_classes} classes.') + self._filepaths = [os.path.join(self.directory, fname) for fname in self.filenames] + super().__init__(self.samples, batch_size, shuffle, seed) + + def _check_params(self, df, x_col, y_col, weight_col, classes): + if self.class_mode not in self.allowed_class_modes: + raise ValueError('Invalid class_mode: {}; expected one of: {}'.format(self.class_mode, self.allowed_class_modes)) + if self.class_mode == 'multi_output' and (not isinstance(y_col, list)): + raise TypeError('If class_mode="{}", y_col must be a list. Received {}.'.format(self.class_mode, type(y_col).__name__)) + if not all(df[x_col].apply(lambda x: isinstance(x, str))): + raise TypeError(f'All values in column x_col={x_col} must be strings.') + if self.class_mode in {'binary', 'sparse'}: + if not all(df[y_col].apply(lambda x: isinstance(x, str))): + raise TypeError('If class_mode="{}", y_col="{}" column values must be strings.'.format(self.class_mode, y_col)) + if self.class_mode == 'binary': + if classes: + classes = set(classes) + if len(classes) != 2: + raise ValueError('If class_mode="binary" there must be 2 classes. {} class/es were given.'.format(len(classes))) + elif df[y_col].nunique() != 2: + raise ValueError('If class_mode="binary" there must be 2 classes. Found {} classes.'.format(df[y_col].nunique())) + if self.class_mode == 'categorical': + types = (str, list, tuple) + if not all(df[y_col].apply(lambda x: isinstance(x, types))): + raise TypeError('If class_mode="{}", y_col="{}" column values must be type string, list or tuple.'.format(self.class_mode, y_col)) + if classes and self.class_mode in {'input', 'multi_output', 'raw', None}: + warnings.warn('`classes` will be ignored given the class_mode="{}"'.format(self.class_mode)) + if weight_col and (not issubclass(df[weight_col].dtype.type, np.number)): + raise TypeError(f'Column weight_col={weight_col} must be numeric.') + + def get_classes(self, df, y_col): + labels = [] + for label in df[y_col]: + if isinstance(label, (list, tuple)): + labels.append([self.class_indices[lbl] for lbl in label]) + else: + labels.append(self.class_indices[label]) + return labels + + @staticmethod + def _filter_classes(df, y_col, classes): + df = df.copy() + + def remove_classes(labels, classes): + if isinstance(labels, (list, tuple)): + labels = [cls for cls in labels if cls in classes] + return labels or None + elif isinstance(labels, str): + return labels if labels in classes else None + else: + raise TypeError('Expect string, list or tuple but found {} in {} column '.format(type(labels), y_col)) + if classes: + classes = list(collections.OrderedDict.fromkeys(classes).keys()) + df[y_col] = df[y_col].apply(lambda x: remove_classes(x, classes)) + else: + classes = set() + for v in df[y_col]: + if isinstance(v, (list, tuple)): + classes.update(v) + else: + classes.add(v) + classes = sorted(classes) + return (df.dropna(subset=[y_col]), classes) + + def _filter_valid_filepaths(self, df, x_col): + filepaths = df[x_col].map(lambda fname: os.path.join(self.directory, fname)) + mask = filepaths.apply(validate_filename, args=(self.white_list_formats,)) + n_invalid = (~mask).sum() + if n_invalid: + warnings.warn('Found {} invalid image filename(s) in x_col="{}". 
These filename(s) will be ignored.'.format(n_invalid, x_col)) + return df[mask] + + @property + def filepaths(self): + return self._filepaths + + @property + def labels(self): + if self.class_mode in {'multi_output', 'raw'}: + return self._targets + else: + return self.classes + + @property + def sample_weight(self): + return self._sample_weight + +def flip_axis(x, axis): + x = np.asarray(x).swapaxes(axis, 0) + x = x[::-1, ...] + x = x.swapaxes(0, axis) + return x + +@keras_export('keras._legacy.preprocessing.image.ImageDataGenerator') +class ImageDataGenerator: + + def __init__(self, featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-06, rotation_range=0, width_shift_range=0.0, height_shift_range=0.0, brightness_range=None, shear_range=0.0, zoom_range=0.0, channel_shift_range=0.0, fill_mode='nearest', cval=0.0, horizontal_flip=False, vertical_flip=False, rescale=None, preprocessing_function=None, data_format=None, validation_split=0.0, interpolation_order=1, dtype=None): + if data_format is None: + data_format = backend.image_data_format() + if dtype is None: + dtype = backend.floatx() + self.featurewise_center = featurewise_center + self.samplewise_center = samplewise_center + self.featurewise_std_normalization = featurewise_std_normalization + self.samplewise_std_normalization = samplewise_std_normalization + self.zca_whitening = zca_whitening + self.zca_epsilon = zca_epsilon + self.rotation_range = rotation_range + self.width_shift_range = width_shift_range + self.height_shift_range = height_shift_range + self.shear_range = shear_range + self.zoom_range = zoom_range + self.channel_shift_range = channel_shift_range + self.fill_mode = fill_mode + self.cval = cval + self.horizontal_flip = horizontal_flip + self.vertical_flip = vertical_flip + self.rescale = rescale + self.preprocessing_function = preprocessing_function + self.dtype = dtype + self.interpolation_order = interpolation_order + if data_format not in {'channels_last', 'channels_first'}: + raise ValueError(f'`data_format` should be `"channels_last"` (channel after row and column) or `"channels_first"` (channel before row and column). Received: {data_format}') + self.data_format = data_format + if data_format == 'channels_first': + self.channel_axis = 1 + self.row_axis = 2 + self.col_axis = 3 + if data_format == 'channels_last': + self.channel_axis = 3 + self.row_axis = 1 + self.col_axis = 2 + if validation_split and (not 0 < validation_split < 1): + raise ValueError(f'`validation_split` must be strictly between 0 and 1. Received: {validation_split}') + self._validation_split = validation_split + self.mean = None + self.std = None + self.zca_whitening_matrix = None + if isinstance(zoom_range, (float, int)): + self.zoom_range = [1 - zoom_range, 1 + zoom_range] + elif len(zoom_range) == 2 and all((isinstance(val, (float, int)) for val in zoom_range)): + self.zoom_range = [zoom_range[0], zoom_range[1]] + else: + raise ValueError(f'`zoom_range` should be a float or a tuple or list of two floats. 
Received: {zoom_range}') + if zca_whitening: + if not featurewise_center: + self.featurewise_center = True + warnings.warn('This ImageDataGenerator specifies `zca_whitening`, which overrides setting of `featurewise_center`.') + if featurewise_std_normalization: + self.featurewise_std_normalization = False + warnings.warn('This ImageDataGenerator specifies `zca_whitening`, which overrides setting of `featurewise_std_normalization`.') + if featurewise_std_normalization: + if not featurewise_center: + self.featurewise_center = True + warnings.warn('This ImageDataGenerator specifies `featurewise_std_normalization`, which overrides setting of `featurewise_center`.') + if samplewise_std_normalization: + if not samplewise_center: + self.samplewise_center = True + warnings.warn('This ImageDataGenerator specifies `samplewise_std_normalization`, which overrides setting of `samplewise_center`.') + if brightness_range is not None: + if not isinstance(brightness_range, (tuple, list)) or len(brightness_range) != 2: + raise ValueError(f'`brightness_range` should be a tuple or list of two floats. Received: {brightness_range}') + self.brightness_range = brightness_range + + def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None, save_to_dir=None, save_prefix='', save_format='png', ignore_class_split=False, subset=None): + return NumpyArrayIterator(x, y, self, batch_size=batch_size, shuffle=shuffle, sample_weight=sample_weight, seed=seed, data_format=self.data_format, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, ignore_class_split=ignore_class_split, subset=subset, dtype=self.dtype) + + def flow_from_directory(self, directory, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', keep_aspect_ratio=False): + return DirectoryIterator(directory, self, target_size=target_size, color_mode=color_mode, keep_aspect_ratio=keep_aspect_ratio, classes=classes, class_mode=class_mode, data_format=self.data_format, batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, follow_links=follow_links, subset=subset, interpolation=interpolation, dtype=self.dtype) + + def flow_from_dataframe(self, dataframe, directory=None, x_col='filename', y_col='class', weight_col=None, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None, interpolation='nearest', validate_filenames=True, **kwargs): + if 'has_ext' in kwargs: + warnings.warn('has_ext is deprecated, filenames in the dataframe have to match the exact filenames on disk.', DeprecationWarning) + if 'sort' in kwargs: + warnings.warn('sort is deprecated, batches will be created in the same order as the filenames provided if `shuffle` is set to `False`.', DeprecationWarning) + if class_mode == 'other': + warnings.warn('`class_mode="other"` is deprecated, please use `class_mode="raw"`.', DeprecationWarning) + class_mode = 'raw' + if 'drop_duplicates' in kwargs: + warnings.warn('drop_duplicates is deprecated, you can drop duplicates by using the pandas.DataFrame.drop_duplicates method.', DeprecationWarning) + return DataFrameIterator(dataframe, directory, self, x_col=x_col, y_col=y_col, weight_col=weight_col, target_size=target_size, color_mode=color_mode, 
classes=classes, class_mode=class_mode, data_format=self.data_format, batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, subset=subset, interpolation=interpolation, validate_filenames=validate_filenames, dtype=self.dtype) + + def standardize(self, x): + if self.preprocessing_function: + x = self.preprocessing_function(x) + if self.rescale: + x *= self.rescale + if self.samplewise_center: + x -= np.mean(x, keepdims=True) + if self.samplewise_std_normalization: + x /= np.std(x, keepdims=True) + 1e-06 + if self.featurewise_center: + if self.mean is not None: + x -= self.mean + else: + warnings.warn("This ImageDataGenerator specifies `featurewise_center`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.") + if self.featurewise_std_normalization: + if self.std is not None: + x /= self.std + 1e-06 + else: + warnings.warn("This ImageDataGenerator specifies `featurewise_std_normalization`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.") + if self.zca_whitening: + if self.zca_whitening_matrix is not None: + flat_x = x.reshape(-1, np.prod(x.shape[-3:])) + white_x = flat_x @ self.zca_whitening_matrix + x = np.reshape(white_x, x.shape) + else: + warnings.warn("This ImageDataGenerator specifies `zca_whitening`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.") + return x + + def get_random_transform(self, img_shape, seed=None): + img_row_axis = self.row_axis - 1 + img_col_axis = self.col_axis - 1 + if seed is not None: + np.random.seed(seed) + if self.rotation_range: + theta = np.random.uniform(-self.rotation_range, self.rotation_range) + else: + theta = 0 + if self.height_shift_range: + try: + tx = np.random.choice(self.height_shift_range) + tx *= np.random.choice([-1, 1]) + except ValueError: + tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) + if np.max(self.height_shift_range) < 1: + tx *= img_shape[img_row_axis] + else: + tx = 0 + if self.width_shift_range: + try: + ty = np.random.choice(self.width_shift_range) + ty *= np.random.choice([-1, 1]) + except ValueError: + ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) + if np.max(self.width_shift_range) < 1: + ty *= img_shape[img_col_axis] + else: + ty = 0 + if self.shear_range: + shear = np.random.uniform(-self.shear_range, self.shear_range) + else: + shear = 0 + if self.zoom_range[0] == 1 and self.zoom_range[1] == 1: + (zx, zy) = (1, 1) + else: + (zx, zy) = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2) + flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip + flip_vertical = (np.random.random() < 0.5) * self.vertical_flip + channel_shift_intensity = None + if self.channel_shift_range != 0: + channel_shift_intensity = np.random.uniform(-self.channel_shift_range, self.channel_shift_range) + brightness = None + if self.brightness_range is not None: + brightness = np.random.uniform(self.brightness_range[0], self.brightness_range[1]) + transform_parameters = {'theta': theta, 'tx': tx, 'ty': ty, 'shear': shear, 'zx': zx, 'zy': zy, 'flip_horizontal': flip_horizontal, 'flip_vertical': flip_vertical, 'channel_shift_intensity': channel_shift_intensity, 'brightness': brightness} + return transform_parameters + + def apply_transform(self, x, transform_parameters): + img_row_axis = self.row_axis - 1 + img_col_axis = self.col_axis - 1 + img_channel_axis = self.channel_axis - 1 + x = 
apply_affine_transform(x, transform_parameters.get('theta', 0), transform_parameters.get('tx', 0), transform_parameters.get('ty', 0), transform_parameters.get('shear', 0), transform_parameters.get('zx', 1), transform_parameters.get('zy', 1), row_axis=img_row_axis, col_axis=img_col_axis, channel_axis=img_channel_axis, fill_mode=self.fill_mode, cval=self.cval, order=self.interpolation_order) + if transform_parameters.get('channel_shift_intensity') is not None: + x = apply_channel_shift(x, transform_parameters['channel_shift_intensity'], img_channel_axis) + if transform_parameters.get('flip_horizontal', False): + x = flip_axis(x, img_col_axis) + if transform_parameters.get('flip_vertical', False): + x = flip_axis(x, img_row_axis) + if transform_parameters.get('brightness') is not None: + x = apply_brightness_shift(x, transform_parameters['brightness'], False) + return x + + def random_transform(self, x, seed=None): + params = self.get_random_transform(x.shape, seed) + return self.apply_transform(x, params) + + def fit(self, x, augment=False, rounds=1, seed=None): + x = np.asarray(x, dtype=self.dtype) + if x.ndim != 4: + raise ValueError('Input to `.fit()` should have rank 4. Got array with shape: ' + str(x.shape)) + if x.shape[self.channel_axis] not in {1, 3, 4}: + warnings.warn('Expected input to be images (as Numpy array) following the data format convention "' + self.data_format + '" (channels on axis ' + str(self.channel_axis) + '), i.e. expected either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. However, it was passed an array with shape ' + str(x.shape) + ' (' + str(x.shape[self.channel_axis]) + ' channels).') + if seed is not None: + np.random.seed(seed) + x = np.copy(x) + if self.rescale: + x *= self.rescale + if augment: + ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=self.dtype) + for r in range(rounds): + for i in range(x.shape[0]): + ax[i + r * x.shape[0]] = self.random_transform(x[i]) + x = ax + if self.featurewise_center: + self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis)) + broadcast_shape = [1, 1, 1] + broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis] + self.mean = np.reshape(self.mean, broadcast_shape) + x -= self.mean + if self.featurewise_std_normalization: + self.std = np.std(x, axis=(0, self.row_axis, self.col_axis)) + broadcast_shape = [1, 1, 1] + broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis] + self.std = np.reshape(self.std, broadcast_shape) + x /= self.std + 1e-06 + if self.zca_whitening: + n = len(x) + flat_x = np.reshape(x, (n, -1)) + (u, s, _) = np.linalg.svd(flat_x.T, full_matrices=False) + s_inv = np.sqrt(n) / (s + self.zca_epsilon) + self.zca_whitening_matrix = (u * s_inv).dot(u.T) + +@keras_export('keras._legacy.preprocessing.image.random_rotation') +def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0, interpolation_order=1): + theta = np.random.uniform(-rg, rg) + x = apply_affine_transform(x, theta=theta, row_axis=row_axis, col_axis=col_axis, channel_axis=channel_axis, fill_mode=fill_mode, cval=cval, order=interpolation_order) + return x + +@keras_export('keras._legacy.preprocessing.image.random_shift') +def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0, interpolation_order=1): + (h, w) = (x.shape[row_axis], x.shape[col_axis]) + tx = np.random.uniform(-hrg, hrg) * h + ty = np.random.uniform(-wrg, wrg) * w + x = apply_affine_transform(x, tx=tx, ty=ty, row_axis=row_axis, 
col_axis=col_axis, channel_axis=channel_axis, fill_mode=fill_mode, cval=cval, order=interpolation_order) + return x + +@keras_export('keras._legacy.preprocessing.image.random_shear') +def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0, interpolation_order=1): + shear = np.random.uniform(-intensity, intensity) + x = apply_affine_transform(x, shear=shear, row_axis=row_axis, col_axis=col_axis, channel_axis=channel_axis, fill_mode=fill_mode, cval=cval, order=interpolation_order) + return x + +@keras_export('keras._legacy.preprocessing.image.random_zoom') +def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0, interpolation_order=1): + if len(zoom_range) != 2: + raise ValueError(f'`zoom_range` should be a tuple or list of two floats. Received: {zoom_range}') + if zoom_range[0] == 1 and zoom_range[1] == 1: + (zx, zy) = (1, 1) + else: + (zx, zy) = np.random.uniform(zoom_range[0], zoom_range[1], 2) + x = apply_affine_transform(x, zx=zx, zy=zy, row_axis=row_axis, col_axis=col_axis, channel_axis=channel_axis, fill_mode=fill_mode, cval=cval, order=interpolation_order) + return x + +@keras_export('keras._legacy.preprocessing.image.apply_channel_shift') +def apply_channel_shift(x, intensity, channel_axis=0): + x = np.rollaxis(x, channel_axis, 0) + (min_x, max_x) = (np.min(x), np.max(x)) + channel_images = [np.clip(x_channel + intensity, min_x, max_x) for x_channel in x] + x = np.stack(channel_images, axis=0) + x = np.rollaxis(x, 0, channel_axis + 1) + return x + +@keras_export('keras._legacy.preprocessing.image.random_channel_shift') +def random_channel_shift(x, intensity_range, channel_axis=0): + intensity = np.random.uniform(-intensity_range, intensity_range) + return apply_channel_shift(x, intensity, channel_axis=channel_axis) + +@keras_export('keras._legacy.preprocessing.image.apply_brightness_shift') +def apply_brightness_shift(x, brightness, scale=True): + from PIL import ImageEnhance + (x_min, x_max) = (np.min(x), np.max(x)) + local_scale = x_min < 0 or x_max > 255 + x = image_utils.array_to_img(x, scale=local_scale or scale) + x = imgenhancer_Brightness = ImageEnhance.Brightness(x) + x = imgenhancer_Brightness.enhance(brightness) + x = image_utils.img_to_array(x) + if not scale and local_scale: + x = x / 255 * (x_max - x_min) + x_min + return x + +@keras_export('keras._legacy.preprocessing.image.random_brightness') +def random_brightness(x, brightness_range, scale=True): + if len(brightness_range) != 2: + raise ValueError(f'`brightness_range should be tuple or list of two floats. 
Received: {brightness_range}') + u = np.random.uniform(brightness_range[0], brightness_range[1]) + return apply_brightness_shift(x, u, scale) + +def transform_matrix_offset_center(matrix, x, y): + o_x = float(x) / 2 - 0.5 + o_y = float(y) / 2 - 0.5 + offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]]) + reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]]) + transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix) + return transform_matrix + +@keras_export('keras._legacy.preprocessing.image.apply_affine_transform') +def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0, order=1): + if np.unique([row_axis, col_axis, channel_axis]).size != 3: + raise ValueError("'row_axis', 'col_axis', and 'channel_axis' must be distinct") + valid_indices = set([0, 1, 2]) + actual_indices = set([row_axis, col_axis, channel_axis]) + if actual_indices != valid_indices: + raise ValueError(f"Invalid axis indices: {actual_indices - valid_indices}") + if x.ndim != 3: + raise ValueError('Input arrays must be multi-channel 2D images.') + if channel_axis not in [0, 2]: + raise ValueError('Channels are only allowed at the first or last dimension.') + transform_matrix = None + if theta != 0: + theta = np.deg2rad(theta) + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) + transform_matrix = rotation_matrix + if tx != 0 or ty != 0: + shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]]) + if transform_matrix is None: + transform_matrix = shift_matrix + else: + transform_matrix = np.dot(transform_matrix, shift_matrix) + if shear != 0: + shear = np.deg2rad(shear) + shear_matrix = np.array([[1, -np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]]) + if transform_matrix is None: + transform_matrix = shear_matrix + else: + transform_matrix = np.dot(transform_matrix, shear_matrix) + if zx != 1 or zy != 1: + zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]]) + if transform_matrix is None: + transform_matrix = zoom_matrix + else: + transform_matrix = np.dot(transform_matrix, zoom_matrix) + if transform_matrix is not None: + (h, w) = (x.shape[row_axis], x.shape[col_axis]) + transform_matrix = transform_matrix_offset_center(transform_matrix, h, w) + x = np.rollaxis(x, channel_axis, 0) + if col_axis > row_axis: + transform_matrix[:, [0, 1]] = transform_matrix[:, [1, 0]] + transform_matrix[[0, 1]] = transform_matrix[[1, 0]] + final_affine_matrix = transform_matrix[:2, :2] + final_offset = transform_matrix[:2, 2] + channel_images = [scipy.ndimage.affine_transform(x_channel, final_affine_matrix, final_offset, order=order, mode=fill_mode, cval=cval) for x_channel in x] + x = np.stack(channel_images, axis=0) + x = np.rollaxis(x, 0, channel_axis + 1) + return x + +# File: keras-master/keras/src/legacy/preprocessing/sequence.py +"""""" +import json +import random +import numpy as np +from keras.src.api_export import keras_export +from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset + +@keras_export('keras._legacy.preprocessing.sequence.TimeseriesGenerator') +class TimeseriesGenerator(PyDataset): + + def __init__(self, data, targets, length, sampling_rate=1, stride=1, start_index=0, end_index=None, shuffle=False, reverse=False, batch_size=128): + if len(data) != len(targets): + raise ValueError(f'Data and targets must be of the same length. 
Data length is {len(data)} while target length is {len(targets)}') + self.data = data + self.targets = targets + self.length = length + self.sampling_rate = sampling_rate + self.stride = stride + self.start_index = start_index + length + if end_index is None: + end_index = len(data) - 1 + self.end_index = end_index + self.shuffle = shuffle + self.reverse = reverse + self.batch_size = batch_size + if self.start_index > self.end_index: + raise ValueError(f'`start_index+length={self.start_index} > end_index={self.end_index}` is disallowed, as no part of the sequence would be left to be used as current step.') + + def __len__(self): + return (self.end_index - self.start_index + self.batch_size * self.stride) // (self.batch_size * self.stride) + + def __getitem__(self, index): + if self.shuffle: + rows = np.random.randint(self.start_index, self.end_index + 1, size=self.batch_size) + else: + i = self.start_index + self.batch_size * self.stride * index + rows = np.arange(i, min(i + self.batch_size * self.stride, self.end_index + 1), self.stride) + samples = np.array([self.data[row - self.length:row:self.sampling_rate] for row in rows]) + targets = np.array([self.targets[row] for row in rows]) + if self.reverse: + return (samples[:, ::-1, ...], targets) + return (samples, targets) + + def get_config(self): + data = self.data + if type(self.data).__module__ == np.__name__: + data = self.data.tolist() + try: + json_data = json.dumps(data) + except TypeError as e: + raise TypeError(f'Data not JSON Serializable: {data}') from e + targets = self.targets + if type(self.targets).__module__ == np.__name__: + targets = self.targets.tolist() + try: + json_targets = json.dumps(targets) + except TypeError as e: + raise TypeError(f'Targets not JSON Serializable: {targets}') from e + return {'data': json_data, 'targets': json_targets, 'length': self.length, 'sampling_rate': self.sampling_rate, 'stride': self.stride, 'start_index': self.start_index, 'end_index': self.end_index, 'shuffle': self.shuffle, 'reverse': self.reverse, 'batch_size': self.batch_size} + + def to_json(self, **kwargs): + config = self.get_config() + timeseries_generator_config = {'class_name': self.__class__.__name__, 'config': config} + return json.dumps(timeseries_generator_config, **kwargs) + +@keras_export('keras._legacy.preprocessing.sequence.make_sampling_table') +def make_sampling_table(size, sampling_factor=1e-05): + gamma = 0.577 + rank = np.arange(size) + rank[0] = 1 + inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1.0 / (12.0 * rank) + f = sampling_factor * inv_fq + return np.minimum(1.0, f / np.sqrt(f)) + +@keras_export('keras._legacy.preprocessing.sequence.skipgrams') +def skipgrams(sequence, vocabulary_size, window_size=4, negative_samples=1.0, shuffle=True, categorical=False, sampling_table=None, seed=None): + couples = [] + labels = [] + for (i, wi) in enumerate(sequence): + if not wi: + continue + if sampling_table is not None: + if sampling_table[wi] < random.random(): + continue + window_start = max(0, i - window_size) + window_end = min(len(sequence), i + window_size + 1) + for j in range(window_start, window_end): + if j != i: + wj = sequence[j] + if not wj: + continue + couples.append([wi, wj]) + if categorical: + labels.append([0, 1]) + else: + labels.append(1) + if negative_samples > 0: + num_negative_samples = int(len(labels) * negative_samples) + words = [c[0] for c in couples] + random.shuffle(words) + couples += [[words[i % len(words)], random.randint(1, vocabulary_size - 1)] for i in range(num_negative_samples)] 
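+ # Each negative pair above reuses an observed center word and draws a
+ # random context id uniformly from [1, vocabulary_size - 1]; index 0 is
+ # the reserved non-word index and is never sampled. The labels appended
+ # below mark these pairs as negatives (0, or [1, 0] when categorical).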
+ if categorical: + labels += [[1, 0]] * num_negative_samples + else: + labels += [0] * num_negative_samples + if shuffle: + if seed is None: + seed = random.randint(0, 10000000.0) + random.seed(seed) + random.shuffle(couples) + random.seed(seed) + random.shuffle(labels) + return (couples, labels) + +# File: keras-master/keras/src/legacy/preprocessing/text.py +"""""" +import collections +import hashlib +import json +import warnings +import numpy as np +from keras.src.api_export import keras_export + +@keras_export('keras._legacy.preprocessing.text.text_to_word_sequence') +def text_to_word_sequence(input_text, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' '): + if lower: + input_text = input_text.lower() + translate_dict = {c: split for c in filters} + translate_map = str.maketrans(translate_dict) + input_text = input_text.translate(translate_map) + seq = input_text.split(split) + return [i for i in seq if i] + +@keras_export('keras._legacy.preprocessing.text.one_hot') +def one_hot(input_text, n, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' ', analyzer=None): + return hashing_trick(input_text, n, hash_function=hash, filters=filters, lower=lower, split=split, analyzer=analyzer) + +@keras_export('keras._legacy.preprocessing.text.hashing_trick') +def hashing_trick(text, n, hash_function=None, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' ', analyzer=None): + if hash_function is None: + hash_function = hash + elif hash_function == 'md5': + + def hash_function(w): + return int(hashlib.md5(w.encode()).hexdigest(), 16) + if analyzer is None: + seq = text_to_word_sequence(text, filters=filters, lower=lower, split=split) + else: + seq = analyzer(text) + return [hash_function(w) % (n - 1) + 1 for w in seq] + +@keras_export('keras._legacy.preprocessing.text.Tokenizer') +class Tokenizer: + + def __init__(self, num_words=None, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' ', char_level=False, oov_token=None, analyzer=None, **kwargs): + if 'nb_words' in kwargs: + warnings.warn('The `nb_words` argument in `Tokenizer` has been renamed `num_words`.') + num_words = kwargs.pop('nb_words') + document_count = kwargs.pop('document_count', 0) + if kwargs: + raise TypeError('Unrecognized keyword arguments: ' + str(kwargs)) + self.word_counts = collections.OrderedDict() + self.word_docs = collections.defaultdict(int) + self.filters = filters + self.split = split + self.lower = lower + self.num_words = num_words + self.document_count = document_count + self.char_level = char_level + self.oov_token = oov_token + self.index_docs = collections.defaultdict(int) + self.word_index = {} + self.index_word = {} + self.analyzer = analyzer + + def fit_on_texts(self, texts): + for text in texts: + self.document_count += 1 + if self.char_level or isinstance(text, list): + if self.lower: + if isinstance(text, list): + text = [text_elem.lower() for text_elem in text] + else: + text = text.lower() + seq = text + elif self.analyzer is None: + seq = text_to_word_sequence(text, filters=self.filters, lower=self.lower, split=self.split) + else: + seq = self.analyzer(text) + for w in seq: + if w in self.word_counts: + self.word_counts[w] += 1 + else: + self.word_counts[w] = 1 + for w in set(seq): + self.word_docs[w] += 1 + wcounts = list(self.word_counts.items()) + wcounts.sort(key=lambda x: x[1], reverse=True) + if self.oov_token is None: + sorted_voc = [] + else: + sorted_voc = [self.oov_token] + sorted_voc.extend((wc[0] for wc in wcounts)) + 
self.word_index = dict(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))) + self.index_word = {c: w for (w, c) in self.word_index.items()} + for (w, c) in list(self.word_docs.items()): + self.index_docs[self.word_index[w]] = c + + def fit_on_sequences(self, sequences): + self.document_count += len(sequences) + for seq in sequences: + seq = set(seq) + for i in seq: + self.index_docs[i] += 1 + + def texts_to_sequences(self, texts): + return list(self.texts_to_sequences_generator(texts)) + + def texts_to_sequences_generator(self, texts): + num_words = self.num_words + oov_token_index = self.word_index.get(self.oov_token) + for text in texts: + if self.char_level or isinstance(text, list): + if self.lower: + if isinstance(text, list): + text = [text_elem.lower() for text_elem in text] + else: + text = text.lower() + seq = text + elif self.analyzer is None: + seq = text_to_word_sequence(text, filters=self.filters, lower=self.lower, split=self.split) + else: + seq = self.analyzer(text) + vect = [] + for w in seq: + i = self.word_index.get(w) + if i is not None: + if num_words and i >= num_words: + if oov_token_index is not None: + vect.append(oov_token_index) + else: + vect.append(i) + elif self.oov_token is not None: + vect.append(oov_token_index) + yield vect + + def sequences_to_texts(self, sequences): + return list(self.sequences_to_texts_generator(sequences)) + + def sequences_to_texts_generator(self, sequences): + num_words = self.num_words + oov_token_index = self.word_index.get(self.oov_token) + for seq in sequences: + vect = [] + for num in seq: + word = self.index_word.get(num) + if word is not None: + if num_words and num >= num_words: + if oov_token_index is not None: + vect.append(self.index_word[oov_token_index]) + else: + vect.append(word) + elif self.oov_token is not None: + vect.append(self.index_word[oov_token_index]) + vect = ' '.join(vect) + yield vect + + def texts_to_matrix(self, texts, mode='binary'): + sequences = self.texts_to_sequences(texts) + return self.sequences_to_matrix(sequences, mode=mode) + + def sequences_to_matrix(self, sequences, mode='binary'): + if not self.num_words: + if self.word_index: + num_words = len(self.word_index) + 1 + else: + raise ValueError('Specify a dimension (`num_words` argument), or fit on some text data first.') + else: + num_words = self.num_words + if mode == 'tfidf' and (not self.document_count): + raise ValueError('Fit the Tokenizer on some data before using tfidf mode.') + x = np.zeros((len(sequences), num_words)) + for (i, seq) in enumerate(sequences): + if not seq: + continue + counts = collections.defaultdict(int) + for j in seq: + if j >= num_words: + continue + counts[j] += 1 + for (j, c) in list(counts.items()): + if mode == 'count': + x[i][j] = c + elif mode == 'freq': + x[i][j] = c / len(seq) + elif mode == 'binary': + x[i][j] = 1 + elif mode == 'tfidf': + tf = 1 + np.log(c) + idf = np.log(1 + self.document_count / (1 + self.index_docs.get(j, 0))) + x[i][j] = tf * idf + else: + raise ValueError('Unknown vectorization mode:', mode) + return x + + def get_config(self): + json_word_counts = json.dumps(self.word_counts) + json_word_docs = json.dumps(self.word_docs) + json_index_docs = json.dumps(self.index_docs) + json_word_index = json.dumps(self.word_index) + json_index_word = json.dumps(self.index_word) + return {'num_words': self.num_words, 'filters': self.filters, 'lower': self.lower, 'split': self.split, 'char_level': self.char_level, 'oov_token': self.oov_token, 'document_count': self.document_count, 'word_counts': 
json_word_counts, 'word_docs': json_word_docs, 'index_docs': json_index_docs, 'index_word': json_index_word, 'word_index': json_word_index} + + def to_json(self, **kwargs): + config = self.get_config() + tokenizer_config = {'class_name': self.__class__.__name__, 'config': config} + return json.dumps(tokenizer_config, **kwargs) + +@keras_export('keras._legacy.preprocessing.text.tokenizer_from_json') +def tokenizer_from_json(json_string): + tokenizer_config = json.loads(json_string) + config = tokenizer_config.get('config') + word_counts = json.loads(config.pop('word_counts')) + word_docs = json.loads(config.pop('word_docs')) + index_docs = json.loads(config.pop('index_docs')) + index_docs = {int(k): v for (k, v) in index_docs.items()} + index_word = json.loads(config.pop('index_word')) + index_word = {int(k): v for (k, v) in index_word.items()} + word_index = json.loads(config.pop('word_index')) + tokenizer = Tokenizer(**config) + tokenizer.word_counts = word_counts + tokenizer.word_docs = word_docs + tokenizer.index_docs = index_docs + tokenizer.word_index = word_index + tokenizer.index_word = index_word + return tokenizer + +# File: keras-master/keras/src/legacy/saving/json_utils.py +"""""" +import collections +import enum +import functools +import json +import numpy as np +from keras.src.legacy.saving import serialization +from keras.src.saving import serialization_lib +from keras.src.utils.module_utils import tensorflow as tf +_EXTENSION_TYPE_SPEC = '_EXTENSION_TYPE_SPEC' + +class Encoder(json.JSONEncoder): + + def default(self, obj): + if tf.available and isinstance(obj, tf.TensorShape): + items = obj.as_list() if obj.rank is not None else None + return {'class_name': 'TensorShape', 'items': items} + return get_json_type(obj) + + def encode(self, obj): + return super().encode(_encode_tuple(obj)) + +def _encode_tuple(x): + if isinstance(x, tuple): + return {'class_name': '__tuple__', 'items': tuple((_encode_tuple(i) for i in x))} + elif isinstance(x, list): + return [_encode_tuple(i) for i in x] + elif isinstance(x, dict): + return {key: _encode_tuple(value) for (key, value) in x.items()} + else: + return x + +def decode(json_string): + return json.loads(json_string, object_hook=_decode_helper) + +def decode_and_deserialize(json_string, module_objects=None, custom_objects=None): + return json.loads(json_string, object_hook=functools.partial(_decode_helper, deserialize=True, module_objects=module_objects, custom_objects=custom_objects)) + +def _decode_helper(obj, deserialize=False, module_objects=None, custom_objects=None): + if isinstance(obj, dict) and 'class_name' in obj: + if tf.available: + if obj['class_name'] == 'TensorShape': + return tf.TensorShape(obj['items']) + elif obj['class_name'] == 'TypeSpec': + from tensorflow.python.framework import type_spec_registry + return type_spec_registry.lookup(obj['type_spec'])._deserialize(_decode_helper(obj['serialized'])) + elif obj['class_name'] == 'CompositeTensor': + spec = obj['spec'] + tensors = [] + for (dtype, tensor) in obj['tensors']: + tensors.append(tf.constant(tensor, dtype=tf.dtypes.as_dtype(dtype))) + return tf.nest.pack_sequence_as(_decode_helper(spec), tensors, expand_composites=True) + if obj['class_name'] == '__tuple__': + return tuple((_decode_helper(i) for i in obj['items'])) + elif obj['class_name'] == '__ellipsis__': + return Ellipsis + elif deserialize and '__passive_serialization__' in obj: + try: + if 'module' not in obj: + return serialization.deserialize_keras_object(obj, module_objects=module_objects, 
custom_objects=custom_objects) + else: + return serialization_lib.deserialize_keras_object(obj, module_objects=module_objects, custom_objects=custom_objects) + except ValueError: + pass + elif obj['class_name'] == '__bytes__': + return obj['value'].encode('utf-8') + return obj + +def get_json_type(obj): + if hasattr(obj, 'get_config'): + serialized = serialization.serialize_keras_object(obj) + serialized['__passive_serialization__'] = True + return serialized + if type(obj).__module__ == np.__name__: + if isinstance(obj, np.ndarray): + return obj.tolist() + else: + return obj.item() + if callable(obj): + return obj.__name__ + if type(obj).__name__ == type.__name__: + return obj.__name__ + if tf.available and isinstance(obj, tf.compat.v1.Dimension): + return obj.value + if tf.available and isinstance(obj, tf.TensorShape): + return obj.as_list() + if tf.available and isinstance(obj, tf.DType): + return obj.name + if isinstance(obj, collections.abc.Mapping): + return dict(obj) + if obj is Ellipsis: + return {'class_name': '__ellipsis__'} + if tf.available and isinstance(obj, tf.TypeSpec): + from tensorflow.python.framework import type_spec_registry + try: + type_spec_name = type_spec_registry.get_name(type(obj)) + return {'class_name': 'TypeSpec', 'type_spec': type_spec_name, 'serialized': obj._serialize()} + except ValueError: + raise ValueError(f'Unable to serialize {obj} to JSON, because the TypeSpec class {type(obj)} has not been registered.') + if tf.available and isinstance(obj, tf.__internal__.CompositeTensor): + spec = tf.type_spec_from_value(obj) + tensors = [] + for tensor in tf.nest.flatten(obj, expand_composites=True): + tensors.append((tensor.dtype.name, tensor.numpy().tolist())) + return {'class_name': 'CompositeTensor', 'spec': get_json_type(spec), 'tensors': tensors} + if isinstance(obj, enum.Enum): + return obj.value + if isinstance(obj, bytes): + return {'class_name': '__bytes__', 'value': obj.decode('utf-8')} + raise TypeError(f'Unable to serialize {obj} to JSON. Unrecognized type {type(obj)}.') + +# File: keras-master/keras/src/legacy/saving/legacy_h5_format.py +import json +import os +import warnings +import numpy as np +from absl import logging +from keras.src import backend +from keras.src import optimizers +from keras.src.backend.common import global_state +from keras.src.legacy.saving import json_utils +from keras.src.legacy.saving import saving_options +from keras.src.legacy.saving import saving_utils +from keras.src.saving import object_registration +from keras.src.utils import io_utils +try: + import h5py +except ImportError: + h5py = None +HDF5_OBJECT_HEADER_LIMIT = 64512 + +def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True): + if h5py is None: + raise ImportError('`save_model()` using h5 format requires h5py. 
Could not import h5py.') + if not isinstance(filepath, h5py.File): + if not overwrite and os.path.isfile(filepath): + proceed = io_utils.ask_to_proceed_with_overwrite(filepath) + if not proceed: + return + dirpath = os.path.dirname(filepath) + if dirpath and (not os.path.exists(dirpath)): + os.makedirs(dirpath, exist_ok=True) + f = h5py.File(filepath, mode='w') + opened_new_file = True + else: + f = filepath + opened_new_file = False + try: + with saving_options.keras_option_scope(use_legacy_config=True): + model_metadata = saving_utils.model_metadata(model, include_optimizer) + for (k, v) in model_metadata.items(): + if isinstance(v, (dict, list, tuple)): + f.attrs[k] = json.dumps(v, default=json_utils.get_json_type).encode('utf8') + else: + f.attrs[k] = v + model_weights_group = f.create_group('model_weights') + save_weights_to_hdf5_group(model_weights_group, model) + if include_optimizer and hasattr(model, 'optimizer'): + save_optimizer_weights_to_hdf5_group(f, model.optimizer) + f.flush() + finally: + if opened_new_file: + f.close() + +def load_model_from_hdf5(filepath, custom_objects=None, compile=True): + if h5py is None: + raise ImportError('`load_model()` using h5 format requires h5py. Could not import h5py.') + if not custom_objects: + custom_objects = {} + gco = object_registration.GLOBAL_CUSTOM_OBJECTS + tlco = global_state.get_global_attribute('custom_objects_scope_dict', {}) + custom_objects = {**custom_objects, **gco, **tlco} + opened_new_file = not isinstance(filepath, h5py.File) + if opened_new_file: + f = h5py.File(filepath, mode='r') + else: + f = filepath + model = None + try: + model_config = f.attrs.get('model_config') + if model_config is None: + raise ValueError(f'No model config found in the file at {filepath}.') + if hasattr(model_config, 'decode'): + model_config = model_config.decode('utf-8') + model_config = json_utils.decode(model_config) + with saving_options.keras_option_scope(use_legacy_config=True): + model = saving_utils.model_from_config(model_config, custom_objects=custom_objects) + load_weights_from_hdf5_group(f['model_weights'], model) + if compile: + training_config = f.attrs.get('training_config') + if hasattr(training_config, 'decode'): + training_config = training_config.decode('utf-8') + if training_config is None: + logging.warning('No training configuration found in the save file, so the model was *not* compiled. Compile it manually.') + return model + training_config = json_utils.decode(training_config) + model.compile(**saving_utils.compile_args_from_training_config(training_config, custom_objects)) + saving_utils.try_build_compiled_arguments(model) + if 'optimizer_weights' in f: + try: + if isinstance(model.optimizer, optimizers.Optimizer): + model.optimizer.build(model._trainable_variables) + else: + model.optimizer._create_all_weights(model._trainable_variables) + except (NotImplementedError, AttributeError): + logging.warning(f'Error when creating the weights of optimizer {model.optimizer}, making it impossible to restore the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.') + optimizer_weight_values = load_optimizer_weights_from_hdf5_group(f) + try: + model.optimizer.set_weights(optimizer_weight_values) + except ValueError: + logging.warning('Error in loading the saved optimizer state. 
As a result, your model is starting with a freshly initialized optimizer.') + finally: + if opened_new_file: + f.close() + return model + +def save_weights_to_hdf5_group(f, model): + from keras.src import __version__ as keras_version + save_attributes_to_hdf5_group(f, 'layer_names', [layer.name.encode('utf8') for layer in model.layers]) + f.attrs['backend'] = backend.backend().encode('utf8') + f.attrs['keras_version'] = str(keras_version).encode('utf8') + for layer in sorted(model.layers, key=lambda x: x.name): + g = f.create_group(layer.name) + weights = _legacy_weights(layer) + save_subset_weights_to_hdf5_group(g, weights) + weights = list((v for v in model._trainable_variables + model._non_trainable_variables if v in model.weights)) + g = f.create_group('top_level_model_weights') + save_subset_weights_to_hdf5_group(g, weights) + +def save_subset_weights_to_hdf5_group(f, weights): + weight_values = [backend.convert_to_numpy(w) for w in weights] + weight_names = [str(w.path).encode('utf8') for w in weights] + save_attributes_to_hdf5_group(f, 'weight_names', weight_names) + for (name, val) in zip(weight_names, weight_values): + param_dset = f.create_dataset(name, val.shape, dtype=val.dtype) + if not val.shape: + param_dset[()] = val + else: + param_dset[:] = val + +def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer): + if isinstance(optimizer, optimizers.Optimizer): + symbolic_weights = optimizer.variables + else: + symbolic_weights = getattr(optimizer, 'weights') + if symbolic_weights: + weights_group = hdf5_group.create_group('optimizer_weights') + weight_names = [str(w.path).encode('utf8') for w in symbolic_weights] + save_attributes_to_hdf5_group(weights_group, 'weight_names', weight_names) + weight_values = [backend.convert_to_numpy(w) for w in symbolic_weights] + for (name, val) in zip(weight_names, weight_values): + param_dset = weights_group.create_dataset(name, val.shape, dtype=val.dtype) + if not val.shape: + param_dset[()] = val + else: + param_dset[:] = val + +def save_attributes_to_hdf5_group(group, name, data): + bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT] + if bad_attributes: + raise RuntimeError(f'The following attributes cannot be saved to HDF5 file because they are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}') + data_npy = np.asarray(data) + num_chunks = 1 + chunked_data = np.array_split(data_npy, num_chunks) + while any((x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data)): + num_chunks += 1 + chunked_data = np.array_split(data_npy, num_chunks) + if num_chunks > 1: + for (chunk_id, chunk_data) in enumerate(chunked_data): + group.attrs['%s%d' % (name, chunk_id)] = chunk_data + else: + group.attrs[name] = data + +def load_weights_from_hdf5_group(f, model): + if 'keras_version' in f.attrs: + original_keras_version = f.attrs['keras_version'] + if hasattr(original_keras_version, 'decode'): + original_keras_version = original_keras_version.decode('utf8') + else: + original_keras_version = '1' + if 'backend' in f.attrs: + original_backend = f.attrs['backend'] + if hasattr(original_backend, 'decode'): + original_backend = original_backend.decode('utf8') + else: + original_backend = None + filtered_layers = [] + for layer in model.layers: + weights = _legacy_weights(layer) + if weights: + filtered_layers.append(layer) + layer_names = load_attributes_from_hdf5_group(f, 'layer_names') + filtered_layer_names = [] + for name in layer_names: + g = f[name] + weight_names = load_attributes_from_hdf5_group(g, 
'weight_names') + if weight_names: + filtered_layer_names.append(name) + layer_names = filtered_layer_names + if len(layer_names) != len(filtered_layers): + raise ValueError(f'Layer count mismatch when loading weights from file. Model expected {len(filtered_layers)} layers, found {len(layer_names)} saved layers.') + for (k, name) in enumerate(layer_names): + g = f[name] + layer = filtered_layers[k] + symbolic_weights = _legacy_weights(layer) + weight_values = load_subset_weights_from_hdf5_group(g) + if len(weight_values) != len(symbolic_weights): + raise ValueError(f'Weight count mismatch for layer #{k} (named {layer.name} in the current model, {name} in the save file). Layer expects {len(symbolic_weights)} weight(s). Received {len(weight_values)} saved weight(s)') + _set_weights(layer, symbolic_weights, weight_values, name=f'layer #{k} (named {layer.name})') + if 'top_level_model_weights' in f: + symbolic_weights = list((v for v in model._trainable_variables + model._non_trainable_variables if v in model.weights)) + weight_values = load_subset_weights_from_hdf5_group(f['top_level_model_weights']) + if len(weight_values) != len(symbolic_weights): + raise ValueError(f'Weight count mismatch for top-level weights when loading weights from file. Model expects {len(symbolic_weights)} top-level weight(s). Received {len(weight_values)} saved top-level weight(s)') + _set_weights(model, symbolic_weights, weight_values, name='top-level model') + +def _set_weights(instance, symbolic_weights, weight_values, name, skip_mismatch=False): + for (i, weight_value) in enumerate(weight_values): + expected_shape = symbolic_weights[i].shape + received_shape = weight_value.shape + if expected_shape != received_shape: + if skip_mismatch: + warnings.warn(f'Skipping loading weights for {name} due to mismatch in shape for weight {symbolic_weights[i].path}. Weight expects shape {expected_shape}. Received saved weight with shape {received_shape}', stacklevel=2) + continue + raise ValueError(f'Shape mismatch in {name} for weight {symbolic_weights[i].path}. Weight expects shape {expected_shape}. Received saved weight with shape {received_shape}') + symbolic_weights[i].assign(weight_value) + if hasattr(instance, 'finalize_state') and symbolic_weights: + instance.finalize_state() + +def load_weights_from_hdf5_group_by_name(f, model, skip_mismatch=False): + if 'keras_version' in f.attrs: + original_keras_version = f.attrs['keras_version'] + if hasattr(original_keras_version, 'decode'): + original_keras_version = original_keras_version.decode('utf8') + else: + original_keras_version = '1' + if 'backend' in f.attrs: + original_backend = f.attrs['backend'] + if hasattr(original_backend, 'decode'): + original_backend = original_backend.decode('utf8') + else: + original_backend = None + layer_names = load_attributes_from_hdf5_group(f, 'layer_names') + index = {} + for layer in model.layers: + if layer.name: + index.setdefault(layer.name, []).append(layer) + for (k, name) in enumerate(layer_names): + g = f[name] + weight_values = load_subset_weights_from_hdf5_group(g) + for layer in index.get(name, []): + symbolic_weights = _legacy_weights(layer) + if len(weight_values) != len(symbolic_weights): + if skip_mismatch: + warnings.warn(f'Skipping loading of weights for layer #{k} (named {layer.name}) due to mismatch in number of weights. Layer expects {len(symbolic_weights)} weight(s). Received {len(weight_values)} saved weight(s)', stacklevel=2) + continue + raise ValueError(f'Weight count mismatch for layer #{k} (named {layer.name}). 
Layer expects {len(symbolic_weights)} weight(s). Received {len(weight_values)} saved weight(s)') + _set_weights(layer, symbolic_weights, weight_values, skip_mismatch=skip_mismatch, name=f'layer #{k} (named {layer.name})') + if 'top_level_model_weights' in f: + symbolic_weights = model.trainable_weights + model.non_trainable_weights + weight_values = load_subset_weights_from_hdf5_group(f['top_level_model_weights']) + if len(weight_values) != len(symbolic_weights): + if skip_mismatch: + warnings.warn(f'Skipping loading top-level weights for model due to mismatch in number of weights. Model expects {len(symbolic_weights)} top-level weight(s). Received {len(weight_values)} saved top-level weight(s)', stacklevel=2) + else: + raise ValueError(f'Weight count mismatch for top-level weights of model. Model expects {len(symbolic_weights)} top-level weight(s). Received {len(weight_values)} saved top-level weight(s)') + else: + _set_weights(model, symbolic_weights, weight_values, skip_mismatch=skip_mismatch, name='top-level model') + +def load_subset_weights_from_hdf5_group(f): + weight_names = load_attributes_from_hdf5_group(f, 'weight_names') + return [np.asarray(f[weight_name]) for weight_name in weight_names] + +def load_optimizer_weights_from_hdf5_group(hdf5_group): + weights_group = hdf5_group['optimizer_weights'] + optimizer_weight_names = load_attributes_from_hdf5_group(weights_group, 'weight_names') + return [weights_group[weight_name] for weight_name in optimizer_weight_names] + +def load_attributes_from_hdf5_group(group, name): + if name in group.attrs: + data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]] + else: + data = [] + chunk_id = 0 + while f'{name}{chunk_id}' in group.attrs: + data.extend([n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[f'{name}{chunk_id}']]) + chunk_id += 1 + return data + +def _legacy_weights(layer): + return layer.trainable_weights + layer.non_trainable_weights + +# File: keras-master/keras/src/legacy/saving/saving_options.py +import contextlib +from keras.src.backend.common import global_state + +@contextlib.contextmanager +def keras_option_scope(use_legacy_config=True): + use_legacy_config_prev_value = global_state.get_global_attribute('use_legacy_config', None) + global_state.set_global_attribute('use_legacy_config', use_legacy_config) + try: + yield + finally: + global_state.set_global_attribute('use_legacy_config', use_legacy_config_prev_value) + +# File: keras-master/keras/src/legacy/saving/saving_utils.py +import json +import threading +from absl import logging +from keras.src import backend +from keras.src import layers +from keras.src import losses +from keras.src import metrics as metrics_module +from keras.src import models +from keras.src import optimizers +from keras.src import tree +from keras.src.legacy.saving import serialization +from keras.src.saving import object_registration +MODULE_OBJECTS = threading.local() +LAMBDA_DEP_ARGS = ('module', 'function_type', 'output_shape_type', 'output_shape_module') + +def model_from_config(config, custom_objects=None): + if isinstance(config, list): + raise TypeError(f'`model_from_config` expects a dictionary, not a list. Received: config={config}. 
Did you mean to use `Sequential.from_config(config)`?') + global MODULE_OBJECTS + if not hasattr(MODULE_OBJECTS, 'ALL_OBJECTS'): + MODULE_OBJECTS.ALL_OBJECTS = layers.__dict__ + MODULE_OBJECTS.ALL_OBJECTS['InputLayer'] = layers.InputLayer + MODULE_OBJECTS.ALL_OBJECTS['Functional'] = models.Functional + MODULE_OBJECTS.ALL_OBJECTS['Model'] = models.Model + MODULE_OBJECTS.ALL_OBJECTS['Sequential'] = models.Sequential + batch_input_shape = config['config'].pop('batch_input_shape', None) + if batch_input_shape is not None: + if config['class_name'] == 'InputLayer': + config['config']['batch_shape'] = batch_input_shape + else: + config['config']['input_shape'] = batch_input_shape + axis = config['config'].pop('axis', None) + if axis is not None and isinstance(axis, list) and (len(axis) == 1): + config['config']['axis'] = int(axis[0]) + if config['class_name'] == 'Lambda': + for dep_arg in LAMBDA_DEP_ARGS: + _ = config['config'].pop(dep_arg, None) + function_config = config['config']['function'] + if isinstance(function_config, list): + function_dict = {'class_name': '__lambda__', 'config': {}} + function_dict['config']['code'] = function_config[0] + function_dict['config']['defaults'] = function_config[1] + function_dict['config']['closure'] = function_config[2] + config['config']['function'] = function_dict + config = _find_replace_nested_dict(config, 'keras.', 'keras.') + return serialization.deserialize_keras_object(config, module_objects=MODULE_OBJECTS.ALL_OBJECTS, custom_objects=custom_objects, printable_module_name='layer') + +def model_metadata(model, include_optimizer=True, require_config=True): + from keras.src import __version__ as keras_version + model_config = {'class_name': model.__class__.__name__} + try: + model_config['config'] = model.get_config() + except NotImplementedError as e: + if require_config: + raise e + metadata = dict(keras_version=str(keras_version), backend=backend.backend(), model_config=model_config) + if getattr(model, 'optimizer', False) and include_optimizer: + if model.compiled: + training_config = model._compile_config.config + training_config.pop('optimizer', None) + metadata['training_config'] = _serialize_nested_config(training_config) + optimizer_config = {'class_name': object_registration.get_registered_name(model.optimizer.__class__), 'config': model.optimizer.get_config()} + metadata['training_config']['optimizer_config'] = optimizer_config + return metadata + +def compile_args_from_training_config(training_config, custom_objects=None): + if custom_objects is None: + custom_objects = {} + with object_registration.CustomObjectScope(custom_objects): + optimizer_config = training_config['optimizer_config'] + optimizer = optimizers.deserialize(optimizer_config) + optimizer = _resolve_compile_arguments_compat(optimizer, optimizer_config, optimizers) + loss = None + loss_config = training_config.get('loss', None) + if loss_config is not None: + loss = _deserialize_nested_config(losses.deserialize, loss_config) + loss = _resolve_compile_arguments_compat(loss, loss_config, losses) + metrics = None + metrics_config = training_config.get('metrics', None) + if metrics_config is not None: + metrics = _deserialize_nested_config(_deserialize_metric, metrics_config) + metrics = _resolve_compile_arguments_compat(metrics, metrics_config, metrics_module) + weighted_metrics = None + weighted_metrics_config = training_config.get('weighted_metrics', None) + if weighted_metrics_config is not None: + weighted_metrics = _deserialize_nested_config(_deserialize_metric, 
weighted_metrics_config) + loss_weights = training_config['loss_weights'] + return dict(optimizer=optimizer, loss=loss, metrics=metrics, weighted_metrics=weighted_metrics, loss_weights=loss_weights) + +def _serialize_nested_config(config): + + def _serialize_fn(obj): + if callable(obj): + return serialization.serialize_keras_object(obj) + return obj + return tree.map_structure(_serialize_fn, config) + +def _deserialize_nested_config(deserialize_fn, config): + + def _is_single_object(obj): + if isinstance(obj, dict) and 'class_name' in obj: + return True + if isinstance(obj, str): + return True + return False + if config is None: + return None + if _is_single_object(config): + return deserialize_fn(config) + elif isinstance(config, dict): + return {k: _deserialize_nested_config(deserialize_fn, v) for (k, v) in config.items()} + elif isinstance(config, (tuple, list)): + return [_deserialize_nested_config(deserialize_fn, obj) for obj in config] + raise ValueError(f'Saved configuration not understood. Configuration should be a dictionary, string, tuple or list. Received: config={config}.') + +def _deserialize_metric(metric_config): + if metric_config in ['accuracy', 'acc', 'crossentropy', 'ce']: + return metric_config + return metrics_module.deserialize(metric_config) + +def _find_replace_nested_dict(config, find, replace): + dict_str = json.dumps(config) + dict_str = dict_str.replace(find, replace) + config = json.loads(dict_str) + return config + +def _resolve_compile_arguments_compat(obj, obj_config, module): + if isinstance(obj, str) and obj not in module.ALL_OBJECTS_DICT: + obj = module.get(obj_config['config']['name']) + return obj + +def try_build_compiled_arguments(model): + try: + if not model.compiled_loss.built: + model.compiled_loss.build(model.outputs) + if not model.compiled_metrics.built: + model.compiled_metrics.build(model.outputs, model.outputs) + except: + logging.warning('Compiled the loaded model, but the compiled metrics have yet to be built. 
`model.compile_metrics` will be empty until you train or evaluate the model.') + +# File: keras-master/keras/src/legacy/saving/serialization.py +"""""" +import contextlib +import inspect +import json +import threading +import weakref +from keras.src.api_export import keras_export +from keras.src.saving import object_registration +_SKIP_FAILED_SERIALIZATION = False +_LAYER_UNDEFINED_CONFIG_KEY = 'layer was saved without config' +SHARED_OBJECT_KEY = 'shared_object_id' +SHARED_OBJECT_DISABLED = threading.local() +SHARED_OBJECT_LOADING = threading.local() +SHARED_OBJECT_SAVING = threading.local() + +def _shared_object_disabled(): + return getattr(SHARED_OBJECT_DISABLED, 'disabled', False) + +def _shared_object_loading_scope(): + return getattr(SHARED_OBJECT_LOADING, 'scope', NoopLoadingScope()) + +def _shared_object_saving_scope(): + return getattr(SHARED_OBJECT_SAVING, 'scope', None) + +class DisableSharedObjectScope: + + def __enter__(self): + SHARED_OBJECT_DISABLED.disabled = True + self._orig_loading_scope = _shared_object_loading_scope() + self._orig_saving_scope = _shared_object_saving_scope() + + def __exit__(self, *args, **kwargs): + SHARED_OBJECT_DISABLED.disabled = False + SHARED_OBJECT_LOADING.scope = self._orig_loading_scope + SHARED_OBJECT_SAVING.scope = self._orig_saving_scope + +class NoopLoadingScope: + + def get(self, unused_object_id): + return None + + def set(self, object_id, obj): + pass + +class SharedObjectLoadingScope: + + def __enter__(self): + if _shared_object_disabled(): + return NoopLoadingScope() + global SHARED_OBJECT_LOADING + SHARED_OBJECT_LOADING.scope = self + self._obj_ids_to_obj = {} + return self + + def get(self, object_id): + if object_id is None: + return + return self._obj_ids_to_obj.get(object_id) + + def set(self, object_id, obj): + if object_id is None: + return + self._obj_ids_to_obj[object_id] = obj + + def __exit__(self, *args, **kwargs): + global SHARED_OBJECT_LOADING + SHARED_OBJECT_LOADING.scope = NoopLoadingScope() + +class SharedObjectConfig(dict): + + def __init__(self, base_config, object_id, **kwargs): + self.ref_count = 1 + self.object_id = object_id + super().__init__(base_config, **kwargs) + + def increment_ref_count(self): + if self.ref_count == 1: + self[SHARED_OBJECT_KEY] = self.object_id + self.ref_count += 1 + +class SharedObjectSavingScope: + + def __enter__(self): + if _shared_object_disabled(): + return None + global SHARED_OBJECT_SAVING + if _shared_object_saving_scope() is not None: + self._passthrough = True + return _shared_object_saving_scope() + else: + self._passthrough = False + SHARED_OBJECT_SAVING.scope = self + self._shared_objects_config = weakref.WeakKeyDictionary() + self._next_id = 0 + return self + + def get_config(self, obj): + try: + shared_object_config = self._shared_objects_config[obj] + except (TypeError, KeyError): + return None + shared_object_config.increment_ref_count() + return shared_object_config + + def create_config(self, base_config, obj): + shared_object_config = SharedObjectConfig(base_config, self._next_id) + self._next_id += 1 + try: + self._shared_objects_config[obj] = shared_object_config + except TypeError: + pass + return shared_object_config + + def __exit__(self, *args, **kwargs): + if not getattr(self, '_passthrough', False): + global SHARED_OBJECT_SAVING + SHARED_OBJECT_SAVING.scope = None + +def serialize_keras_class_and_config(cls_name, cls_config, obj=None, shared_object_id=None): + base_config = {'class_name': cls_name, 'config': cls_config} + if shared_object_id is not None: + 
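+# A minimal sketch of the shared-object scopes above, using a stand-in class
+# (any weak-referenceable object, such as a layer, behaves the same): within a
+# single saving scope, the first serialization of an object returns its plain
+# config, and later references return that same config tagged with a
+# shared_object_id so deserialization can re-link the shared instance.
+class _DemoShared:
+    pass
+_demo_obj = _DemoShared()
+with SharedObjectSavingScope() as _demo_scope:
+    _demo_first = _demo_scope.create_config({'units': 4}, _demo_obj)
+    _demo_again = _demo_scope.get_config(_demo_obj)
+# _demo_first is _demo_again, and SHARED_OBJECT_KEY is now present in the config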
base_config[SHARED_OBJECT_KEY] = shared_object_id + if _shared_object_saving_scope() is not None and obj is not None: + shared_object_config = _shared_object_saving_scope().get_config(obj) + if shared_object_config is None: + return _shared_object_saving_scope().create_config(base_config, obj) + return shared_object_config + return base_config + +@contextlib.contextmanager +def skip_failed_serialization(): + global _SKIP_FAILED_SERIALIZATION + prev = _SKIP_FAILED_SERIALIZATION + try: + _SKIP_FAILED_SERIALIZATION = True + yield + finally: + _SKIP_FAILED_SERIALIZATION = prev + +@keras_export(['keras.legacy.saving.serialize_keras_object', 'keras.utils.legacy.serialize_keras_object']) +def serialize_keras_object(instance): + instance = inspect.unwrap(instance) + if instance is None: + return None + if hasattr(instance, 'get_config'): + name = object_registration.get_registered_name(instance.__class__) + try: + config = instance.get_config() + except NotImplementedError as e: + if _SKIP_FAILED_SERIALIZATION: + return serialize_keras_class_and_config(name, {_LAYER_UNDEFINED_CONFIG_KEY: True}) + raise e + serialization_config = {} + for (key, item) in config.items(): + if isinstance(item, str): + serialization_config[key] = item + continue + try: + serialized_item = serialize_keras_object(item) + if isinstance(serialized_item, dict) and (not isinstance(item, dict)): + serialized_item['__passive_serialization__'] = True + serialization_config[key] = serialized_item + except ValueError: + serialization_config[key] = item + name = object_registration.get_registered_name(instance.__class__) + return serialize_keras_class_and_config(name, serialization_config, instance) + if hasattr(instance, '__name__'): + return object_registration.get_registered_name(instance) + raise ValueError(f"Cannot serialize {instance} because it doesn't implement `get_config()`.") + +def class_and_config_for_serialized_keras_object(config, module_objects=None, custom_objects=None, printable_module_name='object'): + if not isinstance(config, dict) or 'class_name' not in config or 'config' not in config: + raise ValueError(f'Improper config format for {config}. Expecting python dict contains `class_name` and `config` as keys') + class_name = config['class_name'] + cls = object_registration.get_registered_object(class_name, custom_objects, module_objects) + if cls is None: + raise ValueError(f"Unknown {printable_module_name}: '{class_name}'. Please ensure you are using a `keras.utils.custom_object_scope` and that this object is included in the scope. 
See https://www.tensorflow.org/guide/keras/save_and_serialize#registering_the_custom_object for details.") + cls_config = config['config'] + if isinstance(cls_config, list): + return (cls, cls_config) + deserialized_objects = {} + for (key, item) in cls_config.items(): + if key == 'name': + deserialized_objects[key] = item + elif isinstance(item, dict) and '__passive_serialization__' in item: + deserialized_objects[key] = deserialize_keras_object(item, module_objects=module_objects, custom_objects=custom_objects, printable_module_name='config_item') + elif isinstance(item, str) and inspect.isfunction(object_registration.get_registered_object(item, custom_objects)): + deserialized_objects[key] = object_registration.get_registered_object(item, custom_objects) + for (key, item) in deserialized_objects.items(): + cls_config[key] = deserialized_objects[key] + return (cls, cls_config) + +@keras_export(['keras.legacy.saving.deserialize_keras_object', 'keras.utils.legacy.deserialize_keras_object']) +def deserialize_keras_object(identifier, module_objects=None, custom_objects=None, printable_module_name='object'): + if identifier is None: + return None + if isinstance(identifier, dict): + config = identifier + (cls, cls_config) = class_and_config_for_serialized_keras_object(config, module_objects, custom_objects, printable_module_name) + shared_object_id = config.get(SHARED_OBJECT_KEY) + shared_object = _shared_object_loading_scope().get(shared_object_id) + if shared_object is not None: + return shared_object + if hasattr(cls, 'from_config'): + arg_spec = inspect.getfullargspec(cls.from_config) + custom_objects = custom_objects or {} + cls_config = _find_replace_nested_dict(cls_config, 'keras.', 'keras.') + if 'custom_objects' in arg_spec.args: + deserialized_obj = cls.from_config(cls_config, custom_objects={**object_registration.GLOBAL_CUSTOM_OBJECTS, **custom_objects}) + else: + with object_registration.CustomObjectScope(custom_objects): + deserialized_obj = cls.from_config(cls_config) + else: + custom_objects = custom_objects or {} + with object_registration.CustomObjectScope(custom_objects): + deserialized_obj = cls(**cls_config) + _shared_object_loading_scope().set(shared_object_id, deserialized_obj) + return deserialized_obj + elif isinstance(identifier, str): + object_name = identifier + if custom_objects and object_name in custom_objects: + obj = custom_objects.get(object_name) + elif object_name in object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__: + obj = object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__[object_name] + elif object_name in object_registration._GLOBAL_CUSTOM_OBJECTS: + obj = object_registration._GLOBAL_CUSTOM_OBJECTS[object_name] + else: + obj = module_objects.get(object_name) + if obj is None: + raise ValueError(f"Unknown {printable_module_name}: '{object_name}'. Please ensure you are using a `keras.utils.custom_object_scope` and that this object is included in the scope. 
See https://www.tensorflow.org/guide/keras/save_and_serialize#registering_the_custom_object for details.") + if inspect.isclass(obj): + return obj() + return obj + elif inspect.isfunction(identifier): + return identifier + else: + raise ValueError(f'Could not interpret serialized {printable_module_name}: {identifier}') + +def validate_config(config): + return isinstance(config, dict) and _LAYER_UNDEFINED_CONFIG_KEY not in config + +def is_default(method): + return getattr(method, '_is_default', False) + +def _find_replace_nested_dict(config, find, replace): + dict_str = json.dumps(config) + dict_str = dict_str.replace(find, replace) + config = json.loads(dict_str) + return config + +# File: keras-master/keras/src/losses/__init__.py +import inspect +from keras.src.api_export import keras_export +from keras.src.losses.loss import Loss +from keras.src.losses.losses import CTC +from keras.src.losses.losses import BinaryCrossentropy +from keras.src.losses.losses import BinaryFocalCrossentropy +from keras.src.losses.losses import CategoricalCrossentropy +from keras.src.losses.losses import CategoricalFocalCrossentropy +from keras.src.losses.losses import CategoricalHinge +from keras.src.losses.losses import CosineSimilarity +from keras.src.losses.losses import Dice +from keras.src.losses.losses import Hinge +from keras.src.losses.losses import Huber +from keras.src.losses.losses import KLDivergence +from keras.src.losses.losses import LogCosh +from keras.src.losses.losses import LossFunctionWrapper +from keras.src.losses.losses import MeanAbsoluteError +from keras.src.losses.losses import MeanAbsolutePercentageError +from keras.src.losses.losses import MeanSquaredError +from keras.src.losses.losses import MeanSquaredLogarithmicError +from keras.src.losses.losses import Poisson +from keras.src.losses.losses import SparseCategoricalCrossentropy +from keras.src.losses.losses import SquaredHinge +from keras.src.losses.losses import Tversky +from keras.src.losses.losses import binary_crossentropy +from keras.src.losses.losses import binary_focal_crossentropy +from keras.src.losses.losses import categorical_crossentropy +from keras.src.losses.losses import categorical_focal_crossentropy +from keras.src.losses.losses import categorical_hinge +from keras.src.losses.losses import cosine_similarity +from keras.src.losses.losses import ctc +from keras.src.losses.losses import dice +from keras.src.losses.losses import hinge +from keras.src.losses.losses import huber +from keras.src.losses.losses import kl_divergence +from keras.src.losses.losses import log_cosh +from keras.src.losses.losses import mean_absolute_error +from keras.src.losses.losses import mean_absolute_percentage_error +from keras.src.losses.losses import mean_squared_error +from keras.src.losses.losses import mean_squared_logarithmic_error +from keras.src.losses.losses import poisson +from keras.src.losses.losses import sparse_categorical_crossentropy +from keras.src.losses.losses import squared_hinge +from keras.src.losses.losses import tversky +from keras.src.saving import serialization_lib +ALL_OBJECTS = {Loss, LossFunctionWrapper, KLDivergence, Poisson, BinaryCrossentropy, BinaryFocalCrossentropy, CategoricalCrossentropy, CategoricalFocalCrossentropy, SparseCategoricalCrossentropy, MeanSquaredError, MeanAbsoluteError, MeanAbsolutePercentageError, MeanSquaredLogarithmicError, CosineSimilarity, LogCosh, Huber, Hinge, SquaredHinge, CategoricalHinge, Dice, Tversky, CTC, kl_divergence, poisson, binary_crossentropy, binary_focal_crossentropy, 
categorical_crossentropy, categorical_focal_crossentropy, sparse_categorical_crossentropy, mean_squared_error, mean_absolute_error, mean_absolute_percentage_error, mean_squared_logarithmic_error, cosine_similarity, log_cosh, huber, hinge, squared_hinge, categorical_hinge, dice, tversky, ctc} +ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS} +ALL_OBJECTS_DICT.update({'bce': binary_crossentropy, 'BCE': binary_crossentropy, 'kld': kl_divergence, 'KLD': kl_divergence, 'mae': mean_absolute_error, 'MAE': mean_absolute_error, 'mse': mean_squared_error, 'MSE': mean_squared_error, 'mape': mean_absolute_percentage_error, 'MAPE': mean_absolute_percentage_error, 'msle': mean_squared_logarithmic_error, 'MSLE': mean_squared_logarithmic_error}) + +@keras_export('keras.losses.serialize') +def serialize(loss): + return serialization_lib.serialize_keras_object(loss) + +@keras_export('keras.losses.deserialize') +def deserialize(name, custom_objects=None): + return serialization_lib.deserialize_keras_object(name, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects) + +@keras_export('keras.losses.get') +def get(identifier): + if identifier is None: + return None + if isinstance(identifier, dict): + obj = deserialize(identifier) + elif isinstance(identifier, str): + obj = ALL_OBJECTS_DICT.get(identifier, None) + else: + obj = identifier + if callable(obj): + if inspect.isclass(obj): + obj = obj() + return obj + else: + raise ValueError(f'Could not interpret loss identifier: {identifier}') + +# File: keras-master/keras/src/losses/loss.py +from keras.src import backend +from keras.src import dtype_policies +from keras.src import ops +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.saving.keras_saveable import KerasSaveable +from keras.src.utils.naming import auto_name + +@keras_export(['keras.Loss', 'keras.losses.Loss']) +class Loss(KerasSaveable): + + def __init__(self, name=None, reduction='sum_over_batch_size', dtype=None): + self.name = name or auto_name(self.__class__.__name__) + self.reduction = standardize_reduction(reduction) + self._dtype_policy = dtype_policies.get(dtype or backend.floatx()) + self._dtype = self._dtype_policy.compute_dtype + + @property + def dtype(self): + return self._dtype + + def __call__(self, y_true, y_pred, sample_weight=None): + in_mask = getattr(y_pred, '_keras_mask', None) + with ops.name_scope(self.name): + y_pred = tree.map_structure(lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_pred) + y_true = tree.map_structure(lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_true) + losses = self.call(y_true, y_pred) + out_mask = getattr(losses, '_keras_mask', None) + if in_mask is not None and out_mask is not None: + mask = in_mask & out_mask + elif in_mask is not None: + mask = in_mask + elif out_mask is not None: + mask = out_mask + else: + mask = None + return reduce_weighted_values(losses, sample_weight=sample_weight, mask=mask, reduction=self.reduction, dtype=self.dtype) + + def call(self, y_true, y_pred): + raise NotImplementedError + + def get_config(self): + return {'name': self.name, 'reduction': self.reduction} + + @classmethod + def from_config(cls, config): + return cls(**config) + + def _obj_type(self): + return 'Loss' + +def standardize_reduction(reduction): + allowed = {'sum_over_batch_size', 'sum', None, 'none'} + if reduction not in allowed: + raise ValueError(f'Invalid value for argument `reduction`. Expected one of {allowed}. 
Received: reduction={reduction}') + return reduction + +def squeeze_or_expand_to_same_rank(x1, x2, expand_rank_1=True): + x1_rank = len(x1.shape) + x2_rank = len(x2.shape) + if x1_rank == x2_rank: + return (x1, x2) + if x1_rank == x2_rank + 1: + if x1.shape[-1] == 1: + if x2_rank == 1 and expand_rank_1: + x2 = ops.expand_dims(x2, axis=-1) + else: + x1 = ops.squeeze(x1, axis=-1) + if x2_rank == x1_rank + 1: + if x2.shape[-1] == 1: + if x1_rank == 1 and expand_rank_1: + x1 = ops.expand_dims(x1, axis=-1) + else: + x2 = ops.squeeze(x2, axis=-1) + return (x1, x2) + +def reduce_values(values, reduction='sum_over_batch_size'): + if reduction is None or reduction == 'none' or tuple(values.shape) == () or (tuple(values.shape) == (0,)): + return values + loss = ops.sum(values) + if reduction == 'sum_over_batch_size': + loss /= ops.cast(ops.prod(ops.convert_to_tensor(ops.shape(values), dtype='int32')), loss.dtype) + return loss + +def reduce_weighted_values(values, sample_weight=None, mask=None, reduction='sum_over_batch_size', dtype=None): + reduction = standardize_reduction(reduction) + values = ops.convert_to_tensor(values, dtype=dtype) + if sample_weight is not None: + sample_weight = ops.convert_to_tensor(sample_weight, dtype=dtype) + if mask is not None: + mask = ops.convert_to_tensor(mask, dtype=dtype) + sample_weight = apply_mask(sample_weight, mask, dtype=values.dtype, reduction=reduction) + if sample_weight is not None: + sample_weight = ops.cast(sample_weight, values.dtype) + (values, sample_weight) = squeeze_or_expand_to_same_rank(values, sample_weight) + values = values * sample_weight + loss = reduce_values(values, reduction) + return loss + +def apply_mask(sample_weight, mask, dtype, reduction): + if mask is not None: + mask = ops.cast(mask, dtype=dtype) + if reduction == 'sum_over_batch_size': + total = ops.cast(ops.prod(ops.convert_to_tensor(ops.shape(mask), dtype='int32')), dtype) + valid = ops.sum(mask) + mask *= total / (valid + backend.epsilon()) + if sample_weight is not None: + sample_weight = ops.cast(sample_weight, dtype=dtype) + (mask, sample_weight) = squeeze_or_expand_to_same_rank(mask, sample_weight) + sample_weight *= mask + else: + sample_weight = mask + return sample_weight + +# File: keras-master/keras/src/losses/losses.py +import warnings +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.losses.loss import Loss +from keras.src.losses.loss import squeeze_or_expand_to_same_rank +from keras.src.saving import serialization_lib +from keras.src.utils.numerical_utils import normalize + +class LossFunctionWrapper(Loss): + + def __init__(self, fn, reduction='sum_over_batch_size', name=None, dtype=None, **kwargs): + super().__init__(name=name, reduction=reduction, dtype=dtype) + self.fn = fn + self._fn_kwargs = kwargs + + def call(self, y_true, y_pred): + (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred) + return self.fn(y_true, y_pred, **self._fn_kwargs) + + def get_config(self): + config = super().get_config() + config.update({'fn': serialization_lib.serialize_keras_object(self.fn)}) + config.update(serialization_lib.serialize_keras_object(self._fn_kwargs)) + return config + + @classmethod + def from_config(cls, config): + if 'fn' in config: + config = serialization_lib.deserialize_keras_object(config) + return cls(**config) + +@keras_export('keras.losses.MeanSquaredError') +class MeanSquaredError(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', 
name='mean_squared_error', dtype=None): + super().__init__(mean_squared_error, name=name, reduction=reduction, dtype=dtype) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.MeanAbsoluteError') +class MeanAbsoluteError(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', name='mean_absolute_error', dtype=None): + super().__init__(mean_absolute_error, name=name, reduction=reduction, dtype=dtype) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.MeanAbsolutePercentageError') +class MeanAbsolutePercentageError(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', name='mean_absolute_percentage_error', dtype=None): + super().__init__(mean_absolute_percentage_error, name=name, reduction=reduction, dtype=dtype) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.MeanSquaredLogarithmicError') +class MeanSquaredLogarithmicError(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', name='mean_squared_logarithmic_error', dtype=None): + super().__init__(mean_squared_logarithmic_error, name=name, reduction=reduction, dtype=dtype) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.CosineSimilarity') +class CosineSimilarity(LossFunctionWrapper): + + def __init__(self, axis=-1, reduction='sum_over_batch_size', name='cosine_similarity', dtype=None): + super().__init__(cosine_similarity, name=name, reduction=reduction, dtype=dtype, axis=axis) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.Huber') +class Huber(LossFunctionWrapper): + + def __init__(self, delta=1.0, reduction='sum_over_batch_size', name='huber_loss', dtype=None): + super().__init__(huber, name=name, reduction=reduction, dtype=dtype, delta=delta) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.LogCosh') +class LogCosh(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', name='log_cosh', dtype=None): + super().__init__(log_cosh, name=name, reduction=reduction, dtype=dtype) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.Hinge') +class Hinge(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', name='hinge', dtype=None): + super().__init__(hinge, name=name, reduction=reduction, dtype=dtype) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.SquaredHinge') +class SquaredHinge(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', name='squared_hinge', dtype=None): + super().__init__(squared_hinge, name=name, reduction=reduction, dtype=dtype) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.CategoricalHinge') +class CategoricalHinge(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', name='categorical_hinge', dtype=None): + super().__init__(categorical_hinge, name=name, reduction=reduction, dtype=dtype) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.KLDivergence') +class KLDivergence(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', name='kl_divergence', dtype=None): + super().__init__(kl_divergence, name=name, reduction=reduction, dtype=dtype) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.Poisson') +class 
Poisson(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', name='poisson', dtype=None): + super().__init__(poisson, name=name, reduction=reduction, dtype=dtype) + + def get_config(self): + return Loss.get_config(self) + +@keras_export('keras.losses.BinaryCrossentropy') +class BinaryCrossentropy(LossFunctionWrapper): + + def __init__(self, from_logits=False, label_smoothing=0.0, axis=-1, reduction='sum_over_batch_size', name='binary_crossentropy', dtype=None): + super().__init__(binary_crossentropy, name=name, reduction=reduction, dtype=dtype, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis) + self.from_logits = from_logits + self.label_smoothing = label_smoothing + self.axis = axis + + def get_config(self): + return {'name': self.name, 'reduction': self.reduction, 'from_logits': self.from_logits, 'label_smoothing': self.label_smoothing, 'axis': self.axis} + +@keras_export('keras.losses.BinaryFocalCrossentropy') +class BinaryFocalCrossentropy(LossFunctionWrapper): + + def __init__(self, apply_class_balancing=False, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1, reduction='sum_over_batch_size', name='binary_focal_crossentropy', dtype=None): + super().__init__(binary_focal_crossentropy, name=name, reduction=reduction, dtype=dtype, apply_class_balancing=apply_class_balancing, alpha=alpha, gamma=gamma, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis) + self.from_logits = from_logits + self.label_smoothing = label_smoothing + self.axis = axis + self.apply_class_balancing = apply_class_balancing + self.alpha = alpha + self.gamma = gamma + + def get_config(self): + return {'name': self.name, 'reduction': self.reduction, 'from_logits': self.from_logits, 'label_smoothing': self.label_smoothing, 'axis': self.axis, 'apply_class_balancing': self.apply_class_balancing, 'alpha': self.alpha, 'gamma': self.gamma} + +@keras_export('keras.losses.CategoricalCrossentropy') +class CategoricalCrossentropy(LossFunctionWrapper): + + def __init__(self, from_logits=False, label_smoothing=0.0, axis=-1, reduction='sum_over_batch_size', name='categorical_crossentropy', dtype=None): + super().__init__(categorical_crossentropy, name=name, reduction=reduction, dtype=dtype, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis) + self.from_logits = from_logits + self.label_smoothing = label_smoothing + self.axis = axis + + def get_config(self): + return {'name': self.name, 'reduction': self.reduction, 'from_logits': self.from_logits, 'label_smoothing': self.label_smoothing, 'axis': self.axis} + +@keras_export('keras.losses.CategoricalFocalCrossentropy') +class CategoricalFocalCrossentropy(LossFunctionWrapper): + + def __init__(self, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1, reduction='sum_over_batch_size', name='categorical_focal_crossentropy', dtype=None): + super().__init__(categorical_focal_crossentropy, name=name, reduction=reduction, dtype=dtype, alpha=alpha, gamma=gamma, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis) + self.from_logits = from_logits + self.label_smoothing = label_smoothing + self.axis = axis + self.alpha = alpha + self.gamma = gamma + + def get_config(self): + return {'name': self.name, 'reduction': self.reduction, 'from_logits': self.from_logits, 'label_smoothing': self.label_smoothing, 'axis': self.axis, 'alpha': self.alpha, 'gamma': self.gamma} + +@keras_export('keras.losses.SparseCategoricalCrossentropy') +class 
SparseCategoricalCrossentropy(LossFunctionWrapper): + + def __init__(self, from_logits=False, ignore_class=None, reduction='sum_over_batch_size', name='sparse_categorical_crossentropy', dtype=None): + super().__init__(sparse_categorical_crossentropy, name=name, reduction=reduction, dtype=dtype, from_logits=from_logits, ignore_class=ignore_class) + self.from_logits = from_logits + self.ignore_class = ignore_class + + def get_config(self): + return {'name': self.name, 'reduction': self.reduction, 'from_logits': self.from_logits, 'ignore_class': self.ignore_class} + +def convert_binary_labels_to_hinge(y_true): + are_zeros = ops.equal(y_true, 0) + are_ones = ops.equal(y_true, 1) + is_binary = ops.all(ops.logical_or(are_zeros, are_ones)) + + def _convert_binary_labels(): + return 2.0 * y_true - 1.0 + + def _return_labels_unconverted(): + return y_true + updated_y_true = ops.cond(is_binary, _convert_binary_labels, _return_labels_unconverted) + return updated_y_true + +@keras_export(['keras.metrics.hinge', 'keras.losses.hinge']) +def hinge(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, dtype=y_pred.dtype) + y_true = ops.convert_to_tensor(y_true) + y_true = convert_binary_labels_to_hinge(y_true) + return ops.mean(ops.maximum(1.0 - y_true * y_pred, 0.0), axis=-1) + +@keras_export(['keras.metrics.squared_hinge', 'keras.losses.squared_hinge']) +def squared_hinge(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + y_true = convert_binary_labels_to_hinge(y_true) + return ops.mean(ops.square(ops.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1) + +@keras_export(['keras.metrics.categorical_hinge', 'keras.losses.categorical_hinge']) +def categorical_hinge(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + pos = ops.sum(y_true * y_pred, axis=-1) + neg = ops.max((1.0 - y_true) * y_pred, axis=-1) + zero = ops.cast(0.0, y_pred.dtype) + return ops.maximum(neg - pos + 1.0, zero) + +@keras_export(['keras.metrics.mean_squared_error', 'keras.losses.mean_squared_error', 'keras._legacy.losses.mse', 'keras._legacy.losses.MSE', 'keras._legacy.metrics.mse', 'keras._legacy.metrics.MSE']) +def mean_squared_error(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred) + return ops.mean(ops.square(y_true - y_pred), axis=-1) + +@keras_export(['keras.metrics.mean_absolute_error', 'keras.losses.mean_absolute_error', 'keras._legacy.losses.MAE', 'keras._legacy.losses.mae', 'keras._legacy.metrics.MAE', 'keras._legacy.metrics.mae']) +def mean_absolute_error(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred) + return ops.mean(ops.abs(y_true - y_pred), axis=-1) + +@keras_export(['keras.metrics.mean_absolute_percentage_error', 'keras.losses.mean_absolute_percentage_error', 'keras._legacy.losses.mape', 'keras._legacy.losses.MAPE', 'keras._legacy.metrics.mape', 'keras._legacy.metrics.MAPE']) +def mean_absolute_percentage_error(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + epsilon = ops.convert_to_tensor(backend.epsilon(), dtype=y_pred.dtype) + (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred) + diff = ops.abs((y_true - y_pred) / 
ops.maximum(ops.abs(y_true), epsilon)) + return 100.0 * ops.mean(diff, axis=-1) + +@keras_export(['keras.metrics.mean_squared_logarithmic_error', 'keras.losses.mean_squared_logarithmic_error', 'keras._legacy.losses.msle', 'keras._legacy.losses.MSLE', 'keras._legacy.metrics.msle', 'keras._legacy.metrics.MSLE']) +def mean_squared_logarithmic_error(y_true, y_pred): + epsilon = ops.convert_to_tensor(backend.epsilon()) + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred) + first_log = ops.log(ops.maximum(y_pred, epsilon) + 1.0) + second_log = ops.log(ops.maximum(y_true, epsilon) + 1.0) + return ops.mean(ops.square(first_log - second_log), axis=-1) + +@keras_export('keras.losses.cosine_similarity') +def cosine_similarity(y_true, y_pred, axis=-1): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred) + y_pred = normalize(y_pred, axis=axis) + y_true = normalize(y_true, axis=axis) + return -ops.sum(y_true * y_pred, axis=axis) + +@keras_export(['keras.losses.huber', 'keras.metrics.huber']) +def huber(y_true, y_pred, delta=1.0): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred) + delta = ops.convert_to_tensor(delta, dtype=y_pred.dtype) + error = ops.subtract(y_pred, y_true) + abs_error = ops.abs(error) + half = ops.convert_to_tensor(0.5, dtype=abs_error.dtype) + return ops.mean(ops.where(abs_error <= delta, half * ops.square(error), delta * abs_error - half * ops.square(delta)), axis=-1) + +@keras_export(['keras.losses.log_cosh', 'keras.metrics.log_cosh', 'keras._legacy.losses.logcosh', 'keras._legacy.metrics.logcosh']) +def log_cosh(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred) + log2 = ops.convert_to_tensor(ops.log(2.0), dtype=y_pred.dtype) + + def _logcosh(x): + return x + ops.softplus(x * -2.0) - log2 + return ops.mean(_logcosh(y_pred - y_true), axis=-1) + +@keras_export(['keras.metrics.kl_divergence', 'keras.losses.kl_divergence', 'keras._legacy.losses.KLD', 'keras._legacy.losses.kld', 'keras._legacy.losses.kullback_leibler_divergence', 'keras._legacy.metrics.KLD', 'keras._legacy.metrics.kld', 'keras._legacy.metrics.kullback_leibler_divergence']) +def kl_divergence(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, y_pred.dtype) + y_true = ops.clip(y_true, backend.epsilon(), 1) + y_pred = ops.clip(y_pred, backend.epsilon(), 1) + return ops.sum(y_true * ops.log(y_true / y_pred), axis=-1) + +@keras_export(['keras.metrics.poisson', 'keras.losses.poisson']) +def poisson(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + epsilon = ops.convert_to_tensor(backend.epsilon(), dtype=y_pred.dtype) + return ops.mean(y_pred - y_true * ops.log(y_pred + epsilon), axis=-1) + +@keras_export(['keras.metrics.categorical_crossentropy', 'keras.losses.categorical_crossentropy']) +def categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1): + if isinstance(axis, bool): + raise ValueError(f'`axis` must be of type `int`. 
Received: axis={axis} of type {type(axis)}') + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + if y_pred.shape[-1] == 1: + warnings.warn(f"In loss categorical_crossentropy, expected y_pred.shape to be (batch_size, num_classes) with num_classes > 1. Received: y_pred.shape={y_pred.shape}. Consider using 'binary_crossentropy' if you only have 2 classes.", SyntaxWarning, stacklevel=2) + if label_smoothing: + num_classes = ops.cast(ops.shape(y_true)[-1], y_pred.dtype) + y_true = y_true * (1.0 - label_smoothing) + label_smoothing / num_classes + return ops.categorical_crossentropy(y_true, y_pred, from_logits=from_logits, axis=axis) + +@keras_export(['keras.metrics.categorical_focal_crossentropy', 'keras.losses.categorical_focal_crossentropy']) +def categorical_focal_crossentropy(y_true, y_pred, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1): + if isinstance(axis, bool): + raise ValueError(f'`axis` must be of type `int`. Received: axis={axis} of type {type(axis)}') + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + if y_pred.shape[-1] == 1: + warnings.warn(f"In loss categorical_focal_crossentropy, expected y_pred.shape to be (batch_size, num_classes) with num_classes > 1. Received: y_pred.shape={y_pred.shape}. Consider using 'binary_crossentropy' if you only have 2 classes.", SyntaxWarning, stacklevel=2) + if label_smoothing: + num_classes = ops.cast(ops.shape(y_true)[-1], y_pred.dtype) + y_true = y_true * (1.0 - label_smoothing) + label_smoothing / num_classes + if from_logits: + y_pred = ops.softmax(y_pred, axis=axis) + output = y_pred / ops.sum(y_pred, axis=axis, keepdims=True) + output = ops.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) + cce = -y_true * ops.log(output) + modulating_factor = ops.power(1.0 - output, gamma) + weighting_factor = ops.multiply(modulating_factor, alpha) + focal_cce = ops.multiply(weighting_factor, cce) + focal_cce = ops.sum(focal_cce, axis=axis) + return focal_cce + +@keras_export(['keras.metrics.sparse_categorical_crossentropy', 'keras.losses.sparse_categorical_crossentropy']) +def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, ignore_class=None, axis=-1): + if len(y_true.shape) == len(y_pred.shape) and y_true.shape[-1] == 1: + y_true = ops.squeeze(y_true, axis=-1) + if ignore_class is not None: + res_shape = ops.shape(y_pred)[:-1] + valid_mask = ops.not_equal(y_true, ops.cast(ignore_class, y_pred.dtype)) + y_true = y_true * ops.cast(valid_mask, y_true.dtype) + y_pred = y_pred * ops.cast(ops.expand_dims(valid_mask, -1), y_pred.dtype) + res = ops.sparse_categorical_crossentropy(y_true, y_pred, from_logits=from_logits, axis=axis) + if ignore_class is not None: + valid_mask = ops.reshape(valid_mask, res_shape) + res = ops.where(valid_mask, res, 0.0) + try: + res._keras_mask = valid_mask + except AttributeError: + pass + return res + +@keras_export(['keras.metrics.binary_crossentropy', 'keras.losses.binary_crossentropy']) +def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + if label_smoothing: + y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing + return ops.mean(ops.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=axis) + +@keras_export(['keras.metrics.binary_focal_crossentropy', 'keras.losses.binary_focal_crossentropy']) +def binary_focal_crossentropy(y_true, y_pred, 
apply_class_balancing=False, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + if label_smoothing: + y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing + if from_logits: + y_pred = ops.sigmoid(y_pred) + bce = ops.binary_crossentropy(target=y_true, output=y_pred, from_logits=False) + p_t = y_true * y_pred + (1 - y_true) * (1 - y_pred) + focal_factor = ops.power(1.0 - p_t, gamma) + focal_bce = focal_factor * bce + if apply_class_balancing: + weight = y_true * alpha + (1 - y_true) * (1 - alpha) + focal_bce = weight * focal_bce + return ops.mean(focal_bce, axis=axis) + +@keras_export('keras.losses.CTC') +class CTC(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', name='ctc', dtype=None): + super().__init__(ctc, name=name, reduction=reduction, dtype=dtype) + + def get_config(self): + return {'name': self.name, 'reduction': self.reduction} + +@keras_export('keras.losses.ctc') +def ctc(y_true, y_pred): + if len(ops.shape(y_true)) != 2: + raise ValueError(f'Targets `y_true` are expected to be a tensor of shape `(batch_size, max_length)` in integer format. Received: y_true.shape={ops.shape(y_true)}') + if len(ops.shape(y_pred)) != 3: + raise ValueError(f'Logits `y_pred` are expected to be a tensor of shape `(batch_size, max_length, num_classes)`. Received: y_pred.shape={ops.shape(y_pred)}') + mask_index = 0 + batch_length = ops.shape(y_pred)[0] + input_length = ops.shape(y_pred)[1] + input_length = input_length * ops.ones((batch_length,), dtype='int32') + label_length = ops.cast(ops.sum(y_true != mask_index, axis=-1), dtype='int32') + return ops.ctc_loss(y_true, y_pred, label_length, input_length, mask_index=mask_index) + +@keras_export('keras.losses.Dice') +class Dice(LossFunctionWrapper): + + def __init__(self, reduction='sum_over_batch_size', name='dice', axis=None, dtype=None): + super().__init__(dice, name=name, reduction=reduction, dtype=dtype, axis=axis) + self.axis = axis + + def get_config(self): + return {'name': self.name, 'reduction': self.reduction, 'axis': self.axis} + +@keras_export('keras.losses.dice') +def dice(y_true, y_pred, axis=None): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + inputs = y_true + targets = y_pred + intersection = ops.sum(inputs * targets, axis=axis) + dice = ops.divide(2.0 * intersection, ops.sum(y_true, axis=axis) + ops.sum(y_pred, axis=axis) + backend.epsilon()) + return 1 - dice + +@keras_export('keras.losses.Tversky') +class Tversky(LossFunctionWrapper): + + def __init__(self, alpha=0.5, beta=0.5, reduction='sum_over_batch_size', name='tversky', dtype=None): + super().__init__(tversky, name=name, reduction=reduction, dtype=dtype, alpha=alpha, beta=beta) + self.alpha = alpha + self.beta = beta + + def get_config(self): + return {'name': self.name, 'alpha': self.alpha, 'beta': self.beta, 'reduction': self.reduction} + +@keras_export('keras.losses.tversky') +def tversky(y_true, y_pred, alpha=0.5, beta=0.5): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + inputs = ops.reshape(y_true, [-1]) + targets = ops.reshape(y_pred, [-1]) + intersection = ops.sum(inputs * targets) + fp = ops.sum((1 - targets) * inputs) + fn = ops.sum(targets * (1 - inputs)) + tversky = ops.divide(intersection, intersection + fp * alpha + fn * beta + backend.epsilon()) + return 1 - tversky + +# File: keras-master/keras/src/metrics/__init__.py +import inspect +from 
keras.src.api_export import keras_export +from keras.src.metrics.accuracy_metrics import Accuracy +from keras.src.metrics.accuracy_metrics import BinaryAccuracy +from keras.src.metrics.accuracy_metrics import CategoricalAccuracy +from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy +from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy +from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy +from keras.src.metrics.confusion_metrics import AUC +from keras.src.metrics.confusion_metrics import FalseNegatives +from keras.src.metrics.confusion_metrics import FalsePositives +from keras.src.metrics.confusion_metrics import Precision +from keras.src.metrics.confusion_metrics import PrecisionAtRecall +from keras.src.metrics.confusion_metrics import Recall +from keras.src.metrics.confusion_metrics import RecallAtPrecision +from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity +from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity +from keras.src.metrics.confusion_metrics import TrueNegatives +from keras.src.metrics.confusion_metrics import TruePositives +from keras.src.metrics.f_score_metrics import F1Score +from keras.src.metrics.f_score_metrics import FBetaScore +from keras.src.metrics.hinge_metrics import CategoricalHinge +from keras.src.metrics.hinge_metrics import Hinge +from keras.src.metrics.hinge_metrics import SquaredHinge +from keras.src.metrics.iou_metrics import BinaryIoU +from keras.src.metrics.iou_metrics import IoU +from keras.src.metrics.iou_metrics import MeanIoU +from keras.src.metrics.iou_metrics import OneHotIoU +from keras.src.metrics.iou_metrics import OneHotMeanIoU +from keras.src.metrics.metric import Metric +from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy +from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy +from keras.src.metrics.probabilistic_metrics import KLDivergence +from keras.src.metrics.probabilistic_metrics import Poisson +from keras.src.metrics.probabilistic_metrics import SparseCategoricalCrossentropy +from keras.src.metrics.reduction_metrics import Mean +from keras.src.metrics.reduction_metrics import MeanMetricWrapper +from keras.src.metrics.reduction_metrics import Sum +from keras.src.metrics.regression_metrics import CosineSimilarity +from keras.src.metrics.regression_metrics import LogCoshError +from keras.src.metrics.regression_metrics import MeanAbsoluteError +from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError +from keras.src.metrics.regression_metrics import MeanSquaredError +from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError +from keras.src.metrics.regression_metrics import R2Score +from keras.src.metrics.regression_metrics import RootMeanSquaredError +from keras.src.saving import serialization_lib +from keras.src.utils.naming import to_snake_case +ALL_OBJECTS = {Metric, Mean, Sum, MeanMetricWrapper, MeanSquaredError, RootMeanSquaredError, MeanAbsoluteError, MeanAbsolutePercentageError, MeanSquaredLogarithmicError, CosineSimilarity, LogCoshError, R2Score, AUC, FalseNegatives, FalsePositives, Precision, PrecisionAtRecall, Recall, RecallAtPrecision, SensitivityAtSpecificity, SpecificityAtSensitivity, TrueNegatives, TruePositives, Hinge, SquaredHinge, CategoricalHinge, KLDivergence, Poisson, BinaryCrossentropy, CategoricalCrossentropy, SparseCategoricalCrossentropy, Accuracy, BinaryAccuracy, CategoricalAccuracy, SparseCategoricalAccuracy, TopKCategoricalAccuracy, 
SparseTopKCategoricalAccuracy, F1Score, FBetaScore, IoU, BinaryIoU, MeanIoU, OneHotIoU, OneHotMeanIoU} +ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS} +ALL_OBJECTS_DICT.update({to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}) +ALL_OBJECTS_DICT.update({'bce': BinaryCrossentropy, 'BCE': BinaryCrossentropy, 'mse': MeanSquaredError, 'MSE': MeanSquaredError, 'mae': MeanAbsoluteError, 'MAE': MeanAbsoluteError, 'mape': MeanAbsolutePercentageError, 'MAPE': MeanAbsolutePercentageError, 'msle': MeanSquaredLogarithmicError, 'MSLE': MeanSquaredLogarithmicError}) + +@keras_export('keras.metrics.serialize') +def serialize(metric): + return serialization_lib.serialize_keras_object(metric) + +@keras_export('keras.metrics.deserialize') +def deserialize(config, custom_objects=None): + return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects) + +@keras_export('keras.metrics.get') +def get(identifier): + if identifier is None: + return None + if isinstance(identifier, dict): + obj = deserialize(identifier) + elif isinstance(identifier, str): + obj = ALL_OBJECTS_DICT.get(identifier, None) + else: + obj = identifier + if callable(obj): + if inspect.isclass(obj): + obj = obj() + return obj + else: + raise ValueError(f'Could not interpret metric identifier: {identifier}') + +# File: keras-master/keras/src/metrics/accuracy_metrics.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.losses.loss import squeeze_or_expand_to_same_rank +from keras.src.metrics import reduction_metrics + +def accuracy(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred) + return ops.cast(ops.equal(y_true, y_pred), dtype=backend.floatx()) + +@keras_export('keras.metrics.Accuracy') +class Accuracy(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='accuracy', dtype=None): + super().__init__(fn=accuracy, name=name, dtype=dtype) + self._direction = 'up' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype} + +@keras_export('keras.metrics.binary_accuracy') +def binary_accuracy(y_true, y_pred, threshold=0.5): + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred) + threshold = ops.cast(threshold, y_pred.dtype) + y_pred = ops.cast(y_pred > threshold, y_true.dtype) + return ops.cast(ops.equal(y_true, y_pred), dtype=backend.floatx()) + +@keras_export('keras.metrics.BinaryAccuracy') +class BinaryAccuracy(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='binary_accuracy', dtype=None, threshold=0.5): + super().__init__(fn=binary_accuracy, name=name, dtype=dtype, threshold=threshold) + self.threshold = threshold + self._direction = 'up' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype, 'threshold': self.threshold} + +@keras_export('keras.metrics.categorical_accuracy') +def categorical_accuracy(y_true, y_pred): + y_true = ops.argmax(y_true, axis=-1) + reshape_matches = False + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true_org_shape = ops.shape(y_true) + y_pred_rank = len(y_pred.shape) + y_true_rank = len(y_true.shape) + if y_true_rank is not None and y_pred_rank is not None and (len(y_true.shape) == 
len(y_pred.shape)): + y_true = ops.squeeze(y_true, -1) + reshape_matches = True + y_pred = ops.argmax(y_pred, axis=-1) + if y_pred.dtype is not y_true.dtype: + y_pred = ops.cast(y_pred, dtype=y_true.dtype) + matches = ops.cast(ops.equal(y_true, y_pred), backend.floatx()) + if reshape_matches: + matches = ops.reshape(matches, y_true_org_shape) + return matches + +@keras_export('keras.metrics.CategoricalAccuracy') +class CategoricalAccuracy(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='categorical_accuracy', dtype=None): + super().__init__(fn=categorical_accuracy, name=name, dtype=dtype) + self._direction = 'up' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype} + +@keras_export('keras.metrics.sparse_categorical_accuracy') +def sparse_categorical_accuracy(y_true, y_pred): + reshape_matches = False + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true_org_shape = ops.shape(y_true) + y_pred_rank = len(y_pred.shape) + y_true_rank = len(y_true.shape) + if y_true_rank is not None and y_pred_rank is not None and (len(y_true.shape) == len(y_pred.shape)) and (ops.shape(y_true)[-1] == 1): + y_true = ops.squeeze(y_true, -1) + reshape_matches = True + y_pred = ops.argmax(y_pred, axis=-1) + if y_pred.dtype is not y_true.dtype: + y_pred = ops.cast(y_pred, y_true.dtype) + matches = ops.cast(ops.equal(y_true, y_pred), backend.floatx()) + if reshape_matches: + matches = ops.reshape(matches, y_true_org_shape) + if len(matches.shape) > 1 and matches.shape[-1] == 1: + matches = ops.squeeze(matches, -1) + return matches + +@keras_export('keras.metrics.SparseCategoricalAccuracy') +class SparseCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='sparse_categorical_accuracy', dtype=None): + super().__init__(fn=sparse_categorical_accuracy, name=name, dtype=dtype) + self._direction = 'up' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype} + +@keras_export('keras.metrics.top_k_categorical_accuracy') +def top_k_categorical_accuracy(y_true, y_pred, k=5): + reshape_matches = False + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true = ops.argmax(y_true, axis=-1) + y_true_rank = len(y_true.shape) + y_pred_rank = len(y_pred.shape) + y_true_org_shape = ops.shape(y_true) + if y_true_rank is not None and y_pred_rank is not None: + if y_pred_rank > 2: + y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]]) + if y_true_rank > 1: + reshape_matches = True + y_true = ops.reshape(y_true, [-1]) + matches = ops.cast(ops.in_top_k(ops.cast(y_true, 'int32'), y_pred, k=k), dtype=backend.floatx()) + if reshape_matches: + matches = ops.reshape(matches, y_true_org_shape) + return matches + +@keras_export('keras.metrics.TopKCategoricalAccuracy') +class TopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): + + def __init__(self, k=5, name='top_k_categorical_accuracy', dtype=None): + super().__init__(fn=top_k_categorical_accuracy, name=name, dtype=dtype, k=k) + self.k = k + self._direction = 'up' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype, 'k': self.k} + +@keras_export('keras.metrics.sparse_top_k_categorical_accuracy') +def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5): + reshape_matches = False + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true_rank = len(y_true.shape) + y_pred_rank = len(y_pred.shape) + 
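+ # Note: ops.in_top_k expects 2D scores of shape (num_samples, num_classes) and a 1D integer label vector, so higher-rank inputs are flattened below and the per-sample matches are reshaped back to the original label shape before returning.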
y_true_org_shape = ops.shape(y_true) + if y_true_rank is not None and y_pred_rank is not None: + if y_pred_rank > 2: + y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]]) + if y_true_rank > 1: + reshape_matches = True + y_true = ops.reshape(y_true, [-1]) + matches = ops.cast(ops.in_top_k(ops.cast(y_true, 'int32'), y_pred, k=k), dtype=backend.floatx()) + if reshape_matches: + matches = ops.reshape(matches, y_true_org_shape) + return matches + +@keras_export('keras.metrics.SparseTopKCategoricalAccuracy') +class SparseTopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper): + + def __init__(self, k=5, name='sparse_top_k_categorical_accuracy', dtype=None): + super().__init__(fn=sparse_top_k_categorical_accuracy, name=name, dtype=dtype, k=k) + self.k = k + self._direction = 'up' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype, 'k': self.k} + +# File: keras-master/keras/src/metrics/confusion_metrics.py +import numpy as np +from keras.src import activations +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.metrics import metrics_utils +from keras.src.metrics.metric import Metric +from keras.src.utils.python_utils import to_list + +class _ConfusionMatrixConditionCount(Metric): + + def __init__(self, confusion_matrix_cond, thresholds=None, name=None, dtype=None): + super().__init__(name=name, dtype=dtype) + self._confusion_matrix_cond = confusion_matrix_cond + self.init_thresholds = thresholds + self.thresholds = metrics_utils.parse_init_thresholds(thresholds, default_threshold=0.5) + self._thresholds_distributed_evenly = metrics_utils.is_evenly_distributed_thresholds(self.thresholds) + self.accumulator = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='accumulator') + + def update_state(self, y_true, y_pred, sample_weight=None): + return metrics_utils.update_confusion_matrix_variables({self._confusion_matrix_cond: self.accumulator}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight) + + def result(self): + if len(self.thresholds) == 1: + result = self.accumulator[0] + else: + result = self.accumulator + return backend.convert_to_tensor(result) + + def get_config(self): + config = {'thresholds': self.init_thresholds} + base_config = super().get_config() + return {**base_config, **config} + +@keras_export('keras.metrics.FalsePositives') +class FalsePositives(_ConfusionMatrixConditionCount): + + def __init__(self, thresholds=None, name=None, dtype=None): + super().__init__(confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES, thresholds=thresholds, name=name, dtype=dtype) + +@keras_export('keras.metrics.FalseNegatives') +class FalseNegatives(_ConfusionMatrixConditionCount): + + def __init__(self, thresholds=None, name=None, dtype=None): + super().__init__(confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES, thresholds=thresholds, name=name, dtype=dtype) + +@keras_export('keras.metrics.TrueNegatives') +class TrueNegatives(_ConfusionMatrixConditionCount): + + def __init__(self, thresholds=None, name=None, dtype=None): + super().__init__(confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES, thresholds=thresholds, name=name, dtype=dtype) + +@keras_export('keras.metrics.TruePositives') +class TruePositives(_ConfusionMatrixConditionCount): + + def __init__(self, thresholds=None, name=None, 
dtype=None): + super().__init__(confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_POSITIVES, thresholds=thresholds, name=name, dtype=dtype) + +@keras_export('keras.metrics.Precision') +class Precision(Metric): + + def __init__(self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None): + super().__init__(name=name, dtype=dtype) + self._direction = 'up' + self.init_thresholds = thresholds + self.top_k = top_k + self.class_id = class_id + default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF + self.thresholds = metrics_utils.parse_init_thresholds(thresholds, default_threshold=default_threshold) + self._thresholds_distributed_evenly = metrics_utils.is_evenly_distributed_thresholds(self.thresholds) + self.true_positives = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='true_positives') + self.false_positives = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='false_positives') + + def update_state(self, y_true, y_pred, sample_weight=None): + metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight) + + def result(self): + result = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_positives)) + return result[0] if len(self.thresholds) == 1 else result + + def reset_state(self): + num_thresholds = len(to_list(self.thresholds)) + self.true_positives.assign(ops.zeros((num_thresholds,))) + self.false_positives.assign(ops.zeros((num_thresholds,))) + + def get_config(self): + config = {'thresholds': self.init_thresholds, 'top_k': self.top_k, 'class_id': self.class_id} + base_config = super().get_config() + return {**base_config, **config} + +@keras_export('keras.metrics.Recall') +class Recall(Metric): + + def __init__(self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None): + super().__init__(name=name, dtype=dtype) + self._direction = 'up' + self.init_thresholds = thresholds + self.top_k = top_k + self.class_id = class_id + default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF + self.thresholds = metrics_utils.parse_init_thresholds(thresholds, default_threshold=default_threshold) + self._thresholds_distributed_evenly = metrics_utils.is_evenly_distributed_thresholds(self.thresholds) + self.true_positives = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='true_positives') + self.false_negatives = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='false_negatives') + + def update_state(self, y_true, y_pred, sample_weight=None): + metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight) + + def result(self): + result = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_negatives)) + return result[0] if len(self.thresholds) == 1 else result + + def reset_state(self): + num_thresholds = len(to_list(self.thresholds)) + 
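+ # Zero one accumulator slot per threshold (true positives and false negatives) so the same Recall instance can be reused across epochs or evaluation runs.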
self.true_positives.assign(ops.zeros((num_thresholds,))) + self.false_negatives.assign(ops.zeros((num_thresholds,))) + + def get_config(self): + config = {'thresholds': self.init_thresholds, 'top_k': self.top_k, 'class_id': self.class_id} + base_config = super().get_config() + return {**base_config, **config} + +class SensitivitySpecificityBase(Metric): + + def __init__(self, value, num_thresholds=200, class_id=None, name=None, dtype=None): + super().__init__(name=name, dtype=dtype) + self._direction = 'up' + if num_thresholds <= 0: + raise ValueError(f'Argument `num_thresholds` must be an integer > 0. Received: num_thresholds={num_thresholds}') + self.value = value + self.class_id = class_id + if num_thresholds == 1: + self.thresholds = [0.5] + self._thresholds_distributed_evenly = False + else: + thresholds = [(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)] + self.thresholds = [0.0] + thresholds + [1.0] + self._thresholds_distributed_evenly = True + self.true_positives = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='true_positives') + self.false_positives = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='false_positives') + self.true_negatives = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='true_negatives') + self.false_negatives = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='false_negatives') + + def update_state(self, y_true, y_pred, sample_weight=None): + metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, class_id=self.class_id, sample_weight=sample_weight) + + def reset_state(self): + num_thresholds = len(self.thresholds) + self.true_positives.assign(ops.zeros((num_thresholds,))) + self.false_positives.assign(ops.zeros((num_thresholds,))) + self.true_negatives.assign(ops.zeros((num_thresholds,))) + self.false_negatives.assign(ops.zeros((num_thresholds,))) + + def get_config(self): + config = {'class_id': self.class_id} + base_config = super().get_config() + return {**base_config, **config} + + def _find_max_under_constraint(self, constrained, dependent, predicate): + feasible = ops.nonzero(predicate(constrained, self.value)) + feasible_exists = ops.greater(ops.size(feasible), 0) + max_dependent = ops.max(ops.take(dependent, feasible), initial=0) + return ops.where(feasible_exists, max_dependent, 0.0) + +@keras_export('keras.metrics.SensitivityAtSpecificity') +class SensitivityAtSpecificity(SensitivitySpecificityBase): + + def __init__(self, specificity, num_thresholds=200, class_id=None, name=None, dtype=None): + if specificity < 0 or specificity > 1: + raise ValueError(f'Argument `specificity` must be in the range [0, 1]. 
Received: specificity={specificity}') + self.specificity = specificity + self.num_thresholds = num_thresholds + super().__init__(specificity, num_thresholds=num_thresholds, class_id=class_id, name=name, dtype=dtype) + + def result(self): + sensitivities = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_negatives)) + specificities = ops.divide_no_nan(self.true_negatives, ops.add(self.true_negatives, self.false_positives)) + return self._find_max_under_constraint(specificities, sensitivities, ops.greater_equal) + + def get_config(self): + config = {'num_thresholds': self.num_thresholds, 'specificity': self.specificity} + base_config = super().get_config() + return {**base_config, **config} + +@keras_export('keras.metrics.SpecificityAtSensitivity') +class SpecificityAtSensitivity(SensitivitySpecificityBase): + + def __init__(self, sensitivity, num_thresholds=200, class_id=None, name=None, dtype=None): + if sensitivity < 0 or sensitivity > 1: + raise ValueError(f'Argument `sensitivity` must be in the range [0, 1]. Received: sensitivity={sensitivity}') + self.sensitivity = sensitivity + self.num_thresholds = num_thresholds + super().__init__(sensitivity, num_thresholds=num_thresholds, class_id=class_id, name=name, dtype=dtype) + + def result(self): + sensitivities = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_negatives)) + specificities = ops.divide_no_nan(self.true_negatives, ops.add(self.true_negatives, self.false_positives)) + return self._find_max_under_constraint(sensitivities, specificities, ops.greater_equal) + + def get_config(self): + config = {'num_thresholds': self.num_thresholds, 'sensitivity': self.sensitivity} + base_config = super().get_config() + return {**base_config, **config} + +@keras_export('keras.metrics.PrecisionAtRecall') +class PrecisionAtRecall(SensitivitySpecificityBase): + + def __init__(self, recall, num_thresholds=200, class_id=None, name=None, dtype=None): + if recall < 0 or recall > 1: + raise ValueError(f'Argument `recall` must be in the range [0, 1]. Received: recall={recall}') + self.recall = recall + self.num_thresholds = num_thresholds + super().__init__(value=recall, num_thresholds=num_thresholds, class_id=class_id, name=name, dtype=dtype) + + def result(self): + recalls = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_negatives)) + precisions = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_positives)) + return self._find_max_under_constraint(recalls, precisions, ops.greater_equal) + + def get_config(self): + config = {'num_thresholds': self.num_thresholds, 'recall': self.recall} + base_config = super().get_config() + return {**base_config, **config} + +@keras_export('keras.metrics.RecallAtPrecision') +class RecallAtPrecision(SensitivitySpecificityBase): + + def __init__(self, precision, num_thresholds=200, class_id=None, name=None, dtype=None): + if precision < 0 or precision > 1: + raise ValueError(f'Argument `precision` must be in the range [0, 1]. 
Received: precision={precision}') + self.precision = precision + self.num_thresholds = num_thresholds + super().__init__(value=precision, num_thresholds=num_thresholds, class_id=class_id, name=name, dtype=dtype) + + def result(self): + recalls = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_negatives)) + precisions = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_positives)) + return self._find_max_under_constraint(precisions, recalls, ops.greater_equal) + + def get_config(self): + config = {'num_thresholds': self.num_thresholds, 'precision': self.precision} + base_config = super().get_config() + return {**base_config, **config} + +@keras_export('keras.metrics.AUC') +class AUC(Metric): + + def __init__(self, num_thresholds=200, curve='ROC', summation_method='interpolation', name=None, dtype=None, thresholds=None, multi_label=False, num_labels=None, label_weights=None, from_logits=False): + self._direction = 'up' + if isinstance(curve, metrics_utils.AUCCurve) and curve not in list(metrics_utils.AUCCurve): + raise ValueError(f'Invalid `curve` argument value "{curve}". Expected one of: {list(metrics_utils.AUCCurve)}') + if isinstance(summation_method, metrics_utils.AUCSummationMethod) and summation_method not in list(metrics_utils.AUCSummationMethod): + raise ValueError(f'Invalid `summation_method` argument value "{summation_method}". Expected one of: {list(metrics_utils.AUCSummationMethod)}') + self._init_from_thresholds = thresholds is not None + if thresholds is not None: + self.num_thresholds = len(thresholds) + 2 + thresholds = sorted(thresholds) + self._thresholds_distributed_evenly = metrics_utils.is_evenly_distributed_thresholds(np.array([0.0] + thresholds + [1.0])) + else: + if num_thresholds <= 1: + raise ValueError(f'Argument `num_thresholds` must be an integer > 1. Received: num_thresholds={num_thresholds}') + self.num_thresholds = num_thresholds + thresholds = [(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)] + self._thresholds_distributed_evenly = True + self._thresholds = np.array([0.0 - backend.epsilon()] + thresholds + [1.0 + backend.epsilon()]) + if isinstance(curve, metrics_utils.AUCCurve): + self.curve = curve + else: + self.curve = metrics_utils.AUCCurve.from_str(curve) + if isinstance(summation_method, metrics_utils.AUCSummationMethod): + self.summation_method = summation_method + else: + self.summation_method = metrics_utils.AUCSummationMethod.from_str(summation_method) + super().__init__(name=name, dtype=dtype) + self.multi_label = multi_label + self.num_labels = num_labels + if label_weights is not None: + label_weights = ops.array(label_weights, dtype=self.dtype) + self.label_weights = label_weights + else: + self.label_weights = None + self._from_logits = from_logits + self._built = False + if self.multi_label: + if num_labels: + shape = [None, num_labels] + self._build(shape) + else: + if num_labels: + raise ValueError('`num_labels` is needed only when `multi_label` is True.') + self._build(None) + + @property + def thresholds(self): + return list(self._thresholds) + + def _build(self, shape): + if self.multi_label: + if len(shape) != 2: + raise ValueError(f'`y_pred` must have rank 2 when `multi_label=True`. Found rank {len(shape)}. 
Full shape received for `y_pred`: {shape}') + self._num_labels = shape[1] + variable_shape = [self.num_thresholds, self._num_labels] + else: + variable_shape = [self.num_thresholds] + self._build_input_shape = shape + self.true_positives = self.add_variable(shape=variable_shape, initializer=initializers.Zeros(), name='true_positives') + self.false_positives = self.add_variable(shape=variable_shape, initializer=initializers.Zeros(), name='false_positives') + self.true_negatives = self.add_variable(shape=variable_shape, initializer=initializers.Zeros(), name='true_negatives') + self.false_negatives = self.add_variable(shape=variable_shape, initializer=initializers.Zeros(), name='false_negatives') + self._built = True + + def update_state(self, y_true, y_pred, sample_weight=None): + if not self._built: + self._build(y_pred.shape) + if self.multi_label or self.label_weights is not None: + shapes = [(y_true, ('N', 'L'))] + if self.multi_label: + shapes.extend([(self.true_positives, ('T', 'L')), (self.true_negatives, ('T', 'L')), (self.false_positives, ('T', 'L')), (self.false_negatives, ('T', 'L'))]) + if self.label_weights is not None: + shapes.append((self.label_weights, ('L',))) + label_weights = None if self.multi_label else self.label_weights + if self._from_logits: + y_pred = activations.sigmoid(y_pred) + metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, self._thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight, multi_label=self.multi_label, label_weights=label_weights) + + def interpolate_pr_auc(self): + dtp = ops.subtract(self.true_positives[:self.num_thresholds - 1], self.true_positives[1:]) + p = ops.add(self.true_positives, self.false_positives) + dp = ops.subtract(p[:self.num_thresholds - 1], p[1:]) + prec_slope = ops.divide_no_nan(dtp, ops.maximum(dp, 0)) + intercept = ops.subtract(self.true_positives[1:], ops.multiply(prec_slope, p[1:])) + safe_p_ratio = ops.where(ops.logical_and(p[:self.num_thresholds - 1] > 0, p[1:] > 0), ops.divide_no_nan(p[:self.num_thresholds - 1], ops.maximum(p[1:], 0)), ops.ones_like(p[1:])) + pr_auc_increment = ops.divide_no_nan(ops.multiply(prec_slope, ops.add(dtp, ops.multiply(intercept, ops.log(safe_p_ratio)))), ops.maximum(ops.add(self.true_positives[1:], self.false_negatives[1:]), 0)) + if self.multi_label: + by_label_auc = ops.sum(pr_auc_increment, axis=0) + if self.label_weights is None: + return ops.mean(by_label_auc) + else: + return ops.divide_no_nan(ops.sum(ops.multiply(by_label_auc, self.label_weights)), ops.sum(self.label_weights)) + else: + return ops.sum(pr_auc_increment) + + def result(self): + if self.curve == metrics_utils.AUCCurve.PR and self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION: + return self.interpolate_pr_auc() + recall = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_negatives)) + if self.curve == metrics_utils.AUCCurve.ROC: + fp_rate = ops.divide_no_nan(self.false_positives, ops.add(self.false_positives, self.true_negatives)) + x = fp_rate + y = recall + else: + precision = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_positives)) + x = recall + y = precision + if self.summation_method == 
metrics_utils.AUCSummationMethod.INTERPOLATION: + heights = ops.divide(ops.add(y[:self.num_thresholds - 1], y[1:]), 2.0) + elif self.summation_method == metrics_utils.AUCSummationMethod.MINORING: + heights = ops.minimum(y[:self.num_thresholds - 1], y[1:]) + else: + heights = ops.maximum(y[:self.num_thresholds - 1], y[1:]) + riemann_terms = ops.multiply(ops.subtract(x[:self.num_thresholds - 1], x[1:]), heights) + if self.multi_label: + by_label_auc = ops.sum(riemann_terms, axis=0) + if self.label_weights is None: + return ops.mean(by_label_auc) + else: + return ops.divide_no_nan(ops.sum(ops.multiply(by_label_auc, self.label_weights)), ops.sum(self.label_weights)) + else: + return ops.sum(riemann_terms) + + def reset_state(self): + if self._built: + if self.multi_label: + variable_shape = (self.num_thresholds, self._num_labels) + else: + variable_shape = (self.num_thresholds,) + self.true_positives.assign(ops.zeros(variable_shape)) + self.false_positives.assign(ops.zeros(variable_shape)) + self.true_negatives.assign(ops.zeros(variable_shape)) + self.false_negatives.assign(ops.zeros(variable_shape)) + + def get_config(self): + label_weights = self.label_weights + config = {'num_thresholds': self.num_thresholds, 'curve': self.curve.value, 'summation_method': self.summation_method.value, 'multi_label': self.multi_label, 'num_labels': self.num_labels, 'label_weights': label_weights, 'from_logits': self._from_logits} + if self._init_from_thresholds: + config['thresholds'] = self.thresholds[1:-1] + base_config = super().get_config() + return {**base_config, **config} + +# File: keras-master/keras/src/metrics/f_score_metrics.py +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.metrics.metric import Metric + +@keras_export('keras.metrics.FBetaScore') +class FBetaScore(Metric): + + def __init__(self, average=None, beta=1.0, threshold=None, name='fbeta_score', dtype=None): + super().__init__(name=name, dtype=dtype) + self._direction = 'up' + if average not in (None, 'micro', 'macro', 'weighted'): + raise ValueError(f"Invalid `average` argument value. Expected one of: {{None, 'micro', 'macro', 'weighted'}}. Received: average={average}") + if not isinstance(beta, float): + raise ValueError(f"Invalid `beta` argument value. It should be a Python float. Received: beta={beta} of type '{type(beta)}'") + if beta <= 0.0: + raise ValueError(f'Invalid `beta` argument value. It should be > 0. Received: beta={beta}') + if threshold is not None: + if not isinstance(threshold, float): + raise ValueError(f"Invalid `threshold` argument value. It should be a Python float. Received: threshold={threshold} of type '{type(threshold)}'") + if threshold > 1.0 or threshold <= 0.0: + raise ValueError(f'Invalid `threshold` argument value. It should verify 0 < threshold <= 1. Received: threshold={threshold}') + self.average = average + self.beta = beta + self.threshold = threshold + self.axis = None + self._built = False + if self.average != 'micro': + self.axis = 0 + + def _build(self, y_true_shape, y_pred_shape): + if len(y_pred_shape) != 2 or len(y_true_shape) != 2: + raise ValueError(f'FBetaScore expects 2D inputs with shape (batch_size, output_dim). Received input shapes: y_pred.shape={y_pred_shape} and y_true.shape={y_true_shape}.') + if y_pred_shape[-1] is None or y_true_shape[-1] is None: + raise ValueError(f'FBetaScore expects 2D inputs with shape (batch_size, output_dim), with output_dim fully defined (not None). 
Received input shapes: y_pred.shape={y_pred_shape} and y_true.shape={y_true_shape}.') + num_classes = y_pred_shape[-1] + if self.average != 'micro': + init_shape = (num_classes,) + else: + init_shape = () + + def _add_zeros_variable(name): + return self.add_variable(name=name, shape=init_shape, initializer=initializers.Zeros(), dtype=self.dtype) + self.true_positives = _add_zeros_variable('true_positives') + self.false_positives = _add_zeros_variable('false_positives') + self.false_negatives = _add_zeros_variable('false_negatives') + self.intermediate_weights = _add_zeros_variable('intermediate_weights') + self._built = True + + def update_state(self, y_true, y_pred, sample_weight=None): + y_true = ops.convert_to_tensor(y_true, dtype=self.dtype) + y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype) + if not self._built: + self._build(y_true.shape, y_pred.shape) + if self.threshold is None: + threshold = ops.max(y_pred, axis=-1, keepdims=True) + y_pred = ops.logical_and(y_pred >= threshold, ops.abs(y_pred) > 1e-09) + else: + y_pred = y_pred > self.threshold + y_pred = ops.cast(y_pred, dtype=self.dtype) + y_true = ops.cast(y_true, dtype=self.dtype) + if sample_weight is not None: + sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype) + + def _weighted_sum(val, sample_weight): + if sample_weight is not None: + val = ops.multiply(val, ops.expand_dims(sample_weight, 1)) + return ops.sum(val, axis=self.axis) + self.true_positives.assign(self.true_positives + _weighted_sum(y_pred * y_true, sample_weight)) + self.false_positives.assign(self.false_positives + _weighted_sum(y_pred * (1 - y_true), sample_weight)) + self.false_negatives.assign(self.false_negatives + _weighted_sum((1 - y_pred) * y_true, sample_weight)) + self.intermediate_weights.assign(self.intermediate_weights + _weighted_sum(y_true, sample_weight)) + + def result(self): + precision = ops.divide(self.true_positives, self.true_positives + self.false_positives + backend.epsilon()) + recall = ops.divide(self.true_positives, self.true_positives + self.false_negatives + backend.epsilon()) + precision = ops.convert_to_tensor(precision, dtype=self.dtype) + recall = ops.convert_to_tensor(recall, dtype=self.dtype) + mul_value = precision * recall + add_value = self.beta ** 2 * precision + recall + mean = ops.divide(mul_value, add_value + backend.epsilon()) + f1_score = mean * (1 + self.beta ** 2) + if self.average == 'weighted': + weights = ops.divide(self.intermediate_weights, ops.sum(self.intermediate_weights) + backend.epsilon()) + f1_score = ops.sum(f1_score * weights) + elif self.average is not None: + f1_score = ops.mean(f1_score) + return f1_score + + def get_config(self): + config = {'name': self.name, 'dtype': self.dtype, 'average': self.average, 'beta': self.beta, 'threshold': self.threshold} + base_config = super().get_config() + return {**base_config, **config} + + def reset_state(self): + for v in self.variables: + v.assign(ops.zeros(v.shape, dtype=v.dtype)) + +@keras_export('keras.metrics.F1Score') +class F1Score(FBetaScore): + + def __init__(self, average=None, threshold=None, name='f1_score', dtype=None): + super().__init__(average=average, beta=1.0, threshold=threshold, name=name, dtype=dtype) + + def get_config(self): + base_config = super().get_config() + del base_config['beta'] + return base_config + +# File: keras-master/keras/src/metrics/hinge_metrics.py +from keras.src.api_export import keras_export +from keras.src.losses.losses import categorical_hinge +from keras.src.losses.losses import hinge 
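+# The metric classes below are thin MeanMetricWrapper subclasses around the hinge loss functions imported in this block: each update_state call feeds the per-sample loss values into a running mean.
+# Illustrative usage sketch (not part of the Keras source; assumes only the public metric API defined in this module):
+#   m = Hinge()
+#   m.update_state([[0.0, 1.0], [0.0, 0.0]], [[0.6, 0.4], [0.4, 0.6]])
+#   m.result()  # running mean of hinge(y_true, y_pred) over all batches seen so far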
+from keras.src.losses.losses import squared_hinge +from keras.src.metrics import reduction_metrics + +@keras_export('keras.metrics.Hinge') +class Hinge(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='hinge', dtype=None): + super().__init__(fn=hinge, name=name, dtype=dtype) + self._direction = 'down' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype} + +@keras_export('keras.metrics.SquaredHinge') +class SquaredHinge(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='squared_hinge', dtype=None): + super().__init__(fn=squared_hinge, name=name, dtype=dtype) + self._direction = 'down' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype} + +@keras_export('keras.metrics.CategoricalHinge') +class CategoricalHinge(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='categorical_hinge', dtype=None): + super().__init__(fn=categorical_hinge, name=name, dtype=dtype) + self._direction = 'down' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype} + +# File: keras-master/keras/src/metrics/iou_metrics.py +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.metrics.metric import Metric +from keras.src.metrics.metrics_utils import confusion_matrix + +class _IoUBase(Metric): + + def __init__(self, num_classes, name=None, dtype=None, ignore_class=None, sparse_y_true=True, sparse_y_pred=True, axis=-1): + super().__init__(name=name, dtype=dtype or 'float32') + self._direction = 'up' + self.num_classes = num_classes + self.ignore_class = ignore_class + self.sparse_y_true = sparse_y_true + self.sparse_y_pred = sparse_y_pred + self.axis = axis + self.total_cm = self.add_variable(name='total_confusion_matrix', shape=(num_classes, num_classes), initializer=initializers.Zeros()) + + def update_state(self, y_true, y_pred, sample_weight=None): + if not self.sparse_y_true: + y_true = ops.argmax(y_true, axis=self.axis) + if not self.sparse_y_pred: + y_pred = ops.argmax(y_pred, axis=self.axis) + y_true = ops.convert_to_tensor(y_true, dtype=self.dtype) + y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype) + if len(y_pred.shape) > 1: + y_pred = ops.reshape(y_pred, [-1]) + if len(y_true.shape) > 1: + y_true = ops.reshape(y_true, [-1]) + if sample_weight is None: + sample_weight = 1 + sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype) + if len(sample_weight.shape) > 1: + sample_weight = ops.reshape(sample_weight, [-1]) + sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true)) + if self.ignore_class is not None: + ignore_class = ops.convert_to_tensor(self.ignore_class, y_true.dtype) + valid_mask = ops.not_equal(y_true, ignore_class) + y_true = y_true * ops.cast(valid_mask, y_true.dtype) + y_pred = y_pred * ops.cast(valid_mask, y_pred.dtype) + if sample_weight is not None: + sample_weight = sample_weight * ops.cast(valid_mask, sample_weight.dtype) + y_pred = ops.cast(y_pred, dtype=self.dtype) + y_true = ops.cast(y_true, dtype=self.dtype) + sample_weight = ops.cast(sample_weight, dtype=self.dtype) + current_cm = confusion_matrix(y_true, y_pred, self.num_classes, weights=sample_weight, dtype='float32') + return self.total_cm.assign(self.total_cm + current_cm) + + def reset_state(self): + self.total_cm.assign(ops.zeros(self.total_cm.shape, dtype=self.total_cm.dtype)) + +@keras_export('keras.metrics.IoU') +class IoU(_IoUBase): + + def __init__(self, num_classes, 
target_class_ids, name=None, dtype=None, ignore_class=None, sparse_y_true=True, sparse_y_pred=True, axis=-1): + super().__init__(name=name, num_classes=num_classes, ignore_class=ignore_class, sparse_y_true=sparse_y_true, sparse_y_pred=sparse_y_pred, axis=axis, dtype=dtype) + if max(target_class_ids) >= num_classes: + raise ValueError(f'Target class id {max(target_class_ids)} is out of range, which is [{0}, {num_classes}).') + self.target_class_ids = list(target_class_ids) + + def result(self): + sum_over_row = ops.cast(ops.sum(self.total_cm, axis=0), dtype=self.dtype) + sum_over_col = ops.cast(ops.sum(self.total_cm, axis=1), dtype=self.dtype) + true_positives = ops.cast(ops.diag(self.total_cm), dtype=self.dtype) + denominator = sum_over_row + sum_over_col - true_positives + target_class_ids = ops.convert_to_tensor(self.target_class_ids, dtype='int32') + true_positives = ops.take_along_axis(true_positives, target_class_ids, axis=-1) + denominator = ops.take_along_axis(denominator, target_class_ids, axis=-1) + num_valid_entries = ops.sum(ops.cast(ops.greater(denominator, 1e-09), dtype=self.dtype)) + iou = ops.divide(true_positives, denominator + backend.epsilon()) + return ops.divide(ops.sum(iou, axis=self.axis), num_valid_entries + backend.epsilon()) + + def get_config(self): + config = {'num_classes': self.num_classes, 'target_class_ids': self.target_class_ids, 'ignore_class': self.ignore_class, 'sparse_y_true': self.sparse_y_true, 'sparse_y_pred': self.sparse_y_pred, 'axis': self.axis} + base_config = super().get_config() + return dict(list(base_config.items()) + list(config.items())) + +@keras_export('keras.metrics.BinaryIoU') +class BinaryIoU(IoU): + + def __init__(self, target_class_ids=(0, 1), threshold=0.5, name=None, dtype=None): + super().__init__(num_classes=2, target_class_ids=target_class_ids, name=name, dtype=dtype) + self.threshold = threshold + + def update_state(self, y_true, y_pred, sample_weight=None): + y_true = ops.convert_to_tensor(y_true, dtype=self.dtype) + y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype) + y_pred = ops.cast(y_pred >= self.threshold, self.dtype) + return super().update_state(y_true, y_pred, sample_weight) + + def get_config(self): + return {'target_class_ids': self.target_class_ids, 'threshold': self.threshold, 'name': self.name, 'dtype': self._dtype} + +@keras_export('keras.metrics.MeanIoU') +class MeanIoU(IoU): + + def __init__(self, num_classes, name=None, dtype=None, ignore_class=None, sparse_y_true=True, sparse_y_pred=True, axis=-1): + target_class_ids = list(range(num_classes)) + super().__init__(name=name, num_classes=num_classes, target_class_ids=target_class_ids, axis=axis, dtype=dtype, ignore_class=ignore_class, sparse_y_true=sparse_y_true, sparse_y_pred=sparse_y_pred) + + def get_config(self): + return {'num_classes': self.num_classes, 'name': self.name, 'dtype': self._dtype, 'ignore_class': self.ignore_class, 'sparse_y_true': self.sparse_y_true, 'sparse_y_pred': self.sparse_y_pred, 'axis': self.axis} + +@keras_export('keras.metrics.OneHotIoU') +class OneHotIoU(IoU): + + def __init__(self, num_classes, target_class_ids, name=None, dtype=None, ignore_class=None, sparse_y_pred=False, axis=-1): + super().__init__(num_classes=num_classes, target_class_ids=target_class_ids, name=name, dtype=dtype, ignore_class=ignore_class, sparse_y_true=False, sparse_y_pred=sparse_y_pred, axis=axis) + + def get_config(self): + return {'num_classes': self.num_classes, 'target_class_ids': self.target_class_ids, 'name': self.name, 'dtype': self._dtype, 
'ignore_class': self.ignore_class, 'sparse_y_pred': self.sparse_y_pred, 'axis': self.axis} + +@keras_export('keras.metrics.OneHotMeanIoU') +class OneHotMeanIoU(MeanIoU): + + def __init__(self, num_classes, name=None, dtype=None, ignore_class=None, sparse_y_pred=False, axis=-1): + super().__init__(num_classes=num_classes, axis=axis, name=name, dtype=dtype, ignore_class=ignore_class, sparse_y_true=False, sparse_y_pred=sparse_y_pred) + + def get_config(self): + return {'num_classes': self.num_classes, 'name': self.name, 'dtype': self._dtype, 'ignore_class': self.ignore_class, 'sparse_y_pred': self.sparse_y_pred, 'axis': self.axis} + +# File: keras-master/keras/src/metrics/metric.py +from keras.src import backend +from keras.src import dtype_policies +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.saving.keras_saveable import KerasSaveable +from keras.src.utils.naming import auto_name +from keras.src.utils.tracking import Tracker + +@keras_export(['keras.Metric', 'keras.metrics.Metric']) +class Metric(KerasSaveable): + + def __init__(self, dtype=None, name=None): + self.name = name or auto_name(self.__class__.__name__) + self._dtype_policy = dtype_policies.get(dtype or backend.floatx()) + self._dtype = self._dtype_policy.compute_dtype + self._metrics = [] + self._variables = [] + self._tracker = Tracker({'variables': (lambda x: isinstance(x, backend.Variable), self._variables), 'metrics': (lambda x: isinstance(x, Metric), self._metrics)}) + + def reset_state(self): + for v in self.variables: + v.assign(ops.zeros(v.shape, dtype=v.dtype)) + + def update_state(self, *args, **kwargs): + raise NotImplementedError + + def stateless_update_state(self, metric_variables, *args, **kwargs): + if len(metric_variables) != len(self.variables): + raise ValueError(f'Argument `metric_variables` must be a list of tensors corresponding 1:1 to {self.__class__.__name__}().variables. Received list with length {len(metric_variables)}, but expected {len(self.variables)} variables.') + mapping = list(zip(self.variables, metric_variables)) + with backend.StatelessScope(state_mapping=mapping) as scope: + self.update_state(*args, **kwargs) + metric_variables = [] + for v in self.variables: + new_v = scope.get_current_value(v) + if new_v is not None: + metric_variables.append(new_v) + else: + metric_variables.append(v) + return metric_variables + + def result(self): + raise NotImplementedError + + def stateless_result(self, metric_variables): + if len(metric_variables) != len(self.variables): + raise ValueError(f'Argument `metric_variables` must be a list of tensors corresponding 1:1 to {self.__class__.__name__}().variables. 
+# File: keras-master/keras/src/metrics/metric.py
+from keras.src import backend
+from keras.src import dtype_policies
+from keras.src import initializers
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.saving.keras_saveable import KerasSaveable
+from keras.src.utils.naming import auto_name
+from keras.src.utils.tracking import Tracker
+
+@keras_export(['keras.Metric', 'keras.metrics.Metric'])
+class Metric(KerasSaveable):
+
+ def __init__(self, dtype=None, name=None):
+ self.name = name or auto_name(self.__class__.__name__)
+ self._dtype_policy = dtype_policies.get(dtype or backend.floatx())
+ self._dtype = self._dtype_policy.compute_dtype
+ self._metrics = []
+ self._variables = []
+ self._tracker = Tracker({'variables': (lambda x: isinstance(x, backend.Variable), self._variables), 'metrics': (lambda x: isinstance(x, Metric), self._metrics)})
+
+ def reset_state(self):
+ for v in self.variables:
+ v.assign(ops.zeros(v.shape, dtype=v.dtype))
+
+ def update_state(self, *args, **kwargs):
+ raise NotImplementedError
+
+ def stateless_update_state(self, metric_variables, *args, **kwargs):
+ if len(metric_variables) != len(self.variables):
+ raise ValueError(f'Argument `metric_variables` must be a list of tensors corresponding 1:1 to {self.__class__.__name__}().variables. Received list with length {len(metric_variables)}, but expected {len(self.variables)} variables.')
+ mapping = list(zip(self.variables, metric_variables))
+ with backend.StatelessScope(state_mapping=mapping) as scope:
+ self.update_state(*args, **kwargs)
+ metric_variables = []
+ for v in self.variables:
+ new_v = scope.get_current_value(v)
+ if new_v is not None:
+ metric_variables.append(new_v)
+ else:
+ metric_variables.append(v)
+ return metric_variables
+
+ def result(self):
+ raise NotImplementedError
+
+ def stateless_result(self, metric_variables):
+ if len(metric_variables) != len(self.variables):
+ raise ValueError(f'Argument `metric_variables` must be a list of tensors corresponding 1:1 to {self.__class__.__name__}().variables. Received list with length {len(metric_variables)}, but expected {len(self.variables)} variables.')
+ mapping = list(zip(self.variables, metric_variables))
+ with backend.StatelessScope(state_mapping=mapping):
+ res = self.result()
+ return res
+
+ def stateless_reset_state(self):
+ with backend.StatelessScope() as scope:
+ self.reset_state()
+ metric_variables = []
+ for v in self.variables:
+ new_v = scope.get_current_value(v)
+ if new_v is not None:
+ metric_variables.append(new_v)
+ else:
+ metric_variables.append(v)
+ return metric_variables
+
+ @property
+ def dtype(self):
+ return self._dtype
+
+ def _obj_type(self):
+ return 'Metric'
+
+ def add_variable(self, shape, initializer, dtype=None, aggregation='sum', name=None):
+ self._check_super_called()
+ with backend.name_scope(self.name.replace('/', '>'), caller=self):
+ initializer = initializers.get(initializer)
+ variable = backend.Variable(initializer=initializer, shape=shape, dtype=dtype, trainable=False, aggregation=aggregation, name=name)
+ self._tracker.add_to_store('variables', variable)
+ return variable
+
+ def add_weight(self, shape=(), initializer=None, dtype=None, name=None):
+ return self.add_variable(shape=shape, initializer=initializer, dtype=dtype, name=name)
+
+ @property
+ def variables(self):
+ variables = list(self._variables)
+ for metric in self._metrics:
+ variables.extend(metric.variables)
+ return variables
+
+ def __call__(self, *args, **kwargs):
+ self._check_super_called()
+ self.update_state(*args, **kwargs)
+ return self.result()
+
+ def get_config(self):
+ return {'name': self.name, 'dtype': self.dtype}
+
+ @classmethod
+ def from_config(cls, config):
+ return cls(**config)
+
+ def __setattr__(self, name, value):
+ if hasattr(self, '_tracker'):
+ value = self._tracker.track(value)
+ return super().__setattr__(name, value)
+
+ def _check_super_called(self):
+ if not hasattr(self, '_tracker'):
+ raise RuntimeError('You forgot to call `super().__init__()` in the `__init__()` method. Go add it!')
+
+ def __repr__(self):
+ return f'<{self.__class__.__name__} name={self.name}>'
+
+ def __str__(self):
+ return self.__repr__()
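+# --- Added usage sketch (annotation in this dump, not part of the upstream file) ---
+# The Metric base class above fixes the subclassing contract: state lives in
+# variables created via add_variable()/add_weight(), update_state() mutates it,
+# result() reads it, and the stateless_* methods thread explicit variable lists
+# for functional-style backends such as JAX. A minimal hypothetical subclass:
+import keras
+from keras import ops
+class TruePositives01(keras.metrics.Metric):
+    def __init__(self, name='tp01', dtype=None):
+        super().__init__(name=name, dtype=dtype)
+        self.tp = self.add_variable(shape=(), initializer='zeros', name='tp')
+    def update_state(self, y_true, y_pred, sample_weight=None):
+        y_true = ops.cast(y_true, 'bool')
+        y_pred = ops.greater_equal(ops.convert_to_tensor(y_pred, dtype=self.dtype), 0.5)
+        self.tp.assign(self.tp + ops.sum(ops.cast(ops.logical_and(y_true, y_pred), self.dtype)))
+    def result(self):
+        return self.tp
+m = TruePositives01()
+m.update_state([0, 1, 1], [0.2, 0.8, 0.6])
+print(float(m.result()))  # 2.0
+# The stateless variants return updated variable values instead of mutating:
+new_vars = m.stateless_update_state([v.value for v in m.variables], [1], [0.9])
+print(float(m.stateless_result(new_vars)))  # 3.0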
Expected values are ["PR", "ROC"]') + +class AUCSummationMethod(Enum): + INTERPOLATION = 'interpolation' + MAJORING = 'majoring' + MINORING = 'minoring' + + @staticmethod + def from_str(key): + if key in ('interpolation', 'Interpolation'): + return AUCSummationMethod.INTERPOLATION + elif key in ('majoring', 'Majoring'): + return AUCSummationMethod.MAJORING + elif key in ('minoring', 'Minoring'): + return AUCSummationMethod.MINORING + else: + raise ValueError(f'Invalid AUC summation method value: "{key}". Expected values are ["interpolation", "majoring", "minoring"]') + +def _update_confusion_matrix_variables_optimized(variables_to_update, y_true, y_pred, thresholds, multi_label=False, sample_weights=None, label_weights=None, thresholds_with_epsilon=False): + num_thresholds = ops.shape(thresholds)[0] + if sample_weights is None: + sample_weights = 1.0 + else: + sample_weights = ops.broadcast_to(ops.cast(sample_weights, dtype=y_pred.dtype), ops.shape(y_pred)) + if not multi_label: + sample_weights = ops.reshape(sample_weights, [-1]) + if label_weights is None: + label_weights = 1.0 + else: + label_weights = ops.expand_dims(label_weights, 0) + label_weights = ops.broadcast_to(label_weights, ops.shape(y_pred)) + if not multi_label: + label_weights = ops.reshape(label_weights, [-1]) + weights = ops.cast(ops.multiply(sample_weights, label_weights), y_true.dtype) + y_pred = ops.clip(y_pred, x_min=0.0, x_max=1.0) + y_true = ops.cast(ops.cast(y_true, 'bool'), y_true.dtype) + if not multi_label: + y_true = ops.reshape(y_true, [-1]) + y_pred = ops.reshape(y_pred, [-1]) + true_labels = ops.multiply(y_true, weights) + false_labels = ops.multiply(1.0 - y_true, weights) + bucket_indices = ops.ceil(y_pred * (ops.cast(num_thresholds, dtype=y_pred.dtype) - 1)) - 1 + if thresholds_with_epsilon: + bucket_indices = ops.relu(bucket_indices) + bucket_indices = ops.cast(bucket_indices, 'int32') + if multi_label: + true_labels = ops.transpose(true_labels) + false_labels = ops.transpose(false_labels) + bucket_indices = ops.transpose(bucket_indices) + + def gather_bucket(label_and_bucket_index): + (label, bucket_index) = (label_and_bucket_index[0], label_and_bucket_index[1]) + return ops.segment_sum(data=label, segment_ids=bucket_index, num_segments=num_thresholds) + tp_bucket_v = backend.vectorized_map(gather_bucket, (true_labels, bucket_indices)) + fp_bucket_v = backend.vectorized_map(gather_bucket, (false_labels, bucket_indices)) + tp = ops.transpose(ops.flip(ops.cumsum(ops.flip(tp_bucket_v), axis=1))) + fp = ops.transpose(ops.flip(ops.cumsum(ops.flip(fp_bucket_v), axis=1))) + else: + tp_bucket_v = ops.segment_sum(data=true_labels, segment_ids=bucket_indices, num_segments=num_thresholds) + fp_bucket_v = ops.segment_sum(data=false_labels, segment_ids=bucket_indices, num_segments=num_thresholds) + tp = ops.flip(ops.cumsum(ops.flip(tp_bucket_v))) + fp = ops.flip(ops.cumsum(ops.flip(fp_bucket_v))) + if ConfusionMatrix.TRUE_NEGATIVES in variables_to_update or ConfusionMatrix.FALSE_NEGATIVES in variables_to_update: + if multi_label: + total_true_labels = ops.sum(true_labels, axis=1) + total_false_labels = ops.sum(false_labels, axis=1) + else: + total_true_labels = ops.sum(true_labels) + total_false_labels = ops.sum(false_labels) + if ConfusionMatrix.TRUE_POSITIVES in variables_to_update: + variable = variables_to_update[ConfusionMatrix.TRUE_POSITIVES] + variable.assign(variable + tp) + if ConfusionMatrix.FALSE_POSITIVES in variables_to_update: + variable = variables_to_update[ConfusionMatrix.FALSE_POSITIVES] + 
variable.assign(variable + fp) + if ConfusionMatrix.TRUE_NEGATIVES in variables_to_update: + variable = variables_to_update[ConfusionMatrix.TRUE_NEGATIVES] + tn = total_false_labels - fp + variable.assign(variable + tn) + if ConfusionMatrix.FALSE_NEGATIVES in variables_to_update: + variable = variables_to_update[ConfusionMatrix.FALSE_NEGATIVES] + fn = total_true_labels - tp + variable.assign(variable + fn) + +def is_evenly_distributed_thresholds(thresholds): + num_thresholds = len(thresholds) + if num_thresholds < 3: + return False + even_thresholds = np.arange(num_thresholds, dtype=np.float32) / (num_thresholds - 1) + return np.allclose(thresholds, even_thresholds, atol=backend.epsilon()) + +def update_confusion_matrix_variables(variables_to_update, y_true, y_pred, thresholds, top_k=None, class_id=None, sample_weight=None, multi_label=False, label_weights=None, thresholds_distributed_evenly=False): + if multi_label and label_weights is not None: + raise ValueError('`label_weights` for multilabel data should be handled outside of `update_confusion_matrix_variables` when `multi_label` is True.') + if variables_to_update is None: + return + if not any((key for key in variables_to_update if key in list(ConfusionMatrix))): + raise ValueError(f'Please provide at least one valid confusion matrix variable to update. Valid variable key options are: "{list(ConfusionMatrix)}". Received: "{variables_to_update.keys()}"') + variable_dtype = list(variables_to_update.values())[0].dtype + y_true = ops.cast(y_true, dtype=variable_dtype) + y_pred = ops.cast(y_pred, dtype=variable_dtype) + if thresholds_distributed_evenly: + thresholds_with_epsilon = thresholds[0] < 0.0 or thresholds[-1] > 1.0 + thresholds = ops.convert_to_tensor(thresholds, dtype=variable_dtype) + num_thresholds = ops.shape(thresholds)[0] + if multi_label: + one_thresh = ops.equal(np.array(1, dtype='int32'), len(thresholds.shape)) + else: + one_thresh = np.array(True, dtype='bool') + invalid_keys = [key for key in variables_to_update if key not in list(ConfusionMatrix)] + if invalid_keys: + raise ValueError(f'Invalid keys: "{invalid_keys}". 
Valid variable key options are: "{list(ConfusionMatrix)}"') + (y_pred, y_true) = squeeze_or_expand_to_same_rank(y_pred, y_true) + if sample_weight is not None: + sample_weight = ops.expand_dims(ops.cast(sample_weight, dtype=variable_dtype), axis=-1) + (_, sample_weight) = squeeze_or_expand_to_same_rank(y_true, sample_weight, expand_rank_1=False) + if top_k is not None: + y_pred = _filter_top_k(y_pred, top_k) + if class_id is not None: + if len(y_pred.shape) == 1: + raise ValueError(f'When class_id is provided, y_pred must be a 2D array with shape (num_samples, num_classes), found shape: {y_pred.shape}') + y_true = y_true[..., class_id, None] + y_pred = y_pred[..., class_id, None] + if thresholds_distributed_evenly: + return _update_confusion_matrix_variables_optimized(variables_to_update, y_true, y_pred, thresholds, multi_label=multi_label, sample_weights=sample_weight, label_weights=label_weights, thresholds_with_epsilon=thresholds_with_epsilon) + if None in y_pred.shape: + pred_shape = ops.shape(y_pred) + num_predictions = pred_shape[0] + if len(y_pred.shape) == 1: + num_labels = 1 + else: + num_labels = ops.cast(ops.prod(ops.array(pred_shape[1:]), axis=0), 'int32') + thresh_label_tile = ops.where(one_thresh, num_labels, 1) + else: + pred_shape = ops.shape(y_pred) + num_predictions = pred_shape[0] + if len(y_pred.shape) == 1: + num_labels = 1 + else: + num_labels = np.prod(pred_shape[1:], axis=0).astype('int32') + thresh_label_tile = np.where(one_thresh, num_labels, 1) + if multi_label: + predictions_extra_dim = ops.expand_dims(y_pred, 0) + labels_extra_dim = ops.expand_dims(ops.cast(y_true, dtype='bool'), 0) + else: + predictions_extra_dim = ops.reshape(y_pred, [1, -1]) + labels_extra_dim = ops.reshape(ops.cast(y_true, dtype='bool'), [1, -1]) + if multi_label: + thresh_pretile_shape = [num_thresholds, 1, -1] + thresh_tiles = [1, num_predictions, thresh_label_tile] + data_tiles = [num_thresholds, 1, 1] + else: + thresh_pretile_shape = [num_thresholds, -1] + thresh_tiles = [1, num_predictions * num_labels] + data_tiles = [num_thresholds, 1] + thresh_tiled = ops.tile(ops.reshape(thresholds, thresh_pretile_shape), thresh_tiles) + preds_tiled = ops.tile(predictions_extra_dim, data_tiles) + pred_is_pos = ops.greater(preds_tiled, thresh_tiled) + label_is_pos = ops.tile(labels_extra_dim, data_tiles) + if sample_weight is not None: + sample_weight = ops.broadcast_to(ops.cast(sample_weight, dtype=y_pred.dtype), ops.shape(y_pred)) + weights_tiled = ops.tile(ops.reshape(sample_weight, thresh_tiles), data_tiles) + else: + weights_tiled = None + if label_weights is not None and (not multi_label): + label_weights = ops.expand_dims(label_weights, 0) + label_weights = ops.broadcast_to(label_weights, ops.shape(y_pred)) + label_weights_tiled = ops.tile(ops.reshape(label_weights, thresh_tiles), data_tiles) + if weights_tiled is None: + weights_tiled = label_weights_tiled + else: + weights_tiled = ops.multiply(weights_tiled, label_weights_tiled) + + def weighted_assign_add(label, pred, weights, var): + label_and_pred = ops.cast(ops.logical_and(label, pred), dtype=var.dtype) + if weights is not None: + label_and_pred *= ops.cast(weights, dtype=var.dtype) + var.assign(var + ops.sum(label_and_pred, 1)) + loop_vars = {ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos)} + update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update + update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update + update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update + if update_fn or update_tn: + 
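+ # [annotation added in this dump] Each confusion-matrix entry pairs a label
+ # mask with a prediction mask: TP = (pos, pos), FN = (pos, neg),
+ # FP = (neg, pos), TN = (neg, neg). The negated masks are materialized below
+ # only when a requested variable actually needs them, which is what this
+ # branch checks.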
pred_is_neg = ops.logical_not(pred_is_pos) + loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg) + if update_fp or update_tn: + label_is_neg = ops.logical_not(label_is_pos) + loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos) + if update_tn: + loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = (label_is_neg, pred_is_neg) + for (matrix_cond, (label, pred)) in loop_vars.items(): + if matrix_cond in variables_to_update: + weighted_assign_add(label, pred, weights_tiled, variables_to_update[matrix_cond]) + +def _filter_top_k(x, k): + (_, top_k_idx) = ops.top_k(x, k) + top_k_mask = ops.sum(ops.one_hot(top_k_idx, ops.shape(x)[-1], axis=-1), axis=-2) + return x * top_k_mask + NEG_INF * (1 - top_k_mask) + +def confusion_matrix(labels, predictions, num_classes, weights=None, dtype='int32'): + labels = ops.convert_to_tensor(labels, dtype) + predictions = ops.convert_to_tensor(predictions, dtype) + (labels, predictions) = squeeze_or_expand_to_same_rank(labels, predictions) + predictions = ops.cast(predictions, dtype) + labels = ops.cast(labels, dtype) + if weights is not None: + weights = ops.convert_to_tensor(weights, dtype) + indices = ops.stack([labels, predictions], axis=1) + values = ops.ones_like(predictions, dtype) if weights is None else weights + indices = ops.cast(indices, dtype='int64') + values = ops.cast(values, dtype=dtype) + num_classes = int(num_classes) + confusion_matrix = ops.scatter(indices, values, (num_classes, num_classes)) + return confusion_matrix + +# File: keras-master/keras/src/metrics/probabilistic_metrics.py +from keras.src.api_export import keras_export +from keras.src.losses.losses import binary_crossentropy +from keras.src.losses.losses import categorical_crossentropy +from keras.src.losses.losses import kl_divergence +from keras.src.losses.losses import poisson +from keras.src.losses.losses import sparse_categorical_crossentropy +from keras.src.metrics import reduction_metrics + +@keras_export('keras.metrics.KLDivergence') +class KLDivergence(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='kl_divergence', dtype=None): + super().__init__(fn=kl_divergence, name=name, dtype=dtype) + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype} + +@keras_export('keras.metrics.Poisson') +class Poisson(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='poisson', dtype=None): + super().__init__(fn=poisson, name=name, dtype=dtype) + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype} + +@keras_export('keras.metrics.BinaryCrossentropy') +class BinaryCrossentropy(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='binary_crossentropy', dtype=None, from_logits=False, label_smoothing=0): + super().__init__(binary_crossentropy, name, dtype=dtype, from_logits=from_logits, label_smoothing=label_smoothing) + self.from_logits = from_logits + self.label_smoothing = label_smoothing + self._direction = 'down' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype, 'from_logits': self.from_logits, 'label_smoothing': self.label_smoothing} + +@keras_export('keras.metrics.CategoricalCrossentropy') +class CategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='categorical_crossentropy', dtype=None, from_logits=False, label_smoothing=0, axis=-1): + super().__init__(categorical_crossentropy, name, dtype=dtype, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis) + self.from_logits = from_logits + 
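+ # [annotation added in this dump] Constructor arguments are mirrored onto
+ # attributes so that get_config() below can round-trip them through
+ # serialization; `_direction = 'down'` marks the metric as lower-is-better,
+ # matching the convention used by the other loss-derived metrics in this file.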
self.label_smoothing = label_smoothing + self.axis = axis + self._direction = 'down' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype, 'from_logits': self.from_logits, 'label_smoothing': self.label_smoothing, 'axis': self.axis} + +@keras_export('keras.metrics.SparseCategoricalCrossentropy') +class SparseCategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): + + def __init__(self, name='sparse_categorical_crossentropy', dtype=None, from_logits=False, axis=-1): + super().__init__(sparse_categorical_crossentropy, name=name, dtype=dtype, from_logits=from_logits, axis=axis) + self.from_logits = from_logits + self.axis = axis + self._direction = 'down' + + def get_config(self): + return {'name': self.name, 'dtype': self.dtype, 'from_logits': self.from_logits, 'axis': self.axis} + +# File: keras-master/keras/src/metrics/reduction_metrics.py +from keras.src import backend +from keras.src import initializers +from keras.src import losses +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.metrics.metric import Metric +from keras.src.saving import serialization_lib + +def reduce_to_samplewise_values(values, sample_weight, reduce_fn, dtype): + dtype = dtype or backend.floatx() + mask = getattr(values, '_keras_mask', None) + values = ops.cast(values, dtype=dtype) + if sample_weight is not None: + sample_weight = ops.convert_to_tensor(sample_weight, dtype=dtype) + if mask is not None: + sample_weight = losses.loss.apply_mask(sample_weight, mask, dtype=dtype, reduction='sum') + (values, sample_weight) = losses.loss.squeeze_or_expand_to_same_rank(values, sample_weight) + weight_ndim = len(sample_weight.shape) + values_ndim = len(values.shape) + if values_ndim > weight_ndim: + values = reduce_fn(values, axis=list(range(weight_ndim, values_ndim))) + sample_weight = ops.broadcast_to(sample_weight, ops.shape(values)) + values = values * sample_weight + if weight_ndim > 1: + sample_weight = reduce_fn(sample_weight, axis=list(range(1, weight_ndim))) + values_ndim = len(values.shape) + if values_ndim > 1: + values = reduce_fn(values, axis=list(range(1, values_ndim))) + return (values, sample_weight) + +@keras_export('keras.metrics.Sum') +class Sum(Metric): + + def __init__(self, name='sum', dtype=None): + super().__init__(name=name, dtype=dtype) + self.total = self.add_variable(shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name='total') + + def update_state(self, values, sample_weight=None): + (values, _) = reduce_to_samplewise_values(values, sample_weight, reduce_fn=ops.sum, dtype=self.dtype) + self.total.assign_add(ops.sum(values)) + + def reset_state(self): + self.total.assign(0) + + def result(self): + return ops.cast(self.total, self.dtype) + +@keras_export('keras.metrics.Mean') +class Mean(Metric): + + def __init__(self, name='mean', dtype=None): + super().__init__(name=name, dtype=dtype) + self.total = self.add_variable(shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name='total') + self.count = self.add_variable(shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name='count') + + def update_state(self, values, sample_weight=None): + (values, sample_weight) = reduce_to_samplewise_values(values, sample_weight, reduce_fn=ops.mean, dtype=self.dtype) + self.total.assign_add(ops.sum(values)) + if sample_weight is not None: + num_samples = ops.sum(sample_weight) + elif len(values.shape) >= 1: + num_samples = ops.shape(values)[0] + else: + num_samples = 1 + self.count.assign_add(ops.cast(num_samples, 
dtype=self.dtype))
+
+ def reset_state(self):
+ self.total.assign(0)
+ self.count.assign(0)
+
+ def result(self):
+ return ops.divide_no_nan(self.total, ops.cast(self.count, dtype=self.dtype))
+
+@keras_export('keras.metrics.MeanMetricWrapper')
+class MeanMetricWrapper(Mean):
+
+ def __init__(self, fn, name=None, dtype=None, **kwargs):
+ super().__init__(name=name, dtype=dtype)
+ self._fn = fn
+ self._fn_kwargs = kwargs
+ if self._fn in losses.ALL_OBJECTS or (hasattr(self._fn, '__class__') and self._fn.__class__ in losses.ALL_OBJECTS):
+ self._direction = 'down'
+
+ def update_state(self, y_true, y_pred, sample_weight=None):
+ mask = getattr(y_pred, '_keras_mask', None)
+ values = self._fn(y_true, y_pred, **self._fn_kwargs)
+ if sample_weight is not None and mask is not None:
+ sample_weight = losses.loss.apply_mask(sample_weight, mask, dtype=self.dtype, reduction='sum')
+ return super().update_state(values, sample_weight=sample_weight)
+
+ def get_config(self):
+ base_config = super().get_config()
+ config = {'fn': serialization_lib.serialize_keras_object(self._fn)}
+ config.update(serialization_lib.serialize_keras_object(self._fn_kwargs))
+ return {**base_config, **config}
+
+ @classmethod
+ def from_config(cls, config):
+ if 'fn' in config:
+ config = serialization_lib.deserialize_keras_object(config)
+ return cls(**config)
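+# --- Added usage sketch (annotation in this dump, not part of the upstream file) ---
+# Sum and Mean stream reductions over batches; MeanMetricWrapper turns any
+# elementwise fn(y_true, y_pred) into a streaming mean, with sample weights
+# handled by reduce_to_samplewise_values() above. Inputs are made up; the
+# lambda is illustrative only (a lambda would not survive get_config()).
+import numpy as np
+import keras
+m = keras.metrics.Mean()
+m.update_state([1.0, 3.0])
+m.update_state([5.0], sample_weight=[2.0])
+print(float(m.result()))  # total = 4 + 10, count = 2 + 2 -> 3.5
+w = keras.metrics.MeanMetricWrapper(fn=lambda y_true, y_pred: (y_pred - y_true) ** 2)
+w.update_state(np.array([0.0, 0.0]), np.array([1.0, 3.0]))
+print(float(w.result()))  # mean of [1, 9] -> 5.0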
+# File: keras-master/keras/src/metrics/regression_metrics.py
+import warnings
+from keras.src import initializers
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.losses.loss import squeeze_or_expand_to_same_rank
+from keras.src.losses.losses import log_cosh
+from keras.src.losses.losses import mean_absolute_error
+from keras.src.losses.losses import mean_absolute_percentage_error
+from keras.src.losses.losses import mean_squared_error
+from keras.src.losses.losses import mean_squared_logarithmic_error
+from keras.src.metrics import reduction_metrics
+from keras.src.utils.numerical_utils import normalize
+
+@keras_export('keras.metrics.MeanSquaredError')
+class MeanSquaredError(reduction_metrics.MeanMetricWrapper):
+
+ def __init__(self, name='mean_squared_error', dtype=None):
+ super().__init__(fn=mean_squared_error, name=name, dtype=dtype)
+ self._direction = 'down'
+
+ def get_config(self):
+ return {'name': self.name, 'dtype': self.dtype}
+
+@keras_export('keras.metrics.MeanAbsoluteError')
+class MeanAbsoluteError(reduction_metrics.MeanMetricWrapper):
+
+ def __init__(self, name='mean_absolute_error', dtype=None):
+ super().__init__(mean_absolute_error, name, dtype=dtype)
+ self._direction = 'down'
+
+ def get_config(self):
+ return {'name': self.name, 'dtype': self.dtype}
+
+@keras_export('keras.metrics.MeanAbsolutePercentageError')
+class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper):
+
+ def __init__(self, name='mean_absolute_percentage_error', dtype=None):
+ super().__init__(mean_absolute_percentage_error, name, dtype=dtype)
+ self._direction = 'down'
+
+ def get_config(self):
+ return {'name': self.name, 'dtype': self.dtype}
+
+@keras_export('keras.metrics.MeanSquaredLogarithmicError')
+class MeanSquaredLogarithmicError(reduction_metrics.MeanMetricWrapper):
+
+ def __init__(self, name='mean_squared_logarithmic_error', dtype=None):
+ super().__init__(mean_squared_logarithmic_error, name, dtype=dtype)
+ self._direction = 'down'
+
+ def get_config(self):
+ return {'name': self.name, 'dtype': self.dtype}
+
+@keras_export('keras.metrics.RootMeanSquaredError')
+class RootMeanSquaredError(reduction_metrics.Mean):
+
+ def __init__(self, name='root_mean_squared_error', dtype=None):
+ super().__init__(name, dtype=dtype)
+ self._direction = 'down'
+
+ def update_state(self, y_true, y_pred, sample_weight=None):
+ y_true = ops.convert_to_tensor(y_true, self._dtype)
+ y_pred = ops.convert_to_tensor(y_pred, self._dtype)
+ (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred)
+ error_sq = ops.square(y_pred - y_true)
+ return super().update_state(error_sq, sample_weight=sample_weight)
+
+ def result(self):
+ return ops.sqrt(super().result())
+
+@keras_export('keras.metrics.CosineSimilarity')
+class CosineSimilarity(reduction_metrics.MeanMetricWrapper):
+
+ def __init__(self, name='cosine_similarity', dtype=None, axis=-1):
+ super().__init__(cosine_similarity, name, dtype=dtype, axis=axis)
+ self.axis = axis
+ self._direction = 'up'
+
+ def get_config(self):
+ return {'name': self.name, 'dtype': self.dtype, 'axis': self.axis}
+
+@keras_export('keras.metrics.LogCoshError')
+class LogCoshError(reduction_metrics.MeanMetricWrapper):
+
+ def __init__(self, name='logcosh', dtype=None):
+ super().__init__(log_cosh, name, dtype=dtype)
+ self._direction = 'down'
+
+ def get_config(self):
+ return {'name': self.name, 'dtype': self.dtype}
+
+@keras_export('keras.metrics.R2Score')
+class R2Score(reduction_metrics.Metric):
+
+ def __init__(self, class_aggregation='uniform_average', num_regressors=0, name='r2_score', dtype=None):
+ super().__init__(name=name, dtype=dtype)
+ self._direction = 'up'
+ valid_class_aggregation_values = (None, 'uniform_average', 'variance_weighted_average')
+ if class_aggregation not in valid_class_aggregation_values:
+ raise ValueError(f'Invalid value for argument `class_aggregation`. Expected one of {valid_class_aggregation_values}. Received: class_aggregation={class_aggregation}')
+ if num_regressors < 0:
+ raise ValueError(f'Invalid value for argument `num_regressors`. Expected a value >= 0. Received: num_regressors={num_regressors}')
+ self.class_aggregation = class_aggregation
+ self.num_regressors = num_regressors
+ self.num_samples = self.add_variable(shape=(), initializer=initializers.Zeros(), name='num_samples')
+ self._built = False
+
+ def _build(self, y_true_shape, y_pred_shape):
+ if len(y_pred_shape) != 2 or len(y_true_shape) != 2:
+ raise ValueError(f'R2Score expects 2D inputs with shape (batch_size, output_dim). Received input shapes: y_pred.shape={y_pred_shape} and y_true.shape={y_true_shape}.')
+ if y_pred_shape[-1] is None or y_true_shape[-1] is None:
+ raise ValueError(f'R2Score expects 2D inputs with shape (batch_size, output_dim), with output_dim fully defined (not None). 
Received input shapes: y_pred.shape={y_pred_shape} and y_true.shape={y_true_shape}.') + num_classes = y_pred_shape[-1] + self.squared_sum = self.add_variable(name='squared_sum', shape=[num_classes], initializer=initializers.Zeros()) + self.sum = self.add_variable(name='sum', shape=[num_classes], initializer=initializers.Zeros()) + self.total_mse = self.add_variable(name='residual', shape=[num_classes], initializer=initializers.Zeros()) + self.count = self.add_variable(name='count', shape=[num_classes], initializer=initializers.Zeros()) + self._built = True + + def update_state(self, y_true, y_pred, sample_weight=None): + y_true = ops.convert_to_tensor(y_true, dtype=self._dtype) + y_pred = ops.convert_to_tensor(y_pred, dtype=self._dtype) + (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred) + if not self._built: + self._build(y_true.shape, y_pred.shape) + if sample_weight is None: + sample_weight = 1 + sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype) + if len(sample_weight.shape) == 1: + sample_weight = ops.expand_dims(sample_weight, axis=1) + sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true)) + weighted_y_true = y_true * ops.cast(sample_weight, y_true.dtype) + self.sum.assign(self.sum + ops.sum(weighted_y_true, axis=0)) + self.squared_sum.assign(self.squared_sum + ops.sum(y_true * weighted_y_true, axis=0)) + self.total_mse.assign(self.total_mse + ops.sum((y_true - y_pred) ** 2 * ops.cast(sample_weight, y_true.dtype), axis=0)) + self.count.assign(self.count + ops.sum(sample_weight, axis=0)) + self.num_samples.assign(self.num_samples + ops.size(y_true)) + + def result(self): + mean = self.sum / self.count + total = self.squared_sum - self.sum * mean + raw_scores = 1 - self.total_mse / total + raw_scores = ops.where(ops.isinf(raw_scores), 0.0, raw_scores) + if self.class_aggregation == 'uniform_average': + r2_score = ops.mean(raw_scores) + elif self.class_aggregation == 'variance_weighted_average': + weighted_sum = ops.sum(total * raw_scores) + sum_of_weights = ops.sum(total) + r2_score = weighted_sum / sum_of_weights + else: + r2_score = raw_scores + if self.num_regressors != 0: + if self.num_regressors > self.num_samples - 1: + warnings.warn('More independent predictors than datapoints in adjusted R2 score. Falling back to standard R2 score.', stacklevel=2) + elif self.num_regressors == self.num_samples - 1: + warnings.warn('Division by zero in Adjusted R2 score. 
Falling back to standard R2 score.', stacklevel=2)
+ else:
+ n = ops.convert_to_tensor(self.num_samples, dtype='float32')
+ p = ops.convert_to_tensor(self.num_regressors, dtype='float32')
+ num = ops.multiply(ops.subtract(1.0, r2_score), ops.subtract(n, 1.0))
+ den = ops.subtract(ops.subtract(n, p), 1.0)
+ r2_score = ops.subtract(1.0, ops.divide(num, den))
+ return r2_score
+
+ def reset_state(self):
+ for v in self.variables:
+ v.assign(ops.zeros(v.shape, dtype=v.dtype))
+
+ def get_config(self):
+ config = {'name': self.name, 'dtype': self.dtype, 'class_aggregation': self.class_aggregation, 'num_regressors': self.num_regressors}
+ base_config = super().get_config()
+ return {**base_config, **config}
+
+def cosine_similarity(y_true, y_pred, axis=-1):
+ y_pred = ops.convert_to_tensor(y_pred)
+ y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
+ (y_true, y_pred) = squeeze_or_expand_to_same_rank(y_true, y_pred)
+ y_pred = normalize(y_pred, axis=axis)
+ y_true = normalize(y_true, axis=axis)
+ return ops.sum(y_true * y_pred, axis=axis)
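+# --- Added usage sketch (annotation in this dump, not part of the upstream file) ---
+# Quick check of the regression metrics above. Values are made up; R2Score
+# requires 2D (batch_size, output_dim) inputs, per _build() above.
+import keras
+rmse = keras.metrics.RootMeanSquaredError()
+rmse.update_state([[0.0], [2.0]], [[1.0], [3.0]])
+print(float(rmse.result()))  # sqrt(mean([1, 1])) = 1.0
+r2 = keras.metrics.R2Score()
+r2.update_state([[1.0], [2.0], [3.0]], [[1.1], [1.9], [3.2]])
+print(float(r2.result()))  # ~0.97 for this near-perfect fit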
+# File: keras-master/keras/src/models/cloning.py
+from keras.src import backend
+from keras.src import tree
+from keras.src import utils
+from keras.src.api_export import keras_export
+from keras.src.layers import Input
+from keras.src.layers import InputLayer
+from keras.src.models.functional import Functional
+from keras.src.models.functional import functional_like_constructor
+from keras.src.models.sequential import Sequential
+from keras.src.saving import serialization_lib
+
+@keras_export('keras.models.clone_model')
+def clone_model(model, input_tensors=None, clone_function=None, call_function=None, recursive=False, **kwargs):
+ cache = kwargs.pop('cache', None)
+ if kwargs:
+ raise ValueError(f'Unexpected keyword argument(s): {tuple(kwargs.keys())}')
+ if isinstance(model, Sequential):
+ clone_function = _wrap_clone_function(clone_function, call_function=call_function, recursive=recursive, cache=cache)
+ if call_function is not None:
+ raise ValueError(f"`call_function` argument is not supported with Sequential models. In a Sequential model, layers aren't called at model-construction time (they're merely listed). Use `call_function` with Functional models only. Received model of type '{model.__class__.__name__}', with call_function={call_function}")
+ return _clone_sequential_model(model, clone_function=clone_function, input_tensors=input_tensors)
+ if isinstance(model, Functional):
+ clone_function = _wrap_clone_function(clone_function, call_function=call_function, recursive=recursive, cache=cache)
+ if utils.is_default(model.get_config) or (clone_function or input_tensors):
+ return _clone_functional_model(model, clone_function=clone_function, call_function=call_function, input_tensors=input_tensors)
+ if clone_function or input_tensors:
+ raise ValueError(f"Arguments `clone_function` and `input_tensors` are only supported for Sequential models or Functional models. Received model of type '{model.__class__.__name__}', with clone_function={clone_function} and input_tensors={input_tensors}")
+ if call_function is not None:
+ raise ValueError(f"Argument `call_function` is only supported for Functional models. Received model of type '{model.__class__.__name__}', with call_function={call_function}")
+ config = serialization_lib.serialize_keras_object(model)
+ return serialization_lib.deserialize_keras_object(config, custom_objects={model.__class__.__name__: model.__class__})
+
+def _wrap_clone_function(clone_function, call_function=None, recursive=False, cache=None):
+ if clone_function is None:
+
+ def _clone_layer(layer):
+ return layer.__class__.from_config(layer.get_config())
+ clone_function = _clone_layer
+ if cache is None:
+ cache = {}
+
+ def wrapped_clone_function(layer):
+ if id(layer) in cache:
+ return cache[id(layer)]
+ if recursive:
+ if isinstance(layer, Sequential):
+ clone = clone_model(layer, clone_function=clone_function, cache=cache)
+ cache[id(layer)] = clone
+ return clone
+ elif isinstance(layer, Functional):
+ clone = clone_model(layer, clone_function=clone_function, call_function=call_function, cache=cache)
+ cache[id(layer)] = clone
+ return clone
+ clone = clone_function(layer)
+ cache[id(layer)] = clone
+ return clone
+ return wrapped_clone_function
+
+def _clone_sequential_model(model, clone_function, input_tensors=None):
+ if not isinstance(model, Sequential):
+ raise ValueError(f'Expected `model` argument to be a `Sequential` model instance. Received: model={model}')
+ if not callable(clone_function):
+ raise ValueError(f'Expected `clone_function` argument to be a callable. Received: clone_function={clone_function}')
+ new_layers = [clone_function(layer) for layer in model.layers]
+ if isinstance(model._layers[0], InputLayer):
+ ref_input_layer = model._layers[0]
+ input_name = ref_input_layer.name
+ input_batch_shape = ref_input_layer.batch_shape
+ input_dtype = ref_input_layer._dtype
+ else:
+ input_name = None
+ input_dtype = None
+ input_batch_shape = None
+ if input_tensors:
+ if isinstance(input_tensors, (list, tuple)):
+ if len(input_tensors) != 1:
+ raise ValueError('Argument `input_tensors` must contain a single tensor.')
+ input_tensors = input_tensors[0]
+ if not isinstance(input_tensors, backend.KerasTensor):
+ raise ValueError(f'Argument `input_tensors` must be a KerasTensor. Received invalid value: input_tensors={input_tensors}')
+ inputs = Input(tensor=input_tensors, name=input_name)
+ new_layers = [inputs] + new_layers
+ elif input_batch_shape is not None:
+ inputs = Input(tensor=input_tensors, batch_shape=input_batch_shape, dtype=input_dtype, name=input_name)
+ new_layers = [inputs] + new_layers
+ return Sequential(new_layers, name=model.name, trainable=model.trainable)
+
+def _clone_functional_model(model, clone_function, input_tensors=None, call_function=None):
+ if not callable(clone_function):
+ raise ValueError(f'Expected `clone_function` argument to be a callable. Received: clone_function={clone_function}')
+ if not isinstance(model, Functional):
+ raise ValueError(f'Expected `model` argument to be a Functional Model instance. Received: model={model}')
+ if input_tensors is not None:
+ if not all((isinstance(x, backend.KerasTensor) for x in tree.flatten(input_tensors))):
+ raise ValueError(f'All entries in `input_tensors` must be KerasTensors. Received invalid values: input_tensors={input_tensors}')
+ try:
+ tree.assert_same_structure(input_tensors, model.input)
+ except (ValueError, TypeError) as e:
+ raise ValueError(f'`input_tensors` must have the same structure as model.input\nReference structure: {model.input}\nReceived structure: {input_tensors}') from e
+ else:
+ input_tensors = tree.map_structure(lambda x: Input(batch_shape=x.shape, dtype=x.dtype, name=x.name), model.input)
+
+ def operation_fn(layer):
+ new_layer = clone_function(layer)
+ return new_layer
+ output_tensors = model._run_through_graph(input_tensors, operation_fn=operation_fn, call_fn=call_function)
+ if functional_like_constructor(model.__class__):
+ new_model = model.__class__(input_tensors, output_tensors, name=model.name)
+ else:
+ new_model = Functional(input_tensors, output_tensors, name=model.name)
+ return new_model
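+# --- Added usage sketch (annotation in this dump, not part of the upstream file) ---
+# clone_model() above rebuilds a model's graph with freshly initialized
+# weights; clone_function lets you rewrite layers on the way through. A
+# hypothetical clone that doubles the units of every Dense layer:
+import keras
+model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(8), keras.layers.Dense(1)])
+def wider(layer):
+    config = layer.get_config()
+    if isinstance(layer, keras.layers.Dense):
+        config['units'] *= 2
+    return layer.__class__.from_config(config)
+clone = keras.models.clone_model(model, clone_function=wider)
+print([layer.units for layer in clone.layers if hasattr(layer, 'units')])  # [16, 2]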
+# File: keras-master/keras/src/models/functional.py
+import copy
+import inspect
+import typing
+import warnings
+from keras.src import backend
+from keras.src import ops
+from keras.src import tree
+from keras.src.backend.common import global_state
+from keras.src.layers.core.input_layer import Input
+from keras.src.layers.core.input_layer import InputLayer
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.legacy.saving import saving_utils
+from keras.src.legacy.saving import serialization as legacy_serialization
+from keras.src.models.model import Model
+from keras.src.ops.function import Function
+from keras.src.ops.function import _build_map
+from keras.src.ops.function import make_node_key
+from keras.src.ops.node import KerasHistory
+from keras.src.ops.node import Node
+from keras.src.saving import serialization_lib
+from keras.src.utils import tracking
+
+class Functional(Function, Model):
+
+ def __new__(cls, *args, **kwargs):
+ return typing.cast(cls, super().__new__(cls))
+
+ @tracking.no_automatic_dependency_tracking
+ def __init__(self, inputs, outputs, name=None, **kwargs):
+ if isinstance(inputs, dict):
+ for (k, v) in inputs.items():
+ if isinstance(v, backend.KerasTensor) and k != v.name:
+ warnings.warn(f"When providing `inputs` as a dict, all keys in the dict must match the names of the corresponding tensors. Received key '{k}' mapping to value {v} which has name '{v.name}'. Change the tensor name to '{k}' (via `Input(..., name='{k}')`)")
+ trainable = kwargs.pop('trainable', None)
+ flat_inputs = tree.flatten(inputs)
+ flat_outputs = tree.flatten(outputs)
+ for x in flat_inputs:
+ if not isinstance(x, backend.KerasTensor):
+ raise ValueError(f'All `inputs` values must be KerasTensors. Received: inputs={inputs} including invalid value {x} of type {type(x)}')
+ for x in flat_outputs:
+ if not isinstance(x, backend.KerasTensor):
+ raise ValueError(f'All `outputs` values must be KerasTensors. 
Received: outputs={outputs} including invalid value {x} of type {type(x)}') + if not all((is_input_keras_tensor(t) for t in flat_inputs)): + (inputs, outputs) = clone_graph_nodes(inputs, outputs) + Function.__init__(self, inputs, outputs, name=name, **kwargs) + if trainable is not None: + self.trainable = trainable + self._layers = self.layers + self.build(None) + self._convert_input_args = False + self._allow_non_tensor_positional_args = True + output_layers = [x._keras_history[0] for x in self.outputs] + self.output_names = [x.name for x in output_layers] + + def _lock_state(self): + pass + + def _obj_type(self): + return 'Functional' + + @property + def layers(self): + layers = [] + for operation in self._operations: + if isinstance(operation, Layer): + layers.append(operation) + return layers + + def call(self, inputs, training=None, mask=None): + inputs = self._standardize_inputs(inputs) + if mask is None: + masks = [None] * len(inputs) + else: + masks = tree.flatten(mask) + for (x, mask) in zip(inputs, masks): + if mask is not None: + x._keras_mask = mask + outputs = self._run_through_graph(inputs, operation_fn=lambda op: operation_fn(op, training=training)) + return unpack_singleton(outputs) + + def compute_output_spec(self, inputs, training=None, mask=None): + return super().compute_output_spec(inputs) + + def compute_output_shape(self, input_shape): + return super().compute_output_shape(input_shape) + + def build(self, input_shape): + self.built = True + + @property + def input_shape(self): + input_shapes = tree.map_structure(lambda x: x.shape, self.inputs) + if isinstance(input_shapes, list) and len(input_shapes) == 1: + return input_shapes[0] + return input_shapes + + @property + def output_shape(self): + output_shapes = tree.map_structure(lambda x: x.shape, self.outputs) + if isinstance(output_shapes, list) and len(output_shapes) == 1: + return output_shapes[0] + return output_shapes + + def _assert_input_compatibility(self, *args): + return super(Model, self)._assert_input_compatibility(*args) + + def _maybe_warn_inputs_struct_mismatch(self, inputs): + try: + tree.assert_same_structure(inputs, self._inputs_struct, check_types=False) + except: + model_inputs_struct = tree.map_structure(lambda x: x.name, self._inputs_struct) + inputs_struct = tree.map_structure(lambda x: '*', inputs) + warnings.warn(f"The structure of `inputs` doesn't match the expected structure: {model_inputs_struct}. Received: the structure of inputs={inputs_struct}") + + def _convert_inputs_to_tensors(self, flat_inputs): + converted = [] + for (x, input) in zip(flat_inputs, self._inputs): + if x is None: + converted.append(x) + else: + converted.append(ops.convert_to_tensor(x, dtype=input.dtype, sparse=input.sparse)) + return converted + + def _adjust_input_rank(self, flat_inputs): + flat_ref_shapes = [x.shape for x in self._inputs] + adjusted = [] + for (x, ref_shape) in zip(flat_inputs, flat_ref_shapes): + if x is None: + adjusted.append(x) + continue + x_rank = len(x.shape) + ref_rank = len(ref_shape) + if x_rank == ref_rank: + adjusted.append(x) + continue + if x_rank == ref_rank + 1: + if x.shape[-1] == 1: + adjusted.append(ops.squeeze(x, axis=-1)) + continue + if x_rank == ref_rank - 1: + if ref_shape[-1] == 1: + adjusted.append(ops.expand_dims(x, axis=-1)) + continue + raise ValueError(f'Invalid input shape for input {x}. 
Expected shape {ref_shape}, but input has incompatible shape {x.shape}') + for i in range(len(flat_inputs)): + if hasattr(flat_inputs[i], '_keras_history'): + adjusted[i]._keras_history = flat_inputs[i]._keras_history + if hasattr(flat_inputs[i], '_keras_mask'): + adjusted[i]._keras_mask = flat_inputs[i]._keras_mask + return adjusted + + def _standardize_inputs(self, inputs): + self._maybe_warn_inputs_struct_mismatch(inputs) + flat_inputs = tree.flatten(inputs) + flat_inputs = self._convert_inputs_to_tensors(flat_inputs) + return self._adjust_input_rank(flat_inputs) + + @property + def input(self): + return self._inputs_struct + + @property + def output(self): + return self._outputs_struct + + def add_loss(self, loss): + raise NotImplementedError + + @property + def input_spec(self): + if hasattr(self, '_manual_input_spec'): + return self._manual_input_spec + + def shape_with_no_batch_size(x): + x = list(x) + if x: + x[0] = None + return tuple(x) + + def make_spec_for_tensor(x): + optional = False + if isinstance(x._keras_history[0], InputLayer): + if x._keras_history[0].optional: + optional = True + return InputSpec(shape=shape_with_no_batch_size(x.shape), allow_last_axis_squeeze=True, name=x._keras_history[0].name, optional=optional) + if isinstance(self._inputs_struct, dict): + if all((isinstance(x, backend.KerasTensor) for x in self._inputs_struct.values())): + names = sorted(self._inputs_struct.keys()) + return [InputSpec(shape=shape_with_no_batch_size(self._inputs_struct[name].shape), allow_last_axis_squeeze=True, name=name) for name in names] + return None + return [make_spec_for_tensor(x) for x in self.inputs] + + @input_spec.setter + def input_spec(self, value): + self._manual_input_spec = value + + def get_config(self): + if not functional_like_constructor(self.__class__): + return Model.get_config(self) + config = {'name': self.name, 'trainable': self.trainable} + node_reindexing_map = {} + for operation in self.operations: + if issubclass(operation.__class__, Functional): + kept_nodes = 1 + else: + kept_nodes = 0 + for (original_node_index, node) in enumerate(operation._inbound_nodes): + node_key = make_node_key(operation, original_node_index) + if node_key in self._nodes: + node_reindexing_map[node_key] = kept_nodes + kept_nodes += 1 + layer_configs = [] + for operation in self.operations: + filtered_inbound_nodes = [] + for (original_node_index, node) in enumerate(operation._inbound_nodes): + node_key = make_node_key(operation, original_node_index) + if node_key in self._nodes: + node_data = serialize_node(node, own_nodes=self._nodes) + if node_data is not None: + filtered_inbound_nodes.append(node_data) + serialize_obj_fn = serialization_lib.serialize_keras_object + if global_state.get_global_attribute('use_legacy_config', False): + serialize_obj_fn = legacy_serialization.serialize_keras_object + layer_config = serialize_obj_fn(operation) + layer_config['name'] = operation.name + layer_config['inbound_nodes'] = filtered_inbound_nodes + layer_configs.append(layer_config) + config['layers'] = layer_configs + + def get_tensor_config(tensor): + operation = tensor._keras_history[0] + node_index = tensor._keras_history[1] + tensor_index = tensor._keras_history[2] + node_key = make_node_key(operation, node_index) + assert node_key in self._nodes + new_node_index = node_reindexing_map[node_key] + return [operation.name, new_node_index, tensor_index] + + def map_tensors(tensors): + if isinstance(tensors, backend.KerasTensor): + return [get_tensor_config(tensors)] + return 
tree.map_structure(get_tensor_config, tensors) + config['input_layers'] = map_tensors(self._inputs_struct) + config['output_layers'] = map_tensors(self._outputs_struct) + return copy.deepcopy(config) + +def functional_from_config(cls, config, custom_objects=None): + created_layers = {} + unprocessed_nodes = {} + + def add_unprocessed_node(layer, node_data): + if layer not in unprocessed_nodes: + unprocessed_nodes[layer] = [node_data] + else: + unprocessed_nodes[layer].append(node_data) + + def process_node(layer, node_data): + (args, kwargs) = deserialize_node(node_data, created_layers) + layer(*args, **kwargs) + + def process_layer(layer_data): + layer_name = layer_data['name'] + if 'module' not in layer_data: + layer = saving_utils.model_from_config(layer_data, custom_objects=custom_objects) + else: + layer = serialization_lib.deserialize_keras_object(layer_data, custom_objects=custom_objects) + created_layers[layer_name] = layer + inbound_nodes_data = layer_data['inbound_nodes'] + for node_data in inbound_nodes_data: + add_unprocessed_node(layer, node_data) + for layer_data in config['layers']: + process_layer(layer_data) + while unprocessed_nodes: + for layer_data in config['layers']: + layer = created_layers[layer_data['name']] + if layer in unprocessed_nodes: + node_data_list = unprocessed_nodes[layer] + node_index = 0 + while node_index < len(node_data_list): + node_data = node_data_list[node_index] + try: + process_node(layer, node_data) + except IndexError: + break + node_index += 1 + if node_index < len(node_data_list): + unprocessed_nodes[layer] = node_data_list[node_index:] + else: + del unprocessed_nodes[layer] + name = config.get('name') + trainable = config.get('trainable') + + def get_tensor(layer_name, node_index, tensor_index): + assert layer_name in created_layers + layer = created_layers[layer_name] + if isinstance(layer, Functional): + node_index -= 1 + layer_output_tensors = layer._inbound_nodes[node_index].output_tensors + return layer_output_tensors[tensor_index] + + def map_tensors(tensors): + if isinstance(tensors, list) and len(tensors) == 3 and isinstance(tensors[0], str): + return get_tensor(*tensors) + if isinstance(tensors, dict): + return {k: map_tensors(v) for (k, v) in tensors.items()} + if isinstance(tensors, tuple): + return tuple([map_tensors(v) for v in tensors]) + return [map_tensors(v) for v in tensors] + input_tensors = map_tensors(config['input_layers']) + output_tensors = map_tensors(config['output_layers']) + if isinstance(input_tensors, list) and len(input_tensors) == 1: + input_tensors = input_tensors[0] + if isinstance(output_tensors, list) and len(output_tensors) == 1: + output_tensors = output_tensors[0] + return cls(inputs=input_tensors, outputs=output_tensors, name=name, trainable=trainable) + +def operation_fn(operation, training): + + def call(*args, **kwargs): + if hasattr(operation, '_call_has_training_arg') and operation._call_has_training_arg and (training is not None): + kwargs['training'] = training + return operation(*args, **kwargs) + return call + +def functional_like_constructor(cls): + init_args = inspect.getfullargspec(cls.__init__).args[1:] + functional_init_args = inspect.getfullargspec(Functional.__init__).args[1:] + if init_args == functional_init_args: + return True + return False + +def unpack_singleton(x): + if isinstance(x, (list, tuple)) and len(x) == 1: + return x[0] + return x + +def serialize_node(node, own_nodes=()): + if not node.input_tensors: + return + + def serialize_keras_tensor(x): + if isinstance(x, 
backend.KerasTensor): + (operation, node_index, tensor_index) = x._keras_history + irrelevant_node_count = 0 + for (i, node) in enumerate(operation._inbound_nodes[:node_index]): + node_key = make_node_key(operation, i) + if node_key not in own_nodes: + irrelevant_node_count += 1 + x._keras_history = KerasHistory(operation, node_index - irrelevant_node_count, tensor_index) + serialized = serialization_lib.serialize_keras_object(x) + x._keras_history = KerasHistory(operation, node_index, tensor_index) + return serialized + return x + args = node.arguments.args + kwargs = node.arguments.kwargs + args = tree.map_structure(serialize_keras_tensor, args) + kwargs = tree.map_structure(serialize_keras_tensor, kwargs) + return {'args': serialization_lib.serialize_keras_object(args), 'kwargs': serialization_lib.serialize_keras_object(kwargs)} + +def deserialize_node(node_data, created_layers): + if not node_data: + return ([], {}) + if isinstance(node_data, list): + input_tensors = [] + for input_data in node_data: + inbound_layer_name = input_data[0] + inbound_node_index = input_data[1] + inbound_tensor_index = input_data[2] + if len(input_data) == 3: + kwargs = {} + elif len(input_data) == 4: + kwargs = input_data[3] + else: + raise ValueError('Cannot deserialize the model (invalid config data?)') + inbound_layer = created_layers[inbound_layer_name] + if len(inbound_layer._inbound_nodes) <= inbound_node_index: + raise IndexError(f'Layer node index out of bounds.\ninbound_layer = {inbound_layer}\ninbound_layer._inbound_nodes = {inbound_layer._inbound_nodes}\ninbound_node_index = {inbound_node_index}') + inbound_node = inbound_layer._inbound_nodes[inbound_node_index] + input_tensors.append(inbound_node.output_tensors[inbound_tensor_index]) + return ([unpack_singleton(input_tensors)], kwargs) + args = serialization_lib.deserialize_keras_object(node_data['args']) + kwargs = serialization_lib.deserialize_keras_object(node_data['kwargs']) + + def convert_revived_tensor(x): + if isinstance(x, backend.KerasTensor): + history = x._pre_serialization_keras_history + if history is None: + return x + layer = created_layers.get(history[0], None) + if layer is None: + raise ValueError(f'Unknown layer: {history[0]}') + inbound_node_index = history[1] + inbound_tensor_index = history[2] + if len(layer._inbound_nodes) <= inbound_node_index: + raise ValueError(f'Layer node index out of bounds.\ninbound_layer = {layer}\ninbound_layer._inbound_nodes = {layer._inbound_nodes}\ninbound_node_index = {inbound_node_index}') + inbound_node = layer._inbound_nodes[inbound_node_index] + return inbound_node.output_tensors[inbound_tensor_index] + return x + args = tree.map_structure(convert_revived_tensor, args) + kwargs = tree.map_structure(convert_revived_tensor, kwargs) + return (args, kwargs) + +def is_input_keras_tensor(x): + (operation, node_index, _) = x._keras_history + node = operation._inbound_nodes[node_index] + return node.is_input + +def clone_single_keras_tensor(x): + return backend.KerasTensor(shape=x.shape, dtype=x.dtype, sparse=x.sparse, name=x.name + '_clone') + +def clone_keras_tensors(tensors, kt_id_mapping): + + def swap(x): + if not isinstance(x, backend.KerasTensor): + return x + if id(x) in kt_id_mapping: + return kt_id_mapping[id(x)] + new_x = clone_single_keras_tensor(x) + kt_id_mapping[id(x)] = new_x + return new_x + return tree.map_structure(swap, tensors) + +def find_nodes_by_inputs_and_outputs(inputs, outputs): + (nodes, _) = _build_map(inputs, outputs) + return nodes + +def clone_graph_nodes(inputs, 
outputs): + nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs) + cloned_inputs = [] + cloned_outputs = [] + kt_id_mapping = {} + op_id_mapping = {} + for kt_input in tree.flatten(inputs): + if is_input_keras_tensor(kt_input): + cloned_inputs.append(kt_input) + kt_id_mapping[id(kt_input)] = kt_input + else: + cloned_input = Input(batch_shape=kt_input.shape, dtype=kt_input.dtype, sparse=kt_input.sparse, name=kt_input.name + 'CLONE') + cloned_inputs.append(cloned_input) + kt_id_mapping[id(kt_input)] = cloned_input + op_id_mapping[id(kt_input._keras_history[0])] = cloned_input._keras_history[0] + cloned_inputs = tree.pack_sequence_as(inputs, cloned_inputs) + for kt_output in tree.flatten(outputs): + cpy = clone_single_keras_tensor(kt_output) + cpy._keras_history = kt_output._keras_history + cloned_outputs.append(cpy) + kt_id_mapping[id(kt_output)] = cpy + cloned_outputs = tree.pack_sequence_as(outputs, cloned_outputs) + for node in nodes_to_clone: + if id(node.operation) in op_id_mapping: + operation = op_id_mapping[id(node.operation)] + else: + operation = node.operation + output_copy = clone_keras_tensors(node.output_tensors, kt_id_mapping) + if not isinstance(operation, InputLayer): + call_args_copy = clone_keras_tensors(node.arguments.args, kt_id_mapping) + call_kwargs_copy = clone_keras_tensors(node.arguments.kwargs, kt_id_mapping) + else: + call_args_copy = () + call_kwargs_copy = {} + Node(operation, call_args=call_args_copy, call_kwargs=call_kwargs_copy, outputs=output_copy) + return (cloned_inputs, cloned_outputs) + +# File: keras-master/keras/src/models/model.py +import inspect +import json +import typing +import warnings +from keras.src import backend +from keras.src import utils +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.models.variable_mapping import map_saveable_variables +from keras.src.saving import saving_api +from keras.src.trainers import trainer as base_trainer +from keras.src.utils import summary_utils +from keras.src.utils import traceback_utils +if backend.backend() == 'tensorflow': + from keras.src.backend.tensorflow.trainer import TensorFlowTrainer as Trainer +elif backend.backend() == 'jax': + from keras.src.backend.jax.trainer import JAXTrainer as Trainer +elif backend.backend() == 'torch': + from keras.src.backend.torch.trainer import TorchTrainer as Trainer +elif backend.backend() == 'numpy': + from keras.src.backend.numpy.trainer import NumpyTrainer as Trainer +else: + raise RuntimeError(f"Backend '{backend.backend()}' must implement the Trainer class.") + +@keras_export(['keras.Model', 'keras.models.Model']) +class Model(Trainer, base_trainer.Trainer, Layer): + + def __new__(cls, *args, **kwargs): + if functional_init_arguments(args, kwargs) and cls == Model: + from keras.src.models.functional import Functional + return Functional.__new__(Functional, *args, **kwargs) + return typing.cast(cls, super().__new__(cls)) + + def __init__(self, *args, **kwargs): + Trainer.__init__(self) + from keras.src.models import functional + if functional_init_arguments(args, kwargs): + inject_functional_model_class(self.__class__) + functional.Functional.__init__(self, *args, **kwargs) + else: + Layer.__init__(self, *args, **kwargs) + + def call(self, *args, **kwargs): + raise NotImplementedError(f'Model {self.__class__.__name__} does not have a `call()` method implemented.') + + @property + def layers(self): + return list(self._flatten_layers(include_self=False, recursive=False)) + + @layers.setter + def 
layers(self, _): + raise AttributeError('`Model.layers` attribute is reserved and should not be used. Please use another name.') + + @traceback_utils.filter_traceback + def get_layer(self, name=None, index=None): + if index is not None and name is not None: + raise ValueError(f'Provide only a layer name or a layer index. Received: index={index}, name={name}.') + if index is not None: + if len(self.layers) <= index: + raise ValueError(f'Was asked to retrieve layer at index {index} but model only has {len(self.layers)} layers.') + else: + return self.layers[index] + if name is not None: + for layer in self.layers: + if layer.name == name: + return layer + raise ValueError(f'No such layer: {name}. Existing layers are: {list((layer.name for layer in self.layers))}.') + raise ValueError('Provide either a layer name or layer index at `get_layer`.') + + @traceback_utils.filter_traceback + def summary(self, line_length=None, positions=None, print_fn=None, expand_nested=False, show_trainable=False, layer_range=None): + summary_utils.print_summary(self, line_length=line_length, positions=positions, print_fn=print_fn, expand_nested=expand_nested, show_trainable=show_trainable, layer_range=layer_range) + + @traceback_utils.filter_traceback + def save(self, filepath, overwrite=True, zipped=None, **kwargs): + return saving_api.save_model(self, filepath, overwrite=overwrite, zipped=zipped, **kwargs) + + @traceback_utils.filter_traceback + def save_weights(self, filepath, overwrite=True): + return saving_api.save_weights(self, filepath, overwrite=overwrite) + + @traceback_utils.filter_traceback + def load_weights(self, filepath, skip_mismatch=False, **kwargs): + saving_api.load_weights(self, filepath, skip_mismatch=skip_mismatch, **kwargs) + + def quantize(self, mode, **kwargs): + from keras.src.dtype_policies import QUANTIZATION_MODES + type_check = kwargs.pop('type_check', True) + if kwargs: + raise ValueError(f'Unrecognized keyword arguments passed to {self.__class__.__name__}: {kwargs}') + if mode not in QUANTIZATION_MODES: + raise ValueError(f'Invalid quantization mode. Expected one of {QUANTIZATION_MODES}. Received: mode={mode}') + mode_changed = False + for layer in self._flatten_layers(): + list_of_sublayers = list(layer._flatten_layers()) + if len(list_of_sublayers) == 1: + try: + layer.quantize(mode, type_check=type_check) + mode_changed = True + except NotImplementedError as e: + warnings.warn(str(e)) + if mode_changed: + self.train_function = None + self.test_function = None + self.predict_function = None + + def build_from_config(self, config): + if not config: + return + if 'input_shape' in config: + if utils.is_default(self.build): + status = self._build_by_run_for_single_pos_arg(config['input_shape']) + else: + try: + self.build(config['input_shape']) + status = True + except: + status = False + self._build_shapes_dict = config + elif 'shapes_dict' in config: + if utils.is_default(self.build): + status = self._build_by_run_for_kwargs(config['shapes_dict']) + else: + try: + self.build(**config['shapes_dict']) + status = True + except: + status = False + self._build_shapes_dict = config['shapes_dict'] + if not status: + warnings.warn(f"Model '{self.name}' had a build config, but the model cannot be built automatically in `build_from_config(config)`. You should implement `def build_from_config(self, config)`, and you might also want to implement the method that generates the config at saving time, `def get_build_config(self)`. 
The method `build_from_config()` is meant to create the state of the model (i.e. its variables) upon deserialization.", stacklevel=2) + + def to_json(self, **kwargs): + from keras.src.saving import serialization_lib + model_config = serialization_lib.serialize_keras_object(self) + return json.dumps(model_config, **kwargs) + + def export(self, filepath, format='tf_saved_model', verbose=True): + from keras.src.export import export_lib + export_lib.export_model(self, filepath, verbose) + + @classmethod + def from_config(cls, config, custom_objects=None): + from keras.src.models.functional import Functional + functional_config_keys = ['name', 'layers', 'input_layers', 'output_layers'] + is_functional_config = all((key in config for key in functional_config_keys)) + argspec = inspect.getfullargspec(cls.__init__) + functional_init_args = inspect.getfullargspec(Functional.__init__).args[1:] + revivable_as_functional = cls in {Functional, Model} or argspec.args[1:] == functional_init_args or (argspec.varargs == 'args' and argspec.varkw == 'kwargs') + if is_functional_config and revivable_as_functional: + from keras.src.models.functional import functional_from_config + return functional_from_config(cls, config, custom_objects=custom_objects) + try: + return cls(**config) + except TypeError as e: + raise TypeError(f'Unable to revive model from config. When overriding the `get_config()` method, make sure that the returned config contains all items used as arguments in the constructor to {cls}, which is the default behavior. You can override this default behavior by defining a `from_config(cls, config)` class method to specify how to create an instance of {cls.__name__} from its config.\n\nReceived config={config}\n\nError encountered during deserialization: {e}') + + def _get_variable_map(self): + store = {} + map_saveable_variables(self, store=store, visited_saveables=set()) + return store + + def get_state_tree(self, value_format='backend_tensor'): + variables = {} + variables['trainable_variables'] = self._create_nested_dict(self.trainable_variables, value_format) + variables['non_trainable_variables'] = self._create_nested_dict(self.non_trainable_variables, value_format) + variables['optimizer_variables'] = self._create_nested_dict(self.optimizer.variables, value_format) + variables['metrics_variables'] = self._create_nested_dict(self.metrics_variables, value_format) + return variables + + def _create_nested_dict(self, variables, value_format): + flat_dict = {} + for v in variables: + if v.path in flat_dict: + raise ValueError(f"The following variable path is found twice in the model: '{v.path}'. `get_state_tree()` can only be called when all variable paths are unique. Make sure to give unique names to your layers (and other objects).") + if value_format == 'backend_tensor': + flat_dict[v.path] = v.value + elif value_format == 'numpy_array': + flat_dict[v.path] = v.numpy() + else: + raise ValueError(f"Invalid `value_format` argument. Expected one of {{'numpy_array', 'backend_tensor'}}. 
Received: value_format={value_format}") + nested_dict = {} + for (path, value) in flat_dict.items(): + parts = path.split('/') + current_dict = nested_dict + for part in parts[:-1]: + if part not in current_dict: + current_dict[part] = {} + current_dict = current_dict[part] + current_dict[parts[-1]] = value + return nested_dict + + def set_state_tree(self, state_tree): + for (k, v) in state_tree.items(): + path_value_dict = self._flatten_nested_dict(v) + if k == 'trainable_variables': + self._assign_variable_values(self.trainable_variables, path_value_dict) + elif k == 'non_trainable_variables': + self._assign_variable_values(self.non_trainable_variables, path_value_dict) + elif k == 'optimizer_variables': + self._assign_variable_values(self.optimizer.variables, path_value_dict) + elif k == 'metrics_variables': + self._assign_variable_values(self.metrics_variables, path_value_dict) + else: + raise ValueError(f'Unknown variable name: {k}') + + def _assign_variable_values(self, variables, path_value_dict): + for (path, value) in path_value_dict.items(): + for variable in variables: + if variable.path == path: + variable.assign(value) + + def _flatten_nested_dict(self, nested_dict): + flat_dict = {} + + def _flatten(current_dict, prefix=''): + for (key, value) in current_dict.items(): + if isinstance(value, dict): + _flatten(value, prefix + key + '/') + else: + flat_dict[prefix + key] = value + _flatten(nested_dict) + return flat_dict + +@keras_export('keras.models.model_from_json') +def model_from_json(json_string, custom_objects=None): + from keras.src.saving import serialization_lib + model_config = json.loads(json_string) + return serialization_lib.deserialize_keras_object(model_config, custom_objects=custom_objects) + +def functional_init_arguments(args, kwargs): + return len(args) == 2 or (len(args) == 1 and 'outputs' in kwargs) or ('inputs' in kwargs and 'outputs' in kwargs) + +def inject_functional_model_class(cls): + from keras.src.models import functional + if cls == Model: + return functional.Functional + if cls == object: + return object + cls.__bases__ = tuple((inject_functional_model_class(base) for base in cls.__bases__)) + cls.__new__(cls) + return cls + +# File: keras-master/keras/src/models/sequential.py +import copy +import inspect +import typing +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state +from keras.src.backend.common import standardize_shape +from keras.src.layers.core.input_layer import InputLayer +from keras.src.layers.layer import Layer +from keras.src.legacy.saving import saving_utils +from keras.src.legacy.saving import serialization as legacy_serialization +from keras.src.models.functional import Functional +from keras.src.models.model import Model +from keras.src.saving import serialization_lib + +@keras_export(['keras.Sequential', 'keras.models.Sequential']) +class Sequential(Model): + + def __new__(cls, *args, **kwargs): + return typing.cast(cls, super().__new__(cls)) + + def __init__(self, layers=None, trainable=True, name=None): + super().__init__(trainable=trainable, name=name) + self._functional = None + self._layers = [] + if layers: + for layer in layers: + self.add(layer, rebuild=False) + self._maybe_rebuild() + + def add(self, layer, rebuild=True): + if not self._layers: + if getattr(layer, '_input_shape_arg', None) is not None: + self.add(InputLayer(shape=layer._input_shape_arg)) + if hasattr(layer, '_keras_history'): + origin_layer = layer._keras_history[0] + if 
isinstance(origin_layer, InputLayer): + layer = origin_layer + if not isinstance(layer, Layer): + raise ValueError(f'Only instances of `keras.Layer` can be added to a Sequential model. Received: {layer} (of type {type(layer)})') + if not self._is_layer_name_unique(layer): + raise ValueError(f"All layers added to a Sequential model should have unique names. Name '{layer.name}' is already the name of a layer in this model. Update the `name` argument to pass a unique name.") + if isinstance(layer, InputLayer) and self._layers and isinstance(self._layers[0], InputLayer): + raise ValueError(f"Sequential model '{self.name}' has already been configured to use input shape {self._layers[0].batch_shape}. You cannot add a different Input layer to it.") + self._layers.append(layer) + if rebuild: + self._maybe_rebuild() + else: + self.built = False + self._functional = None + + def pop(self, rebuild=True): + layer = self._layers.pop() + self.built = False + self._functional = None + if rebuild: + self._maybe_rebuild() + return layer + + def _maybe_rebuild(self): + self.built = False + self._functional = None + if isinstance(self._layers[0], InputLayer) and len(self._layers) > 1: + input_shape = self._layers[0].batch_shape + self.build(input_shape) + elif hasattr(self._layers[0], 'input_shape') and len(self._layers) > 1: + input_shape = self._layers[0].input_shape + self.build(input_shape) + + def _lock_state(self): + pass + + def _obj_type(self): + return 'Sequential' + + def build(self, input_shape=None): + try: + input_shape = standardize_shape(input_shape) + except: + return + if not self._layers: + raise ValueError(f'Sequential model {self.name} cannot be built because it has no layers. Call `model.add(layer)`.') + if isinstance(self._layers[0], InputLayer): + if self._layers[0].batch_shape != input_shape: + raise ValueError(f"Sequential model '{self.name}' has already been configured to use input shape {self._layers[0].batch_shape}. You cannot build it with input_shape {input_shape}") + else: + dtype = self._layers[0].compute_dtype + self._layers = [InputLayer(batch_shape=input_shape, dtype=dtype)] + self._layers + inputs = self._layers[0].output + x = inputs + for layer in self._layers[1:]: + try: + x = layer(x) + except NotImplementedError: + return + except TypeError as e: + signature = inspect.signature(layer.call) + positional_args = [param for param in signature.parameters.values() if param.default == inspect.Parameter.empty] + if len(positional_args) != 1: + raise ValueError(f'Layers added to a Sequential model can only have a single positional argument, the input tensor. 
Layer {layer.__class__.__name__} has multiple positional arguments: {positional_args}')
+                raise e
+        outputs = x
+        self._functional = Functional(inputs=inputs, outputs=outputs)
+        self.built = True
+
+    def call(self, inputs, training=None, mask=None):
+        if self._functional:
+            return self._functional.call(inputs, training=training, mask=mask)
+        # Fallback: just apply the layer sequence, threading the mask through.
+        for layer in self.layers:
+            kwargs = {}
+            if layer._call_has_mask_arg:
+                kwargs['mask'] = mask
+            if layer._call_has_training_arg and training is not None:
+                kwargs['training'] = training
+            outputs = layer(inputs, **kwargs)
+            inputs = outputs
+
+            def _get_mask_from_keras_tensor(kt):
+                return getattr(kt, '_keras_mask', None)
+
+            mask = tree.map_structure(_get_mask_from_keras_tensor, outputs)
+        return outputs
+
+    @property
+    def layers(self):
+        layers = self._layers
+        if layers and isinstance(layers[0], InputLayer):
+            return layers[1:]
+        return layers[:]
+
+    def compute_output_spec(self, inputs, training=None, mask=None):
+        if self._functional:
+            return self._functional.compute_output_spec(inputs, training=training, mask=mask)
+        for layer in self.layers:
+            outputs = layer.compute_output_spec(inputs, training=training)
+            inputs = outputs
+        return outputs
+
+    def compute_output_shape(self, input_shape):
+        if self._functional:
+            return self._functional.compute_output_shape(input_shape)
+        for layer in self.layers:
+            output_shape = layer.compute_output_shape(input_shape)
+            input_shape = output_shape
+        return output_shape
+
+    @property
+    def input_shape(self):
+        if self._functional:
+            return self._functional.input_shape
+        raise AttributeError(f"Sequential model '{self.name}' has no defined input shape yet.")
+
+    @property
+    def output_shape(self):
+        if self._functional:
+            return self._functional.output_shape
+        raise AttributeError(f"Sequential model '{self.name}' has no defined output shape yet.")
+
+    @property
+    def inputs(self):
+        if self._functional:
+            return self._functional.inputs
+        raise AttributeError(f"Sequential model '{self.name}' has no defined inputs yet.")
+
+    @property
+    def outputs(self):
+        if self._functional:
+            return self._functional.outputs
+        raise AttributeError(f"Sequential model '{self.name}' has no defined outputs yet.")
+
+    @property
+    def input_dtype(self):
+        layers = self._layers
+        if layers and isinstance(layers[0], InputLayer):
+            return layers[0].dtype
+        return super().input_dtype
+
+    def _is_layer_name_unique(self, layer):
+        for ref_layer in self._layers:
+            if layer.name == ref_layer.name and ref_layer is not layer:
+                return False
+        return True
+
+    def get_config(self):
+        serialize_fn = serialization_lib.serialize_keras_object
+        if global_state.get_global_attribute('use_legacy_config', False):
+            serialize_fn = legacy_serialization.serialize_keras_object
+        layer_configs = []
+        for layer in super().layers:
+            layer_configs.append(serialize_fn(layer))
+        config = Model.get_config(self)
+        config['name'] = self.name
+        config['layers'] = copy.deepcopy(layer_configs)
+        if self._functional is not None:
+            config['build_input_shape'] = self._layers[0].batch_shape
+        return config
+
+    @classmethod
+    def from_config(cls, config, custom_objects=None):
+        if 'name' in config:
+            name = config['name']
+            build_input_shape = config.get('build_input_shape')
+            layer_configs = config['layers']
+        else:
+            name = None
+            layer_configs = config
+            # Legacy list-style configs carry no 'build_input_shape'; default it
+            # so the `model.build(...)` check below cannot raise a NameError.
+            build_input_shape = None
+        model = cls(name=name)
+        for layer_config in layer_configs:
+            if 'module' not in layer_config:
+                layer = saving_utils.model_from_config(layer_config, custom_objects=custom_objects)
+            else:
+                layer = 
serialization_lib.deserialize_keras_object(layer_config, custom_objects=custom_objects) + model.add(layer) + if not model._functional and build_input_shape and isinstance(build_input_shape, (tuple, list)): + model.build(build_input_shape) + return model + +# File: keras-master/keras/src/models/variable_mapping.py +from keras.src.layers.layer import Layer +from keras.src.metrics.metric import Metric +from keras.src.optimizers.optimizer import Optimizer +from keras.src.saving import saving_lib +from keras.src.saving.keras_saveable import KerasSaveable + +def map_saveable_variables(saveable, store, visited_saveables): + if id(saveable) in visited_saveables: + return + visited_saveables.add(id(saveable)) + variables = [] + if isinstance(saveable, Layer): + variables = saveable._trainable_variables + saveable._non_trainable_variables + elif isinstance(saveable, Optimizer): + variables = saveable._variables + elif isinstance(saveable, Metric): + variables = saveable._variables + for v in variables: + if v.path in store: + raise ValueError(f"The model contains two variables with a duplicate path: path='{v.path}' appears at least twice. This path is used for {v} and for {store[v.path]}. In order to get a variable map, make sure to use unique paths/names for each variable.") + store[v.path] = v + for (child_attr, child_obj) in saving_lib._walk_saveable(saveable): + if isinstance(child_obj, KerasSaveable): + map_saveable_variables(child_obj, store, visited_saveables=visited_saveables) + elif isinstance(child_obj, (list, dict, tuple, set)): + map_container_variables(child_obj, store, visited_saveables=visited_saveables) + +def map_container_variables(container, store, visited_saveables): + if isinstance(container, dict): + container = list(container.values()) + for saveable in container: + if isinstance(saveable, KerasSaveable): + map_saveable_variables(saveable, store, visited_saveables=visited_saveables) + +# File: keras-master/keras/src/ops/__init__.py +from keras.src.backend import cast +from keras.src.backend import cond +from keras.src.backend import is_tensor +from keras.src.backend import name_scope +from keras.src.backend import random +from keras.src.ops import image +from keras.src.ops import operation_utils +from keras.src.ops.core import * +from keras.src.ops.linalg import * +from keras.src.ops.math import * +from keras.src.ops.nn import * +from keras.src.ops.numpy import * + +# File: keras-master/keras/src/ops/core.py +import numpy as np +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.backend.common.backend_utils import slice_along_axis +from keras.src.ops.operation import Operation +from keras.src.utils import traceback_utils + +class Map(Operation): + + def __init__(self): + super().__init__() + + def call(self, f, xs): + return backend.core.map(f, xs) + + def compute_output_spec(self, f, xs): + x = xs[0] + n = xs.shape[0] + y = backend.compute_output_spec(f, x) + + def append_batch_axis(x): + return KerasTensor(shape=(n,) + x.shape, dtype=x.dtype, sparse=x.sparse) + y = tree.map_structure(append_batch_axis, y) + return y + +@keras_export('keras.ops.map') +def map(f, xs): + if any_symbolic_tensors((xs,)): + return Map().symbolic_call(f, xs) + return backend.core.map(f, xs) + +class Scan(Operation): + + def __init__(self, reverse=False, unroll=1): + super().__init__() + self.reverse = reverse + self.unroll = unroll + + 
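+    # --- Editor's sketch (not part of the Keras source): a minimal usage
+    # example for the `keras.ops.map` and `keras.ops.scan` ops defined in this
+    # file. `map` applies `f` over the leading axis of `xs`; `scan` threads a
+    # carry through the sequence and returns the final carry plus the stacked
+    # per-step outputs. Values and variable names below are illustrative only.
+    #
+    #     import numpy as np
+    #     from keras import ops
+    #
+    #     xs = np.arange(5, dtype='float32')       # [0., 1., 2., 3., 4.]
+    #     doubled = ops.map(lambda x: x * 2, xs)   # [0., 2., 4., 6., 8.]
+    #
+    #     def step(carry, x):
+    #         total = carry + x
+    #         return total, total                  # (new carry, per-step output)
+    #
+    #     final, cumsum = ops.scan(step, init=0.0, xs=xs)
+    #     # final == 10.0, cumsum == [0., 1., 3., 6., 10.]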
def call(self, f, init, xs, length): + return backend.core.scan(f, init, xs, length, reverse=self.reverse, unroll=self.unroll) + + def compute_output_spec(self, f, init, xs, length): + if xs is None: + n = int(length) + x = None + else: + n = int(length) if length is not None else tree.flatten(xs)[0].shape[0] + x = xs[0] + (carry, y) = backend.compute_output_spec(f, init, x) + y = KerasTensor(shape=(n,) + y.shape, dtype=y.dtype, sparse=y.sparse) + return (carry, y) + +@keras_export('keras.ops.scan') +def scan(f, init, xs=None, length=None, reverse=False, unroll=1): + if any_symbolic_tensors((init, xs)): + return Scan(reverse=reverse, unroll=unroll).symbolic_call(f, init, xs, length) + return backend.core.scan(f, init, xs, length, reverse=reverse, unroll=unroll) + +class AssociativeScan(Operation): + + def __init__(self, reverse=False): + super().__init__() + self.reverse = reverse + + def call(self, f, elems, axis=0): + return backend.core.associative_scan(f, elems, reverse=self.reverse, axis=axis) + + def compute_output_spec(self, f, elems, axis): + elems_flat = tree.flatten(elems) + lens = [elem.shape[axis] for elem in elems_flat] + if len(set(lens)) != 1: + raise ValueError('Array inputs to associative_scan must have the same first dimension. (saw: {})'.format([elem.shape for elem in elems_flat])) + x = tree.pack_sequence_as(elems, [slice_along_axis(x, 0, 1, axis=axis) for x in elems_flat]) + y_spec = backend.compute_output_spec(f, x, x) + + def _restore_shape(x): + return KerasTensor(shape=elems_flat[0].shape, dtype=x.dtype, sparse=x.sparse) + y_spec = tree.map_structure(_restore_shape, y_spec) + return y_spec + +@keras_export('keras.ops.associative_scan') +def associative_scan(f, elems, reverse=False, axis=0): + if any_symbolic_tensors((elems,)): + return AssociativeScan(reverse=reverse).symbolic_call(f, elems, axis) + return backend.core.associative_scan(f, elems, reverse=reverse, axis=axis) + +class Scatter(Operation): + + def call(self, indices, values, shape): + return backend.core.scatter(indices, values, shape) + + def compute_output_spec(self, indices, values, shape): + return KerasTensor(shape, dtype=values.dtype) + +@keras_export('keras.ops.scatter') +def scatter(indices, values, shape): + if any_symbolic_tensors((indices, values, shape)): + return Scatter().symbolic_call(indices, values, shape) + return backend.core.scatter(indices, values, shape) + +class ScatterUpdate(Operation): + + def call(self, inputs, indices, updates): + return backend.core.scatter_update(inputs, indices, updates) + + def compute_output_spec(self, inputs, indices, updates): + return KerasTensor(inputs.shape, dtype=inputs.dtype) + +@keras_export('keras.ops.scatter_update') +def scatter_update(inputs, indices, updates): + if any_symbolic_tensors((inputs, indices, updates)): + return ScatterUpdate().symbolic_call(inputs, indices, updates) + return backend.core.scatter_update(inputs, indices, updates) + +class Slice(Operation): + + def call(self, inputs, start_indices, shape): + return backend.core.slice(inputs, start_indices, shape) + + def compute_output_spec(self, inputs, start_indices, shape): + return KerasTensor(shape, dtype=inputs.dtype) + +@keras_export('keras.ops.slice') +def slice(inputs, start_indices, shape): + if any_symbolic_tensors((inputs, start_indices, shape)): + return Slice().symbolic_call(inputs, start_indices, shape) + return backend.core.slice(inputs, start_indices, shape) + +class SliceUpdate(Operation): + + def call(self, inputs, start_indices, updates): + return 
backend.core.slice_update(inputs, start_indices, updates) + + def compute_output_spec(self, inputs, start_indices, updates): + return KerasTensor(inputs.shape, dtype=inputs.dtype) + +@keras_export('keras.ops.slice_update') +def slice_update(inputs, start_indices, updates): + if any_symbolic_tensors((inputs, start_indices, updates)): + return SliceUpdate().symbolic_call(inputs, start_indices, updates) + return backend.core.slice_update(inputs, start_indices, updates) + +class Switch(Operation): + + def call(self, index, branches, *operands): + return backend.core.switch(index, branches, *operands) + + def compute_output_spec(self, index, branches, *operands): + spec = backend.compute_output_spec(branches[0], *operands) + return spec + +@keras_export('keras.ops.switch') +def switch(index, branches, *operands): + if any_symbolic_tensors(operands): + return Switch().symbolic_call(index, branches, *operands) + return backend.core.switch(index, branches, *operands) + +class WhileLoop(Operation): + + def __init__(self, cond, body, maximum_iterations): + super().__init__() + self.cond = cond + self.body = body + self.maximum_iterations = maximum_iterations + + def call(self, loop_vars): + return backend.core.while_loop(self.cond, self.body, loop_vars, maximum_iterations=self.maximum_iterations) + + def compute_output_spec(self, loop_vars): + return [KerasTensor(v.shape, dtype=v.dtype) for v in loop_vars] + +@keras_export('keras.ops.while_loop') +def while_loop(cond, body, loop_vars, maximum_iterations=None): + return backend.core.while_loop(cond, body, loop_vars, maximum_iterations=maximum_iterations) + +class StopGradient(Operation): + + def __init__(self): + super().__init__() + + def call(self, variable): + return backend.core.stop_gradient(variable) + + def compute_output_spec(self, variable): + return KerasTensor(variable.shape, dtype=variable.dtype) + +@keras_export('keras.ops.stop_gradient') +def stop_gradient(variable): + if any_symbolic_tensors((variable,)): + return StopGradient().symbolic_call(variable) + return backend.core.stop_gradient(variable) + +class ForiLoop(Operation): + + def __init__(self, lower, upper, body_fun): + super().__init__() + self.lower = lower + self.upper = upper + self.body_fun = body_fun + + def call(self, init_val): + return backend.core.fori_loop(self.lower, self.upper, self.body_fun, init_val) + + def compute_output_spec(self, init_val): + return KerasTensor(init_val.shape, dtype=init_val.dtype) + +@keras_export('keras.ops.fori_loop') +def fori_loop(lower, upper, body_fun, init_val): + if any_symbolic_tensors((lower, upper, init_val)): + return ForiLoop(lower, upper, body_fun).symbolic_call(init_val) + return backend.core.fori_loop(lower, upper, body_fun, init_val) + +class Unstack(Operation): + + def __init__(self, num=None, axis=0): + super().__init__() + self.num = num + self.axis = axis + + def call(self, x): + return backend.core.unstack(x, self.num, self.axis) + + def compute_output_spec(self, x): + axis = self.axis + if axis < 0: + axis = len(x.shape) + axis + output_shapes = x.shape[:axis] + x.shape[axis + 1:] + num = self.num + if num is None: + num = x.shape[axis] + if num is None: + raise ValueError(f'Cannot infer argument `num` from shape {x.shape}. 
Either provide a tensor with a concrete shape in the `axis` dimension or explicitly pass the `num` argument.') + output = [KerasTensor(shape=output_shapes, dtype=x.dtype) for _ in range(num)] + return output + +@keras_export('keras.ops.unstack') +def unstack(x, num=None, axis=0): + if any_symbolic_tensors((x,)): + return Unstack(num, axis).symbolic_call(x) + return backend.core.unstack(x, num=num, axis=axis) + +@keras_export('keras.ops.shape') +def shape(x): + if any_symbolic_tensors((x,)): + return x.shape + return backend.core.shape(x) + +@keras_export('keras.ops.dtype') +def dtype(x): + return backend.standardize_dtype(x.dtype) + +class Cast(Operation): + + def __init__(self, dtype): + super().__init__() + self.dtype = backend.standardize_dtype(dtype) + + def call(self, x): + return backend.core.cast(x, self.dtype) + + def compute_output_spec(self, x): + return backend.KerasTensor(shape=x.shape, dtype=self.dtype) + +@keras_export('keras.ops.cast') +def cast(x, dtype): + dtype = backend.standardize_dtype(dtype) + if any_symbolic_tensors((x,)): + return Cast(dtype=dtype)(x) + return backend.core.cast(x, dtype) + +class SaturateCast(Operation): + + def __init__(self, dtype): + super().__init__() + self.dtype = backend.standardize_dtype(dtype) + + def call(self, x): + return _saturate_cast(x, self.dtype) + + def compute_output_spec(self, x): + return backend.KerasTensor(shape=x.shape, dtype=self.dtype) + +@keras_export('keras.ops.saturate_cast') +def saturate_cast(x, dtype): + dtype = backend.standardize_dtype(dtype) + if any_symbolic_tensors((x,)): + return SaturateCast(dtype=dtype)(x) + return _saturate_cast(x, dtype) + +def _saturate_cast(x, dtype, backend_module=None): + backend_module = backend_module or backend + dtype = backend.standardize_dtype(dtype) + in_dtype = backend.standardize_dtype(x.dtype) + in_info = np.iinfo(in_dtype) if 'int' in in_dtype else np.finfo(in_dtype) + out_info = np.iinfo(dtype) if 'int' in dtype else np.finfo(dtype) + min_limit = np.maximum(in_info.min, out_info.min).astype(in_dtype) + if min_limit < out_info.min: + min_limit = np.nextafter(min_limit, 0, dtype=in_dtype) + max_limit = np.minimum(in_info.max, out_info.max).astype(in_dtype) + if max_limit > out_info.max: + max_limit = np.nextafter(max_limit, 0, dtype=in_dtype) + x = backend_module.numpy.clip(x, min_limit, max_limit) + return backend_module.cast(x, dtype) + +@keras_export('keras.ops.convert_to_tensor') +def convert_to_tensor(x, dtype=None, sparse=None): + return backend.convert_to_tensor(x, dtype=dtype, sparse=sparse) + +@keras_export('keras.ops.convert_to_numpy') +def convert_to_numpy(x): + if any_symbolic_tensors((x,)): + return np.array(x) + return backend.convert_to_numpy(x) + +class Cond(Operation): + + @traceback_utils.filter_traceback + def __call__(self, *args, **kwargs): + + def call_fn(*args, **kwargs): + if any_symbolic_tensors(args, kwargs): + return self.symbolic_call(*args, **kwargs) + else: + return self.call(*args, **kwargs) + if traceback_utils.is_traceback_filtering_enabled(): + call_fn = traceback_utils.inject_argument_info_in_traceback(call_fn, object_name=f'{self.__class__.__name__}.call()') + return call_fn(*args, **kwargs) + return call_fn(*args, **kwargs) + + def call(self, pred, true_fn, false_fn): + return backend.core.cond(pred, true_fn, false_fn) + + def compute_output_spec(self, pred, true_fn, false_fn): + true_fn_spec = backend.compute_output_spec(true_fn) + false_fn_spec = backend.compute_output_spec(false_fn) + if not self._check_output_spec(true_fn_spec, 
false_fn_spec): + raise ValueError(f'`true_fn` and `false_fn` should return outputs of the same kind (struct, dtype and shape). Got {true_fn_spec} and {false_fn_spec} instead.') + return true_fn_spec + + def _check_output_spec(self, true_fn_spec, false_fn_spec): + try: + tree.assert_same_structure(true_fn_spec, false_fn_spec) + except: + return False + + def check_leaf(t_spec, f_spec): + if t_spec is None or f_spec is None: + return t_spec is None and f_spec is None + return t_spec.shape == f_spec.shape and t_spec.dtype == f_spec.dtype + same = tree.map_structure(check_leaf, true_fn_spec, false_fn_spec) + return all(tree.flatten(same)) + +@keras_export('keras.ops.cond') +def cond(pred, true_fn, false_fn): + return Cond()(pred, true_fn, false_fn) + +@keras_export('keras.ops.vectorized_map') +def vectorized_map(function, elements): + return backend.core.vectorized_map(function, elements) + +@keras_export('keras.ops.is_tensor') +def is_tensor(x): + return backend.core.is_tensor(x) + +@keras_export('keras.ops.custom_gradient') +def custom_gradient(f): + return backend.core.custom_gradient(f) + +# File: keras-master/keras/src/ops/function.py +import collections +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend.config import backend +from keras.src.ops.operation import Operation + +@keras_export('keras.Function') +class Function(Operation): + + def __init__(self, inputs, outputs, name=None): + super().__init__(name=name) + if backend() == 'tensorflow': + _self_setattr_tracking = getattr(self, '_self_setattr_tracking', True) + self._self_setattr_tracking = False + self._inputs_struct = tree.map_structure(lambda x: x, inputs) + self._outputs_struct = tree.map_structure(lambda x: x, outputs) + self._inputs = tree.flatten(inputs) + self._outputs = tree.flatten(outputs) + if not self._inputs: + raise ValueError(f'`inputs` argument cannot be empty. Received:\ninputs={inputs}\noutputs={outputs}') + if not self._outputs: + raise ValueError(f'`outputs` argument cannot be empty. 
Received:\ninputs={inputs}\noutputs={outputs}') + if backend() == 'tensorflow': + self._self_setattr_tracking = _self_setattr_tracking + (nodes, nodes_by_depth, operations, operations_by_depth) = map_graph(self._inputs, self._outputs) + self._nodes = nodes + self._nodes_by_depth = nodes_by_depth + self._operations = operations + self._operations_by_depth = operations_by_depth + + @property + def operations(self): + return self._operations[:] + + @property + def inputs(self): + return self._inputs + + @property + def outputs(self): + return self._outputs + + def compute_output_spec(self, inputs): + self._assert_input_compatibility(inputs) + shortcut = True + for (x, x_ref) in zip(tree.flatten(inputs), self._inputs): + if x.shape != x_ref.shape: + shortcut = False + break + if shortcut: + return tree.map_structure(lambda x: KerasTensor(shape=x.shape, dtype=x.dtype), self._outputs_struct) + return self._run_through_graph(inputs, operation_fn=lambda op: op.compute_output_spec) + + def compute_output_shape(self, input_shape): + input_shape_struct = tree.map_shape_structure(lambda x: KerasTensor(shape=x), input_shape) + for (x, x_ref) in zip(tree.flatten(input_shape_struct), self._inputs): + x._dtype = x_ref.dtype + x._sparse = x_ref.sparse + output_spec = self.compute_output_spec(input_shape_struct) + return tree.map_structure(lambda x: x.shape, output_spec) + + def call(self, inputs): + self._assert_input_compatibility(inputs) + return self._run_through_graph(inputs, operation_fn=lambda op: op) + + def _run_through_graph(self, inputs, operation_fn, call_fn=None): + inputs = tree.flatten(inputs) + tensor_dict = {} + for (x, y) in zip(self.inputs, inputs): + tensor_dict[id(x)] = y + nodes_by_depth = self._nodes_by_depth + depth_keys = list(nodes_by_depth.keys()) + depth_keys.sort(reverse=True) + for depth in depth_keys: + nodes = nodes_by_depth[depth] + for node in nodes: + if not node.operation or node.is_input: + continue + if any((id(x) not in tensor_dict for x in node.input_tensors)): + continue + (args, kwargs) = node.arguments.fill_in(tensor_dict) + op = operation_fn(node.operation) + if call_fn is not None: + outputs = call_fn(op, *args, **kwargs) + else: + outputs = op(*args, **kwargs) + for (x, y) in zip(node.outputs, tree.flatten(outputs)): + tensor_dict[id(x)] = y + output_tensors = [] + for x in self.outputs: + output_tensors.append(tensor_dict[id(x)]) + return tree.pack_sequence_as(self._outputs_struct, output_tensors) + + def _assert_input_compatibility(self, inputs): + try: + tree.assert_same_structure(inputs, self._inputs_struct, check_types=False) + except ValueError: + raise ValueError(f'Function was called with an invalid input structure. Expected input structure: {self._inputs_struct}\nReceived input structure: {inputs}') + for (x, x_ref) in zip(tree.flatten(inputs), self._inputs): + if len(x.shape) != len(x_ref.shape): + raise ValueError(f"{self.__class__.__name__} was passed incompatible inputs. For input '{x_ref.name}', expected shape {x_ref.shape}, but received instead a tensor with shape {x.shape}.") + for (dim, ref_dim) in zip(x.shape, x_ref.shape): + if ref_dim is not None and dim is not None: + if dim != ref_dim: + raise ValueError(f"{self.__class__.__name__} was passed incompatible inputs. 
For input '{x_ref.name}', expected shape {x_ref.shape}, but received instead a tensor with shape {x.shape}.") + +def make_node_key(op, node_index): + return str(id(op)) + '_ib-' + str(node_index) + +def map_graph(inputs, outputs): + (nodes_in_decreasing_depth, operation_indices) = _build_map(inputs, outputs) + network_nodes = {make_node_key(node.operation, node.operation._inbound_nodes.index(node)) for node in nodes_in_decreasing_depth} + nodes_depths = {} + operations_depths = {} + for node in reversed(nodes_in_decreasing_depth): + depth = nodes_depths.setdefault(node, 0) + previous_depth = operations_depths.get(node.operation, 0) + depth = max(depth, previous_depth) + operations_depths[node.operation] = depth + nodes_depths[node] = depth + for node_dep in node.parent_nodes: + previous_depth = nodes_depths.get(node_dep, 0) + nodes_depths[node_dep] = max(depth + 1, previous_depth) + for input_t in inputs: + input_operation = input_t._keras_history[0] + if input_operation and input_operation not in operations_depths: + operations_depths[input_operation] = 0 + operation_indices[input_operation] = -1 + nodes_depths[input_operation._inbound_nodes[0]] = 0 + network_nodes.add(make_node_key(input_operation, 0)) + nodes_by_depth = collections.defaultdict(list) + for (node, depth) in nodes_depths.items(): + nodes_by_depth[depth].append(node) + operations_by_depth = collections.defaultdict(list) + for (operation, depth) in operations_depths.items(): + operations_by_depth[depth].append(operation) + depth_keys = list(operations_by_depth.keys()) + depth_keys.sort(reverse=True) + operations = [] + for depth in depth_keys: + operations_for_depth = operations_by_depth[depth] + operations_for_depth.sort(key=lambda x: operation_indices[x]) + operations.extend(operations_for_depth) + depth_keys = list(nodes_by_depth.keys()) + depth_keys.sort(reverse=True) + computable_tensors = set() + for x in inputs: + computable_tensors.add(x) + operations_with_complete_input = [] + for depth in depth_keys: + for node in nodes_by_depth[depth]: + for x in tree.flatten(node.input_tensors): + if x not in computable_tensors: + operation = node.operation + raise ValueError(f"Graph disconnected: cannot find parent for tensor {x} at operation '{operation}'. The following previous operations were accessed without issue: {operations_with_complete_input}") + operations_with_complete_input.append(operation.name) + for x in tree.flatten(node.outputs): + computable_tensors.add(x) + all_names = [operation.name for operation in operations] + for name in all_names: + if all_names.count(name) != 1: + raise ValueError(f'The name "{name}" is used {all_names.count(name)} times in the model. 
All operation names should be unique.') + return (network_nodes, nodes_by_depth, operations, operations_by_depth) + +def _build_map(inputs, outputs): + finished_nodes = set() + nodes_in_progress = set() + nodes_in_decreasing_depth = [] + operation_indices = {} + for output in tree.flatten(outputs): + _build_map_helper(inputs, output, finished_nodes, nodes_in_progress, nodes_in_decreasing_depth, operation_indices) + return (nodes_in_decreasing_depth, operation_indices) + +def _build_map_helper(inputs, tensor, finished_nodes, nodes_in_progress, nodes_in_decreasing_depth, operation_indices): + (operation, node_index, _) = tensor._keras_history + if not operation: + return + node = operation._inbound_nodes[node_index] + if node in finished_nodes: + return + if node in nodes_in_progress: + raise ValueError(f"Tensor {tensor} from operation '{operation.name}' is part of a cycle.") + if operation not in operation_indices: + operation_indices[operation] = len(operation_indices) + nodes_in_progress.add(node) + if not node.is_input and tensor not in tree.flatten(inputs): + for tensor in node.input_tensors: + _build_map_helper(inputs, tensor, finished_nodes, nodes_in_progress, nodes_in_decreasing_depth, operation_indices) + finished_nodes.add(node) + nodes_in_progress.remove(node) + nodes_in_decreasing_depth.append(node) + +# File: keras-master/keras/src/ops/image.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.ops.operation import Operation +from keras.src.ops.operation_utils import compute_conv_output_shape + +class RGBToGrayscale(Operation): + + def __init__(self, data_format=None): + super().__init__() + self.data_format = backend.standardize_data_format(data_format) + + def call(self, images): + return backend.image.rgb_to_grayscale(images, data_format=self.data_format) + + def compute_output_spec(self, images): + images_shape = list(images.shape) + if len(images_shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received: images.shape={images_shape}') + if self.data_format == 'channels_last': + images_shape[-1] = 1 + else: + images_shape[-3] = 1 + return KerasTensor(shape=images_shape, dtype=images.dtype) + +@keras_export('keras.ops.image.rgb_to_grayscale') +def rgb_to_grayscale(images, data_format=None): + if any_symbolic_tensors((images,)): + return RGBToGrayscale(data_format=data_format).symbolic_call(images) + return backend.image.rgb_to_grayscale(images, data_format=data_format) + +class RGBToHSV(Operation): + + def __init__(self, data_format=None): + super().__init__() + self.data_format = backend.standardize_data_format(data_format) + + def call(self, images): + return backend.image.rgb_to_hsv(images, data_format=self.data_format) + + def compute_output_spec(self, images): + images_shape = list(images.shape) + dtype = images.dtype + if len(images_shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received: images.shape={images_shape}') + if not backend.is_float_dtype(dtype): + raise ValueError(f'Invalid images dtype: expected float dtype. 
Received: images.dtype={dtype}') + return KerasTensor(shape=images_shape, dtype=images.dtype) + +@keras_export('keras.ops.image.rgb_to_hsv') +def rgb_to_hsv(images, data_format=None): + if any_symbolic_tensors((images,)): + return RGBToHSV(data_format=data_format).symbolic_call(images) + return backend.image.rgb_to_hsv(images, data_format=data_format) + +class HSVToRGB(Operation): + + def __init__(self, data_format=None): + super().__init__() + self.data_format = backend.standardize_data_format(data_format) + + def call(self, images): + return backend.image.hsv_to_rgb(images, data_format=self.data_format) + + def compute_output_spec(self, images): + images_shape = list(images.shape) + dtype = images.dtype + if len(images_shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received: images.shape={images_shape}') + if not backend.is_float_dtype(dtype): + raise ValueError(f'Invalid images dtype: expected float dtype. Received: images.dtype={dtype}') + return KerasTensor(shape=images_shape, dtype=images.dtype) + +@keras_export('keras.ops.image.hsv_to_rgb') +def hsv_to_rgb(images, data_format=None): + if any_symbolic_tensors((images,)): + return HSVToRGB(data_format=data_format).symbolic_call(images) + return backend.image.hsv_to_rgb(images, data_format=data_format) + +class Resize(Operation): + + def __init__(self, size, interpolation='bilinear', antialias=False, crop_to_aspect_ratio=False, pad_to_aspect_ratio=False, fill_mode='constant', fill_value=0.0, data_format=None): + super().__init__() + self.size = tuple(size) + self.interpolation = interpolation + self.antialias = antialias + self.crop_to_aspect_ratio = crop_to_aspect_ratio + self.pad_to_aspect_ratio = pad_to_aspect_ratio + self.fill_mode = fill_mode + self.fill_value = fill_value + self.data_format = backend.standardize_data_format(data_format) + + def call(self, images): + return _resize(images, self.size, interpolation=self.interpolation, antialias=self.antialias, data_format=self.data_format, crop_to_aspect_ratio=self.crop_to_aspect_ratio, pad_to_aspect_ratio=self.pad_to_aspect_ratio, fill_mode=self.fill_mode, fill_value=self.fill_value) + + def compute_output_spec(self, images): + images_shape = list(images.shape) + if len(images_shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if self.data_format == 'channels_last': + (height_axis, width_axis) = (-3, -2) + else: + (height_axis, width_axis) = (-2, -1) + images_shape[height_axis] = self.size[0] + images_shape[width_axis] = self.size[1] + return KerasTensor(shape=images_shape, dtype=images.dtype) + +@keras_export('keras.ops.image.resize') +def resize(images, size, interpolation='bilinear', antialias=False, crop_to_aspect_ratio=False, pad_to_aspect_ratio=False, fill_mode='constant', fill_value=0.0, data_format=None): + if len(size) != 2: + raise ValueError(f'Expected `size` to be a tuple of 2 integers. Received: size={size}') + if len(images.shape) < 3 or len(images.shape) > 4: + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). 
Received input with shape: images.shape={images.shape}') + if pad_to_aspect_ratio and crop_to_aspect_ratio: + raise ValueError('Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` can be `True`.') + if any_symbolic_tensors((images,)): + return Resize(size, interpolation=interpolation, antialias=antialias, data_format=data_format, crop_to_aspect_ratio=crop_to_aspect_ratio, pad_to_aspect_ratio=pad_to_aspect_ratio, fill_mode=fill_mode, fill_value=fill_value).symbolic_call(images) + return _resize(images, size, interpolation=interpolation, antialias=antialias, crop_to_aspect_ratio=crop_to_aspect_ratio, data_format=data_format, pad_to_aspect_ratio=pad_to_aspect_ratio, fill_mode=fill_mode, fill_value=fill_value) + +def _resize(images, size, interpolation='bilinear', antialias=False, crop_to_aspect_ratio=False, pad_to_aspect_ratio=False, fill_mode='constant', fill_value=0.0, data_format=None): + resized = backend.image.resize(images, size, interpolation=interpolation, antialias=antialias, crop_to_aspect_ratio=crop_to_aspect_ratio, data_format=data_format, pad_to_aspect_ratio=pad_to_aspect_ratio, fill_mode=fill_mode, fill_value=fill_value) + if resized.dtype == images.dtype: + return resized + if backend.is_int_dtype(images.dtype): + resized = ops.round(resized) + return ops.saturate_cast(resized, images.dtype) + +class AffineTransform(Operation): + + def __init__(self, interpolation='bilinear', fill_mode='constant', fill_value=0, data_format=None): + super().__init__() + self.interpolation = interpolation + self.fill_mode = fill_mode + self.fill_value = fill_value + self.data_format = backend.standardize_data_format(data_format) + + def call(self, images, transform): + return backend.image.affine_transform(images, transform, interpolation=self.interpolation, fill_mode=self.fill_mode, fill_value=self.fill_value, data_format=self.data_format) + + def compute_output_spec(self, images, transform): + if len(images.shape) not in (3, 4): + raise ValueError(f'Invalid images rank: expected rank 3 (single image) or rank 4 (batch of images). Received input with shape: images.shape={images.shape}') + if len(transform.shape) not in (1, 2): + raise ValueError(f'Invalid transform rank: expected rank 1 (single transform) or rank 2 (batch of transforms). 
Received input with shape: transform.shape={transform.shape}')
+        return KerasTensor(images.shape, dtype=images.dtype)
+
+@keras_export('keras.ops.image.affine_transform')
+def affine_transform(images, transform, interpolation='bilinear', fill_mode='constant', fill_value=0, data_format=None):
+    if any_symbolic_tensors((images, transform)):
+        return AffineTransform(interpolation=interpolation, fill_mode=fill_mode, fill_value=fill_value, data_format=data_format).symbolic_call(images, transform)
+    return backend.image.affine_transform(images, transform, interpolation=interpolation, fill_mode=fill_mode, fill_value=fill_value, data_format=data_format)
+
+class ExtractPatches(Operation):
+
+    def __init__(self, size, strides=None, dilation_rate=1, padding='valid', data_format=None):
+        super().__init__()
+        if isinstance(size, int):
+            size = (size, size)
+        self.size = size
+        self.strides = strides
+        self.dilation_rate = dilation_rate
+        self.padding = padding
+        self.data_format = backend.standardize_data_format(data_format)
+
+    def call(self, images):
+        return _extract_patches(images=images, size=self.size, strides=self.strides, dilation_rate=self.dilation_rate, padding=self.padding, data_format=self.data_format)
+
+    def compute_output_spec(self, images):
+        images_shape = list(images.shape)
+        original_ndim = len(images_shape)
+        # `strides` must be bound in both cases: the call to
+        # `compute_conv_output_shape` below needs it whether or not the caller
+        # provided one (falling back to non-overlapping patches when `None`).
+        strides = self.strides if self.strides else (self.size[0], self.size[1])
+        if self.data_format == 'channels_last':
+            channels_in = images_shape[-1]
+        else:
+            channels_in = images_shape[-3]
+        if original_ndim == 3:
+            images_shape = [1] + images_shape
+        filters = self.size[0] * self.size[1] * channels_in
+        kernel_size = (self.size[0], self.size[1])
+        out_shape = compute_conv_output_shape(images_shape, filters, kernel_size, strides=strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate)
+        if original_ndim == 3:
+            out_shape = out_shape[1:]
+        return KerasTensor(shape=out_shape, dtype=images.dtype)
+
+@keras_export('keras.ops.image.extract_patches')
+def extract_patches(images, size, strides=None, dilation_rate=1, padding='valid', data_format=None):
+    if any_symbolic_tensors((images,)):
+        return ExtractPatches(size=size, strides=strides, dilation_rate=dilation_rate, padding=padding, data_format=data_format).symbolic_call(images)
+    return _extract_patches(images, size, strides, dilation_rate, padding, data_format=data_format)
+
+def _extract_patches(images, size, strides=None, dilation_rate=1, padding='valid', data_format=None):
+    if isinstance(size, int):
+        patch_h = patch_w = size
+    elif len(size) == 2:
+        (patch_h, patch_w) = (size[0], size[1])
+    else:
+        raise TypeError(f'Invalid `size` argument. Expected an int or a tuple of length 2. 
Received: size={size}') + data_format = backend.standardize_data_format(data_format) + if data_format == 'channels_last': + channels_in = images.shape[-1] + elif data_format == 'channels_first': + channels_in = images.shape[-3] + if not strides: + strides = size + out_dim = patch_h * patch_w * channels_in + kernel = backend.numpy.eye(out_dim, dtype=images.dtype) + kernel = backend.numpy.reshape(kernel, (patch_h, patch_w, channels_in, out_dim)) + _unbatched = False + if len(images.shape) == 3: + _unbatched = True + images = backend.numpy.expand_dims(images, axis=0) + patches = backend.nn.conv(inputs=images, kernel=kernel, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate) + if _unbatched: + patches = backend.numpy.squeeze(patches, axis=0) + return patches + +class MapCoordinates(Operation): + + def __init__(self, order, fill_mode='constant', fill_value=0): + super().__init__() + self.order = order + self.fill_mode = fill_mode + self.fill_value = fill_value + + def call(self, inputs, coordinates): + return backend.image.map_coordinates(inputs, coordinates, order=self.order, fill_mode=self.fill_mode, fill_value=self.fill_value) + + def compute_output_spec(self, inputs, coordinates): + if coordinates.shape[0] != len(inputs.shape): + raise ValueError(f'First dim of `coordinates` must be the same as the rank of `inputs`. Received inputs with shape: {inputs.shape} and coordinate leading dim of {coordinates.shape[0]}') + if len(coordinates.shape) < 2: + raise ValueError(f'Invalid coordinates rank: expected at least rank 2. Received input with shape: {coordinates.shape}') + return KerasTensor(coordinates.shape[1:], dtype=inputs.dtype) + +@keras_export('keras.ops.image.map_coordinates') +def map_coordinates(inputs, coordinates, order, fill_mode='constant', fill_value=0): + if any_symbolic_tensors((inputs, coordinates)): + return MapCoordinates(order, fill_mode, fill_value).symbolic_call(inputs, coordinates) + return backend.image.map_coordinates(inputs, coordinates, order, fill_mode, fill_value) + +class PadImages(Operation): + + def __init__(self, top_padding=None, left_padding=None, bottom_padding=None, right_padding=None, target_height=None, target_width=None, data_format=None): + super().__init__() + self.top_padding = top_padding + self.left_padding = left_padding + self.bottom_padding = bottom_padding + self.right_padding = right_padding + self.target_height = target_height + self.target_width = target_width + self.data_format = backend.standardize_data_format(data_format) + + def call(self, images): + return _pad_images(images, self.top_padding, self.left_padding, self.bottom_padding, self.right_padding, self.target_height, self.target_width, self.data_format) + + def compute_output_spec(self, images): + images_shape = list(images.shape) + if self.data_format == 'channels_last': + (height_axis, width_axis) = (-3, -2) + (height, width) = (images_shape[height_axis], images_shape[width_axis]) + else: + (height_axis, width_axis) = (-2, -1) + (height, width) = (images_shape[height_axis], images_shape[width_axis]) + target_height = self.target_height + if target_height is None and height is not None: + target_height = self.top_padding + height + self.bottom_padding + target_width = self.target_width + if target_width is None and width is not None: + target_width = self.left_padding + width + self.right_padding + images_shape[height_axis] = target_height + images_shape[width_axis] = target_width + return KerasTensor(shape=images_shape, dtype=images.dtype) + 
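+# --- Editor's sketch (not part of the Keras source): a minimal round-trip
+# through the padding/cropping ops in this file. `pad_images` requires exactly
+# two of {top_padding, bottom_padding, target_height} (and likewise for the
+# width arguments); the remaining amount is inferred. Assumes the default
+# 'channels_last' data format; values and names are illustrative only.
+#
+#     import numpy as np
+#     from keras import ops
+#
+#     img = np.zeros((64, 64, 3), dtype='float32')      # (H, W, C)
+#     padded = ops.image.pad_images(
+#         img, top_padding=2, left_padding=2,
+#         target_height=70, target_width=70,
+#     )                                  # bottom/right padding inferred as 4
+#     assert tuple(padded.shape) == (70, 70, 3)
+#
+#     cropped = ops.image.crop_images(
+#         padded, top_cropping=2, left_cropping=2,
+#         target_height=64, target_width=64,
+#     )
+#     assert tuple(cropped.shape) == (64, 64, 3)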
+@keras_export('keras.ops.image.pad_images') +def pad_images(images, top_padding=None, left_padding=None, bottom_padding=None, right_padding=None, target_height=None, target_width=None, data_format=None): + if any_symbolic_tensors((images,)): + return PadImages(top_padding, left_padding, bottom_padding, right_padding, target_height, target_width, data_format).symbolic_call(images) + return _pad_images(images, top_padding, left_padding, bottom_padding, right_padding, target_height, target_width, data_format) + +def _pad_images(images, top_padding, left_padding, bottom_padding, right_padding, target_height, target_width, data_format=None): + data_format = backend.standardize_data_format(data_format) + images = backend.convert_to_tensor(images) + images_shape = ops.shape(images) + if len(images_shape) not in (3, 4): + raise ValueError(f'Invalid shape for argument `images`: it must have rank 3 or 4. Received: images.shape={images_shape}') + if [top_padding, bottom_padding, target_height].count(None) != 1: + raise ValueError(f'Must specify exactly two of top_padding, bottom_padding, target_height. Received: top_padding={top_padding}, bottom_padding={bottom_padding}, target_height={target_height}') + if [left_padding, right_padding, target_width].count(None) != 1: + raise ValueError(f'Must specify exactly two of left_padding, right_padding, target_width. Received: left_padding={left_padding}, right_padding={right_padding}, target_width={target_width}') + is_batch = False if len(images_shape) == 3 else True + if data_format == 'channels_last': + (height, width) = (images_shape[-3], images_shape[-2]) + else: + (height, width) = (images_shape[-2], images_shape[-1]) + if top_padding is None: + top_padding = target_height - bottom_padding - height + if bottom_padding is None: + bottom_padding = target_height - top_padding - height + if left_padding is None: + left_padding = target_width - right_padding - width + if right_padding is None: + right_padding = target_width - left_padding - width + if top_padding < 0: + raise ValueError(f'top_padding must be >= 0. Received: top_padding={top_padding}') + if left_padding < 0: + raise ValueError(f'left_padding must be >= 0. Received: left_padding={left_padding}') + if right_padding < 0: + raise ValueError(f'right_padding must be >= 0. Received: right_padding={right_padding}') + if bottom_padding < 0: + raise ValueError(f'bottom_padding must be >= 0. 
Received: bottom_padding={bottom_padding}')
+    pad_width = [[top_padding, bottom_padding], [left_padding, right_padding]]
+    if data_format == 'channels_last':
+        pad_width = pad_width + [[0, 0]]
+    else:
+        pad_width = [[0, 0]] + pad_width
+    if is_batch:
+        pad_width = [[0, 0]] + pad_width
+    padded_images = backend.numpy.pad(images, pad_width)
+    return padded_images
+
+class CropImages(Operation):
+
+    def __init__(self, top_cropping, left_cropping, bottom_cropping, right_cropping, target_height, target_width, data_format=None):
+        super().__init__()
+        self.top_cropping = top_cropping
+        self.bottom_cropping = bottom_cropping
+        self.left_cropping = left_cropping
+        self.right_cropping = right_cropping
+        self.target_height = target_height
+        self.target_width = target_width
+        self.data_format = backend.standardize_data_format(data_format)
+
+    def call(self, images):
+        return _crop_images(images, self.top_cropping, self.left_cropping, self.bottom_cropping, self.right_cropping, self.target_height, self.target_width, self.data_format)
+
+    def compute_output_spec(self, images):
+        images_shape = list(images.shape)
+        if self.data_format == 'channels_last':
+            (height_axis, width_axis) = (-3, -2)
+        else:
+            (height_axis, width_axis) = (-2, -1)
+        (height, width) = (images_shape[height_axis], images_shape[width_axis])
+        if height is None and self.target_height is None:
+            raise ValueError(f'When the height of the images is unknown, `target_height` must be specified. Received images.shape={images_shape} and target_height={self.target_height}')
+        if width is None and self.target_width is None:
+            raise ValueError(f'When the width of the images is unknown, `target_width` must be specified. Received images.shape={images_shape} and target_width={self.target_width}')
+        target_height = self.target_height
+        if target_height is None:
+            target_height = height - self.top_cropping - self.bottom_cropping
+        target_width = self.target_width
+        if target_width is None:
+            target_width = width - self.left_cropping - self.right_cropping
+        images_shape[height_axis] = target_height
+        images_shape[width_axis] = target_width
+        return KerasTensor(shape=images_shape, dtype=images.dtype)
+
+@keras_export('keras.ops.image.crop_images')
+def crop_images(images, top_cropping=None, left_cropping=None, bottom_cropping=None, right_cropping=None, target_height=None, target_width=None, data_format=None):
+    if any_symbolic_tensors((images,)):
+        return CropImages(top_cropping, left_cropping, bottom_cropping, right_cropping, target_height, target_width, data_format).symbolic_call(images)
+    return _crop_images(images, top_cropping, left_cropping, bottom_cropping, right_cropping, target_height, target_width, data_format)
+
+def _crop_images(images, top_cropping, left_cropping, bottom_cropping, right_cropping, target_height, target_width, data_format=None):
+    data_format = backend.standardize_data_format(data_format)
+    images = backend.convert_to_tensor(images)
+    images_shape = ops.shape(images)
+    if len(images_shape) not in (3, 4):
+        raise ValueError(f'Invalid shape for argument `images`: it must have rank 3 or 4. Received: images.shape={images_shape}')
+    if [top_cropping, bottom_cropping, target_height].count(None) != 1:
+        raise ValueError(f'Must specify exactly two of top_cropping, bottom_cropping, target_height. 
Received: top_cropping={top_cropping}, bottom_cropping={bottom_cropping}, target_height={target_height}') + if [left_cropping, right_cropping, target_width].count(None) != 1: + raise ValueError(f'Must specify exactly two of left_cropping, right_cropping, target_width. Received: left_cropping={left_cropping}, right_cropping={right_cropping}, target_width={target_width}') + is_batch = False if len(images_shape) == 3 else True + if data_format == 'channels_last': + (height, width) = (images_shape[-3], images_shape[-2]) + channels = images_shape[-1] + else: + (height, width) = (images_shape[-2], images_shape[-1]) + channels = images_shape[-3] + if top_cropping is None: + top_cropping = height - target_height - bottom_cropping + if target_height is None: + target_height = height - bottom_cropping - top_cropping + if left_cropping is None: + left_cropping = width - target_width - right_cropping + if target_width is None: + target_width = width - right_cropping - left_cropping + if top_cropping < 0: + raise ValueError(f'top_cropping must be >= 0. Received: top_cropping={top_cropping}') + if target_height < 0: + raise ValueError(f'target_height must be >= 0. Received: target_height={target_height}') + if left_cropping < 0: + raise ValueError(f'left_cropping must be >= 0. Received: left_cropping={left_cropping}') + if target_width < 0: + raise ValueError(f'target_width must be >= 0. Received: target_width={target_width}') + start_indices = [top_cropping, left_cropping] + shape = [target_height, target_width] + if data_format == 'channels_last': + start_indices = start_indices + [0] + shape = shape + [channels] + else: + start_indices = [0] + start_indices + shape = [channels] + shape + if is_batch: + batch_size = images_shape[0] + start_indices = [0] + start_indices + shape = [batch_size] + shape + cropped_images = ops.slice(images, start_indices, shape) + return cropped_images + +# File: keras-master/keras/src/ops/linalg.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.ops.operation import Operation +from keras.src.ops.operation_utils import reduce_shape + +class Cholesky(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return _cholesky(x) + + def compute_output_spec(self, x): + _assert_2d(x) + _assert_square(x) + return KerasTensor(x.shape, x.dtype) + +@keras_export(['keras.ops.cholesky', 'keras.ops.linalg.cholesky']) +def cholesky(x): + if any_symbolic_tensors((x,)): + return Cholesky().symbolic_call(x) + return _cholesky(x) + +def _cholesky(x): + x = backend.convert_to_tensor(x) + _assert_2d(x) + _assert_square(x) + try: + return backend.linalg.cholesky(x) + except Exception as e: + raise ValueError(f'Cholesky decomposition failed: {e}') + +class Det(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return _det(x) + + def compute_output_spec(self, x): + _assert_2d(x) + _assert_square(x) + return KerasTensor(x.shape[:-2], x.dtype) + +@keras_export(['keras.ops.det', 'keras.ops.linalg.det']) +def det(x): + if any_symbolic_tensors((x,)): + return Det().symbolic_call(x) + return _det(x) + +def _det(x): + x = backend.convert_to_tensor(x) + _assert_2d(x) + _assert_square(x) + return backend.linalg.det(x) + +class Eig(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return _eig(x) + + def compute_output_spec(self, x): + _assert_square(x) + _assert_2d(x) + return 
(KerasTensor(x.shape[:-1], x.dtype), KerasTensor(x.shape, x.dtype)) + +@keras_export(['keras.ops.eig', 'keras.ops.linalg.eig']) +def eig(x): + if any_symbolic_tensors((x,)): + return Eig().symbolic_call(x) + return _eig(x) + +def _eig(x): + x = backend.convert_to_tensor(x) + _assert_square(x) + _assert_2d(x) + return backend.linalg.eig(x) + +class Eigh(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return _eigh(x) + + def compute_output_spec(self, x): + _assert_square(x) + _assert_2d(x) + return (KerasTensor(x.shape[:-1], x.dtype), KerasTensor(x.shape, x.dtype)) + +@keras_export(['keras.ops.eigh', 'keras.ops.linalg.eigh']) +def eigh(x): + if any_symbolic_tensors((x,)): + return Eigh().symbolic_call(x) + return _eigh(x) + +def _eigh(x): + x = backend.convert_to_tensor(x) + _assert_square(x) + _assert_2d(x) + return backend.linalg.eigh(x) + +class Inv(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return _inv(x) + + def compute_output_spec(self, x): + _assert_2d(x) + _assert_square(x) + return KerasTensor(x.shape, x.dtype) + +@keras_export(['keras.ops.inv', 'keras.ops.linalg.inv']) +def inv(x): + if any_symbolic_tensors((x,)): + return Inv().symbolic_call(x) + return _inv(x) + +def _inv(x): + x = backend.convert_to_tensor(x) + _assert_2d(x) + _assert_square(x) + return backend.linalg.inv(x) + +class LuFactor(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return _lu_factor(x) + + def compute_output_spec(self, x): + _assert_2d(x) + batch_shape = x.shape[:-2] + (m, n) = x.shape[-2:] + k = min(m, n) + return (KerasTensor(batch_shape + (m, n), x.dtype), KerasTensor(batch_shape + (k,), x.dtype)) + +@keras_export(['keras.ops.lu_factor', 'keras.ops.linalg.lu_factor']) +def lu_factor(x): + if any_symbolic_tensors((x,)): + return LuFactor().symbolic_call(x) + return _lu_factor(x) + +def _lu_factor(x): + x = backend.convert_to_tensor(x) + _assert_2d(x) + if backend.backend() == 'tensorflow': + try: + _assert_square(x) + except ValueError as e: + raise ValueError(f'LU decomposition failed: {e}. LU decomposition is only supported for square matrices in Tensorflow.') + return backend.linalg.lu_factor(x) + +class Norm(Operation): + + def __init__(self, ord=None, axis=None, keepdims=False): + super().__init__() + if isinstance(ord, str): + if ord not in ('fro', 'nuc'): + raise ValueError(f"Invalid `ord` argument. Expected one of {{'fro', 'nuc'}} when using string. Received: ord={ord}") + if isinstance(axis, int): + axis = [axis] + self.ord = ord + self.axis = axis + self.keepdims = keepdims + + def compute_output_spec(self, x): + output_dtype = backend.standardize_dtype(x.dtype) + if 'int' in output_dtype or output_dtype == 'bool': + output_dtype = backend.floatx() + if self.axis is None: + axis = tuple(range(len(x.shape))) + else: + axis = self.axis + num_axes = len(axis) + if num_axes == 1 and isinstance(self.ord, str): + raise ValueError(f'Invalid `ord` argument for vector norm. Received: ord={self.ord}') + elif num_axes == 2 and self.ord not in (None, 'fro', 'nuc', float('inf'), float('-inf'), 1, -1, 2, -2): + raise ValueError(f'Invalid `ord` argument for matrix norm. 
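+
+# Usage sketch for the square-matrix ops above (illustrative values; `eigh`
+# assumes a symmetric/Hermitian input and returns (eigenvalues, eigenvectors)):
+import numpy as np
+import keras
+a = np.array([[2.0, 1.0], [1.0, 2.0]], dtype='float32')
+(eigvals, eigvecs) = keras.ops.linalg.eigh(a)
+a_inv = keras.ops.linalg.inv(a)  # a @ a_inv is (numerically) the identity
+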
Received: ord={self.ord}') + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=output_dtype) + + def call(self, x): + x = backend.convert_to_tensor(x) + return backend.linalg.norm(x, ord=self.ord, axis=self.axis, keepdims=self.keepdims) + +@keras_export(['keras.ops.norm', 'keras.ops.linalg.norm']) +def norm(x, ord=None, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return Norm(ord=ord, axis=axis, keepdims=keepdims).symbolic_call(x) + x = backend.convert_to_tensor(x) + return backend.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims) + +class Qr(Operation): + + def __init__(self, mode='reduced'): + super().__init__() + if mode not in {'reduced', 'complete'}: + raise ValueError(f"`mode` argument value not supported. Expected one of {{'reduced', 'complete'}}. Received: mode={mode}") + self.mode = mode + + def compute_output_spec(self, x): + if len(x.shape) < 2: + raise ValueError(f'Input should have rank >= 2. Received: input.shape = {x.shape}') + m = x.shape[-2] + n = x.shape[-1] + if m is None or n is None: + raise ValueError(f'Input should have its last 2 dimensions fully-defined. Received: input.shape = {x.shape}') + k = min(m, n) + base = tuple(x.shape[:-2]) + if self.mode == 'reduced': + return (KerasTensor(shape=base + (m, k), dtype=x.dtype), KerasTensor(shape=base + (k, n), dtype=x.dtype)) + return (KerasTensor(shape=base + (m, m), dtype=x.dtype), KerasTensor(shape=base + (m, n), dtype=x.dtype)) + + def call(self, x): + x = backend.convert_to_tensor(x) + return backend.linalg.qr(x, mode=self.mode) + +@keras_export(['keras.ops.qr', 'keras.ops.linalg.qr']) +def qr(x, mode='reduced'): + if any_symbolic_tensors((x,)): + return Qr(mode=mode).symbolic_call(x) + x = backend.convert_to_tensor(x) + return backend.linalg.qr(x, mode=mode) + +class Solve(Operation): + + def __init__(self): + super().__init__() + + def call(self, a, b): + return _solve(a, b) + + def compute_output_spec(self, a, b): + _assert_2d(a) + _assert_square(a) + _assert_1d(b) + _assert_a_b_compat(a, b) + return KerasTensor(b.shape, b.dtype) + +@keras_export(['keras.ops.solve', 'keras.ops.linalg.solve']) +def solve(a, b): + if any_symbolic_tensors((a, b)): + return Solve().symbolic_call(a, b) + return _solve(a, b) + +def _solve(a, b): + a = backend.convert_to_tensor(a) + b = backend.convert_to_tensor(b) + _assert_2d(a) + _assert_square(a) + _assert_1d(b) + _assert_a_b_compat(a, b) + return backend.linalg.solve(a, b) + +class SolveTriangular(Operation): + + def __init__(self, lower=False): + super().__init__() + self.lower = lower + + def call(self, a, b): + return _solve_triangular(a, b, self.lower) + + def compute_output_spec(self, a, b): + _assert_2d(a) + _assert_square(a) + _assert_1d(b) + _assert_a_b_compat(a, b) + return KerasTensor(b.shape, b.dtype) + +@keras_export(['keras.ops.solve_triangular', 'keras.ops.linalg.solve_triangular']) +def solve_triangular(a, b, lower=False): + if any_symbolic_tensors((a, b)): + return SolveTriangular(lower).symbolic_call(a, b) + return _solve_triangular(a, b, lower) + +def _solve_triangular(a, b, lower=False): + a = backend.convert_to_tensor(a) + b = backend.convert_to_tensor(b) + _assert_2d(a) + _assert_square(a) + _assert_1d(b) + _assert_a_b_compat(a, b) + return backend.linalg.solve_triangular(a, b, lower) + +class SVD(Operation): + + def __init__(self, full_matrices=True, compute_uv=True): + super().__init__() + self.full_matrices = full_matrices + self.compute_uv = compute_uv + + def call(self, x): + return _svd(x, 
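+
+# Usage sketch for `norm` and `qr` above (illustrative): with mode='reduced',
+# q has shape (..., m, k) and r has shape (..., k, n) where k = min(m, n).
+import numpy as np
+import keras
+x = np.random.normal(size=(4, 3)).astype('float32')
+(q, r) = keras.ops.linalg.qr(x, mode='reduced')  # q: (4, 3), r: (3, 3)
+fro = keras.ops.linalg.norm(x, ord='fro', axis=(-2, -1))  # Frobenius norm
+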
self.full_matrices, self.compute_uv) + + def compute_output_spec(self, x): + _assert_2d(x) + (rows, columns) = x.shape[-2:] + batches = x.shape[:-2] + s_shape = batches + (min(rows, columns),) + if self.full_matrices: + u_shape = batches + (rows, rows) + v_shape = batches + (columns, columns) + else: + u_shape = batches + (rows, min(rows, columns)) + v_shape = batches + (min(rows, columns), columns) + if self.compute_uv: + return (KerasTensor(u_shape, x.dtype), KerasTensor(s_shape, x.dtype), KerasTensor(v_shape, x.dtype)) + return KerasTensor(s_shape, x.dtype) + +@keras_export(['keras.ops.svd', 'keras.ops.linalg.svd']) +def svd(x, full_matrices=True, compute_uv=True): + if any_symbolic_tensors((x,)): + return SVD(full_matrices, compute_uv).symbolic_call(x) + return _svd(x, full_matrices, compute_uv) + +def _svd(x, full_matrices=True, compute_uv=True): + x = backend.convert_to_tensor(x) + _assert_2d(x) + return backend.linalg.svd(x, full_matrices, compute_uv) + +class Lstsq(Operation): + + def __init__(self, rcond=None): + super().__init__() + self.rcond = rcond + + def call(self, a, b): + return backend.linalg.lstsq(a, b, rcond=self.rcond) + + def compute_output_spec(self, a, b): + if len(a.shape) != 2: + raise ValueError(f'Expected a to have rank 2. Received: a.shape={a.shape}') + if len(b.shape) not in (1, 2): + raise ValueError(f'Expected b to have rank 1 or 2. Received: b.shape={b.shape}') + (m, n) = a.shape + if b.shape[0] != m: + raise ValueError(f'Expected b.shape[0] to be equal to a.shape[0]. Received: a.shape={a.shape}, b.shape={b.shape}') + if len(b.shape) == 2: + k = b.shape[1] + x = KerasTensor((n, k), dtype=a.dtype) + else: + x = KerasTensor((n,), dtype=a.dtype) + return x + +@keras_export(['keras.ops.lstsq', 'keras.ops.linalg.lstsq']) +def lstsq(a, b, rcond=None): + if any_symbolic_tensors((a, b)): + return Lstsq(rcond=rcond).symbolic_call(a, b) + return backend.linalg.lstsq(a, b, rcond=rcond) + +def _assert_1d(*arrays): + for a in arrays: + if a.ndim < 1: + raise ValueError(f'Expected input to have rank >= 1. Received scalar input {a}.') + +def _assert_2d(*arrays): + for a in arrays: + if a.ndim < 2: + raise ValueError(f'Expected input to have rank >= 2. Received input with shape {a.shape}.') + +def _assert_square(*arrays): + for a in arrays: + (m, n) = a.shape[-2:] + if m != n: + raise ValueError(f'Expected a square matrix. Received non-square input with shape {a.shape}') + +def _assert_a_b_compat(a, b): + if a.ndim == b.ndim: + if a.shape[-2] != b.shape[-2]: + raise ValueError(f'Incompatible shapes between `a` and `b`. Expected `a.shape[-2] == b.shape[-2]`. Received: a.shape={a.shape}, b.shape={b.shape}') + elif a.ndim == b.ndim - 1: + if a.shape[-1] != b.shape[-1]: + raise ValueError(f'Incompatible shapes between `a` and `b`. Expected `a.shape[-1] == b.shape[-1]`. Received: a.shape={a.shape}, b.shape={b.shape}') + 
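+
+# Usage sketch for `lstsq` above (illustrative): solves the least-squares
+# problem min ||a @ x - b|| for an overdetermined system (backend support for
+# `rcond` may vary).
+import numpy as np
+import keras
+a = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]], dtype='float32')
+b = np.array([6.0, 9.0, 12.0], dtype='float32')
+coeffs = keras.ops.linalg.lstsq(a, b)  # ~[3., 3.]: intercept and slope of b = 3 + 3 * t
+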
+# File: keras-master/keras/src/ops/math.py +"""""" +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.ops.operation import Operation +from keras.src.ops.operation_utils import reduce_shape + +def _segment_reduce_validation(data, segment_ids): + data_shape = data.shape + segment_ids_shape = segment_ids.shape + if len(segment_ids_shape) > 1: + raise ValueError(f'Argument `segment_ids` should be a 1-D vector, got shape {segment_ids_shape}. Consider either flattening the input with segment_ids.reshape((-1)) and data.reshape((-1, ) + data.shape[len(segment_ids.shape):]), or vectorizing with vmap.') + if segment_ids_shape[0] is not None and data_shape[0] is not None and (segment_ids_shape[0] != data_shape[0]): + raise ValueError(f'Arguments `segment_ids` and `data` should have the same leading dimension. Got {segment_ids_shape} vs. {data_shape}.') + +class SegmentReduction(Operation): + + def __init__(self, num_segments=None, sorted=False): + super().__init__() + self.num_segments = num_segments + self.sorted = sorted + + def compute_output_spec(self, data, _): + output_shape = (self.num_segments,) + tuple(data.shape[1:]) + return KerasTensor(shape=output_shape, dtype=data.dtype) + +class SegmentSum(SegmentReduction): + + def call(self, data, segment_ids): + _segment_reduce_validation(data, segment_ids) + return backend.math.segment_sum(data, segment_ids, num_segments=self.num_segments, sorted=self.sorted) + +@keras_export('keras.ops.segment_sum') +def segment_sum(data, segment_ids, num_segments=None, sorted=False): + _segment_reduce_validation(data, segment_ids) + if any_symbolic_tensors((data,)): + return SegmentSum(num_segments, sorted).symbolic_call(data, segment_ids) + return backend.math.segment_sum(data, segment_ids, num_segments=num_segments, sorted=sorted) + +class SegmentMax(SegmentReduction): + + def call(self, data, segment_ids): + _segment_reduce_validation(data, segment_ids) + return backend.math.segment_max(data, segment_ids, num_segments=self.num_segments, sorted=self.sorted) + +@keras_export('keras.ops.segment_max') +def segment_max(data, segment_ids, num_segments=None, sorted=False): + _segment_reduce_validation(data, segment_ids) + if any_symbolic_tensors((data,)): + return SegmentMax(num_segments, sorted).symbolic_call(data, segment_ids) + return backend.math.segment_max(data, segment_ids, num_segments=num_segments, sorted=sorted) + +class TopK(Operation): + + def __init__(self, k, sorted=False): + super().__init__() + self.k = k + self.sorted = sorted + + def compute_output_spec(self, x): + output_shape = list(x.shape) + output_shape[-1] = self.k + return (KerasTensor(shape=output_shape, dtype=x.dtype), KerasTensor(shape=output_shape, dtype='int32')) + + def call(self, x): + return backend.math.top_k(x, self.k, self.sorted) + +@keras_export('keras.ops.top_k') +def top_k(x, k, sorted=True): + if any_symbolic_tensors((x,)): + return TopK(k, sorted).symbolic_call(x) + return backend.math.top_k(x, k, sorted) + +class InTopK(Operation): + + def __init__(self, k): + super().__init__() + self.k = k + + def compute_output_spec(self, targets, predictions): + return KerasTensor(shape=targets.shape, dtype='bool') + + def call(self, targets, predictions): + return backend.math.in_top_k(targets, predictions, self.k) + +@keras_export('keras.ops.in_top_k') +def in_top_k(targets, predictions, k): + if any_symbolic_tensors((targets, predictions)): + return InTopK(k).symbolic_call(targets, predictions) + return backend.math.in_top_k(targets, predictions, k) + +class Logsumexp(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + self.axis = axis + self.keepdims = keepdims + + def compute_output_spec(self, x): + output_shape = reduce_shape(x.shape, self.axis, self.keepdims) + return KerasTensor(shape=output_shape) + + def call(self, x): + return backend.math.logsumexp(x, axis=self.axis, keepdims=self.keepdims) + +@keras_export('keras.ops.logsumexp') +def logsumexp(x, axis=None, keepdims=False): + if 
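+
+# Usage sketch for the segment-reduction and top-k ops above (illustrative;
+# `segment_ids` must be 1-D and match the leading dimension of `data`):
+import numpy as np
+import keras
+data = np.array([1.0, 2.0, 3.0, 4.0], dtype='float32')
+segment_ids = np.array([0, 0, 1, 1], dtype='int32')
+sums = keras.ops.segment_sum(data, segment_ids, num_segments=2)  # [3., 7.]
+(values, indices) = keras.ops.top_k(data, k=2)  # values [4., 3.], indices [3, 2]
+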
any_symbolic_tensors((x,)): + return Logsumexp(axis, keepdims).symbolic_call(x) + return backend.math.logsumexp(x, axis=axis, keepdims=keepdims) + +class ExtractSequences(Operation): + + def __init__(self, sequence_length, sequence_stride): + super().__init__() + self.sequence_length = sequence_length + self.sequence_stride = sequence_stride + + def compute_output_spec(self, x): + if len(x.shape) < 1: + raise ValueError(f'Input should have rank >= 1. Received: input.shape = {x.shape}') + if x.shape[-1] is not None: + num_sequences = 1 + (x.shape[-1] - self.sequence_length) // self.sequence_stride + else: + num_sequences = None + new_shape = x.shape[:-1] + (num_sequences, self.sequence_length) + return KerasTensor(shape=new_shape, dtype=x.dtype) + + def call(self, x): + return backend.math.extract_sequences(x, sequence_length=self.sequence_length, sequence_stride=self.sequence_stride) + +@keras_export('keras.ops.extract_sequences') +def extract_sequences(x, sequence_length, sequence_stride): + if any_symbolic_tensors((x,)): + return ExtractSequences(sequence_length, sequence_stride).symbolic_call(x) + return backend.math.extract_sequences(x, sequence_length, sequence_stride) + +class FFT(Operation): + + def __init__(self, axis=-1): + super().__init__() + self.axis = axis + + def compute_output_spec(self, x): + if not isinstance(x, (tuple, list)) or len(x) != 2: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Received: x={x}') + (real, imag) = x + if real.shape != imag.shape: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Both the real and imaginary parts should have the same shape. Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}') + if len(real.shape) < 1: + raise ValueError(f'Input should have rank >= 1. Received: input.shape = {real.shape}') + m = real.shape[-1] + if m is None: + raise ValueError(f'Input should have its {self.axis}th axis fully-defined. Received: input.shape = {real.shape}') + return (KerasTensor(shape=real.shape, dtype=real.dtype), KerasTensor(shape=imag.shape, dtype=imag.dtype)) + + def call(self, x): + return backend.math.fft(x) + +@keras_export('keras.ops.fft') +def fft(x): + if any_symbolic_tensors(x): + return FFT().symbolic_call(x) + return backend.math.fft(x) + +class FFT2(Operation): + + def __init__(self): + super().__init__() + self.axes = (-2, -1) + + def compute_output_spec(self, x): + if not isinstance(x, (tuple, list)) or len(x) != 2: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Received: x={x}') + (real, imag) = x + if real.shape != imag.shape: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Both the real and imaginary parts should have the same shape. Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}') + if len(real.shape) < 2: + raise ValueError(f'Input should have rank >= 2. Received: input.shape = {real.shape}') + m = real.shape[self.axes[0]] + n = real.shape[self.axes[1]] + if m is None or n is None: + raise ValueError(f'Input should have its {self.axes} axes fully-defined. 
Received: input.shape = {real.shape}') + return (KerasTensor(shape=real.shape, dtype=real.dtype), KerasTensor(shape=imag.shape, dtype=imag.dtype)) + + def call(self, x): + return backend.math.fft2(x) + +@keras_export('keras.ops.fft2') +def fft2(x): + if any_symbolic_tensors(x): + return FFT2().symbolic_call(x) + return backend.math.fft2(x) + +class RFFT(Operation): + + def __init__(self, fft_length=None): + super().__init__() + self.fft_length = fft_length + + def compute_output_spec(self, x): + if len(x.shape) < 1: + raise ValueError(f'Input should have rank >= 1. Received: input.shape = {x.shape}') + if self.fft_length is not None: + new_last_dimension = self.fft_length // 2 + 1 + elif x.shape[-1] is not None: + new_last_dimension = x.shape[-1] // 2 + 1 + else: + new_last_dimension = None + new_shape = x.shape[:-1] + (new_last_dimension,) + return (KerasTensor(shape=new_shape, dtype=x.dtype), KerasTensor(shape=new_shape, dtype=x.dtype)) + + def call(self, x): + return backend.math.rfft(x, fft_length=self.fft_length) + +@keras_export('keras.ops.rfft') +def rfft(x, fft_length=None): + if any_symbolic_tensors((x,)): + return RFFT(fft_length).symbolic_call(x) + return backend.math.rfft(x, fft_length) + +class IRFFT(Operation): + + def __init__(self, fft_length=None): + super().__init__() + self.fft_length = fft_length + + def compute_output_spec(self, x): + if not isinstance(x, (tuple, list)) or len(x) != 2: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Received: x={x}') + (real, imag) = x + if real.shape != imag.shape: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Both the real and imaginary parts should have the same shape. Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}') + if len(real.shape) < 1: + raise ValueError(f'Input should have rank >= 1. 
Received: input.shape = {real.shape}') + if self.fft_length is not None: + new_last_dimension = self.fft_length + elif real.shape[-1] is not None: + new_last_dimension = 2 * (real.shape[-1] - 1) + else: + new_last_dimension = None + new_shape = real.shape[:-1] + (new_last_dimension,) + return KerasTensor(shape=new_shape, dtype=real.dtype) + + def call(self, x): + return backend.math.irfft(x, fft_length=self.fft_length) + +@keras_export('keras.ops.irfft') +def irfft(x, fft_length=None): + if any_symbolic_tensors(x): + return IRFFT(fft_length).symbolic_call(x) + return backend.math.irfft(x, fft_length) + +class STFT(Operation): + + def __init__(self, sequence_length, sequence_stride, fft_length, window='hann', center=True): + super().__init__() + self.sequence_length = sequence_length + self.sequence_stride = sequence_stride + self.fft_length = fft_length + self.window = window + self.center = center + + def compute_output_spec(self, x): + if x.shape[-1] is not None: + padded = 0 if self.center is False else self.fft_length // 2 * 2 + num_sequences = 1 + (x.shape[-1] + padded - self.fft_length) // self.sequence_stride + else: + num_sequences = None + new_shape = x.shape[:-1] + (num_sequences, self.fft_length // 2 + 1) + return (KerasTensor(shape=new_shape, dtype=x.dtype), KerasTensor(shape=new_shape, dtype=x.dtype)) + + def call(self, x): + return backend.math.stft(x, sequence_length=self.sequence_length, sequence_stride=self.sequence_stride, fft_length=self.fft_length, window=self.window, center=self.center) + +@keras_export('keras.ops.stft') +def stft(x, sequence_length, sequence_stride, fft_length, window='hann', center=True): + if any_symbolic_tensors((x,)): + return STFT(sequence_length=sequence_length, sequence_stride=sequence_stride, fft_length=fft_length, window=window, center=center).symbolic_call(x) + return backend.math.stft(x, sequence_length=sequence_length, sequence_stride=sequence_stride, fft_length=fft_length, window=window, center=center) + +class ISTFT(Operation): + + def __init__(self, sequence_length, sequence_stride, fft_length, length=None, window='hann', center=True): + super().__init__() + self.sequence_length = sequence_length + self.sequence_stride = sequence_stride + self.fft_length = fft_length + self.length = length + self.window = window + self.center = center + + def compute_output_spec(self, x): + if not isinstance(x, (tuple, list)) or len(x) != 2: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Received: x={x}') + (real, imag) = x + if real.shape != imag.shape: + raise ValueError(f'Input `x` should be a tuple of two tensors - real and imaginary. Both the real and imaginary parts should have the same shape. Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}') + if len(real.shape) < 2: + raise ValueError(f'Input should have rank >= 2. 
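+
+# Usage sketch for `rfft`/`irfft` above (illustrative): `rfft` returns a
+# (real, imag) pair whose last dimension is fft_length // 2 + 1, and `irfft`
+# inverts such a pair.
+import numpy as np
+import keras
+x = np.array([0.0, 1.0, 0.0, 0.0], dtype='float32')
+(real, imag) = keras.ops.rfft(x)  # each of shape (3,)
+x_back = keras.ops.irfft((real, imag), fft_length=4)  # ~= x up to float error
+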
Received: input.shape = {real.shape}') + if real.shape[-2] is not None: + output_size = (real.shape[-2] - 1) * self.sequence_stride + self.fft_length + if self.length is not None: + output_size = self.length + elif self.center: + output_size = output_size - self.fft_length // 2 * 2 + else: + output_size = None + new_shape = real.shape[:-2] + (output_size,) + return KerasTensor(shape=new_shape, dtype=real.dtype) + + def call(self, x): + return backend.math.istft(x, sequence_length=self.sequence_length, sequence_stride=self.sequence_stride, fft_length=self.fft_length, length=self.length, window=self.window, center=self.center) + +@keras_export('keras.ops.istft') +def istft(x, sequence_length, sequence_stride, fft_length, length=None, window='hann', center=True): + if any_symbolic_tensors(x): + return ISTFT(sequence_length=sequence_length, sequence_stride=sequence_stride, fft_length=fft_length, length=length, window=window, center=center).symbolic_call(x) + return backend.math.istft(x, sequence_length=sequence_length, sequence_stride=sequence_stride, fft_length=fft_length, length=length, window=window, center=center) + +class Rsqrt(Operation): + + def call(self, x): + x = backend.convert_to_tensor(x) + return backend.math.rsqrt(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export('keras.ops.rsqrt') +def rsqrt(x): + if any_symbolic_tensors((x,)): + return Rsqrt().symbolic_call(x) + x = backend.convert_to_tensor(x) + return backend.math.rsqrt(x) + +class Erf(Operation): + + def compute_output_spec(self, x): + return KerasTensor(shape=x.shape, dtype=x.dtype) + + def call(self, x): + return backend.math.erf(x) + +@keras_export('keras.ops.erf') +def erf(x): + if any_symbolic_tensors((x,)): + return Erf().symbolic_call(x) + x = backend.convert_to_tensor(x) + return backend.math.erf(x) + +class Erfinv(Operation): + + def compute_output_spec(self, x): + return KerasTensor(shape=x.shape, dtype=x.dtype) + + def call(self, x): + return backend.math.erfinv(x) + +@keras_export('keras.ops.erfinv') +def erfinv(x): + if any_symbolic_tensors((x,)): + return Erfinv().symbolic_call(x) + x = backend.convert_to_tensor(x) + return backend.math.erfinv(x) + +class Logdet(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return backend.math.logdet(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape[:-2], dtype=x.dtype) + +@keras_export(['keras.ops.logdet']) +def logdet(x): + if any_symbolic_tensors((x,)): + return Logdet().symbolic_call(x) + return backend.math.logdet(x) + +# File: keras-master/keras/src/ops/nn.py +"""""" +import warnings +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.backend import standardize_data_format +from keras.src.backend.common.backend_utils import compute_conv_transpose_output_shape +from keras.src.ops import operation_utils +from keras.src.ops.operation import Operation +from keras.src.ops.operation_utils import reduce_shape + +class Relu(Operation): + + def call(self, x): + return backend.nn.relu(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.relu', 'keras.ops.nn.relu']) +def relu(x): + if any_symbolic_tensors((x,)): + return Relu().symbolic_call(x) + return backend.nn.relu(x) + +class Relu6(Operation): + + def call(self, x): + return backend.nn.relu6(x) + + def compute_output_spec(self, x): + 
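+
+# Usage sketch for `stft`/`istft` above (illustrative; exact reconstruction
+# holds only when the window and hop satisfy the usual overlap-add
+# constraints, so treat the round trip as approximate):
+import numpy as np
+import keras
+x = np.random.normal(size=(32,)).astype('float32')
+(real, imag) = keras.ops.stft(x, sequence_length=8, sequence_stride=4, fft_length=8)
+# real.shape == imag.shape == (9, 5): 9 centered frames, 8 // 2 + 1 = 5 bins
+y = keras.ops.istft((real, imag), sequence_length=8, sequence_stride=4, fft_length=8)
+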
return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.relu6', 'keras.ops.nn.relu6']) +def relu6(x): + if any_symbolic_tensors((x,)): + return Relu6().symbolic_call(x) + return backend.nn.relu6(x) + +class Sigmoid(Operation): + + def call(self, x): + return backend.nn.sigmoid(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.sigmoid', 'keras.ops.nn.sigmoid']) +def sigmoid(x): + if any_symbolic_tensors((x,)): + return Sigmoid().symbolic_call(x) + return backend.nn.sigmoid(x) + +class Softplus(Operation): + + def call(self, x): + return backend.nn.softplus(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.softplus', 'keras.ops.nn.softplus']) +def softplus(x): + if any_symbolic_tensors((x,)): + return Softplus().symbolic_call(x) + return backend.nn.softplus(x) + +class Softsign(Operation): + + def call(self, x): + return backend.nn.softsign(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.softsign', 'keras.ops.nn.softsign']) +def softsign(x): + if any_symbolic_tensors((x,)): + return Softsign().symbolic_call(x) + return backend.nn.softsign(x) + +class Silu(Operation): + + def call(self, x): + return backend.nn.silu(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.silu', 'keras.ops.nn.silu', 'keras.ops.swish', 'keras.ops.nn.swish']) +def silu(x): + if any_symbolic_tensors((x,)): + return Silu().symbolic_call(x) + return backend.nn.silu(x) + +class LogSigmoid(Operation): + + def call(self, x): + return backend.nn.log_sigmoid(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.log_sigmoid', 'keras.ops.nn.log_sigmoid']) +def log_sigmoid(x): + if any_symbolic_tensors((x,)): + return LogSigmoid().symbolic_call(x) + return backend.nn.log_sigmoid(x) + +class LeakyRelu(Operation): + + def __init__(self, negative_slope=0.2): + super().__init__() + self.negative_slope = negative_slope + + def call(self, x): + return backend.nn.leaky_relu(x, self.negative_slope) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.leaky_relu', 'keras.ops.nn.leaky_relu']) +def leaky_relu(x, negative_slope=0.2): + if any_symbolic_tensors((x,)): + return LeakyRelu(negative_slope).symbolic_call(x) + return backend.nn.leaky_relu(x, negative_slope=negative_slope) + +class HardSigmoid(Operation): + + def call(self, x): + return backend.nn.hard_sigmoid(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.hard_sigmoid', 'keras.ops.nn.hard_sigmoid']) +def hard_sigmoid(x): + if any_symbolic_tensors((x,)): + return HardSigmoid().symbolic_call(x) + return backend.nn.hard_sigmoid(x) + +class HardSilu(Operation): + + def call(self, x): + return backend.nn.hard_silu(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.hard_silu', 'keras.ops.nn.hard_silu', 'keras.ops.hard_swish', 'keras.ops.nn.hard_swish']) +def hard_silu(x): + if any_symbolic_tensors((x,)): + return HardSilu().symbolic_call(x) + return backend.nn.hard_silu(x) + +class Elu(Operation): + + def __init__(self, alpha=1.0): + super().__init__() + self.alpha = alpha + + def call(self, x): + return backend.nn.elu(x, alpha=self.alpha) + + def 
compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.elu', 'keras.ops.nn.elu']) +def elu(x, alpha=1.0): + if any_symbolic_tensors((x,)): + return Elu(alpha).symbolic_call(x) + return backend.nn.elu(x, alpha=alpha) + +class Selu(Operation): + + def call(self, x): + return backend.nn.selu(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.selu', 'keras.ops.nn.selu']) +def selu(x): + if any_symbolic_tensors((x,)): + return Selu().symbolic_call(x) + return backend.nn.selu(x) + +class Gelu(Operation): + + def __init__(self, approximate=True): + super().__init__() + self.approximate = approximate + + def call(self, x): + return backend.nn.gelu(x, self.approximate) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.gelu', 'keras.ops.nn.gelu']) +def gelu(x, approximate=True): + if any_symbolic_tensors((x,)): + return Gelu(approximate).symbolic_call(x) + return backend.nn.gelu(x, approximate) + +class Softmax(Operation): + + def __init__(self, axis=-1): + super().__init__() + self.axis = axis + + def call(self, x): + return backend.nn.softmax(x, axis=self.axis) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.softmax', 'keras.ops.nn.softmax']) +def softmax(x, axis=-1): + if isinstance(axis, int) and x.shape[axis] == 1: + warnings.warn(f'You are using a softmax over axis {axis} of a tensor of shape {x.shape}. This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?') + if any_symbolic_tensors((x,)): + return Softmax(axis).symbolic_call(x) + if isinstance(axis, tuple): + axis_to_keep = [v for v in range(len(x.shape)) if v not in axis] + x_transposed = backend.numpy.transpose(x, axes=(*axis_to_keep, *axis)) + x_reshaped = backend.numpy.reshape(x_transposed, (*[x.shape[v] for v in axis_to_keep], -1)) + x = backend.nn.softmax(x_reshaped, axis=-1) + x = backend.numpy.reshape(x, x_transposed.shape) + x = backend.numpy.transpose(x, axes=list(backend.numpy.argsort([*axis_to_keep, *axis]))) + return x + else: + return backend.nn.softmax(x, axis=axis) + +class LogSoftmax(Operation): + + def __init__(self, axis=-1): + super().__init__() + self.axis = axis + + def call(self, x): + return backend.nn.log_softmax(x, axis=self.axis) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.log_softmax', 'keras.ops.nn.log_softmax']) +def log_softmax(x, axis=-1): + if any_symbolic_tensors((x,)): + return LogSoftmax(axis).symbolic_call(x) + if isinstance(axis, tuple): + axis_to_keep = [v for v in range(len(x.shape)) if v not in axis] + x_transposed = backend.numpy.transpose(x, axes=(*axis_to_keep, *axis)) + x_reshaped = backend.numpy.reshape(x_transposed, (*[x.shape[v] for v in axis_to_keep], -1)) + x = backend.nn.log_softmax(x_reshaped, axis=-1) + x = backend.numpy.reshape(x, x_transposed.shape) + x = backend.numpy.transpose(x, axes=list(backend.numpy.argsort([*axis_to_keep, *axis]))) + return x + else: + return backend.nn.log_softmax(x, axis=axis) + +class MaxPool(Operation): + + def __init__(self, pool_size, strides=None, padding='valid', data_format=None): + super().__init__() + self.pool_size = pool_size + self.strides = strides + self.padding = padding.lower() + self.data_format = data_format + + def call(self, inputs): + return 
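+
+# Usage sketch for the tuple-axis `softmax` path above (illustrative): the
+# input is transposed and flattened so the softmax runs jointly over the
+# selected axes, then restored to the original layout.
+import numpy as np
+import keras
+x = np.random.normal(size=(2, 3, 4)).astype('float32')
+p = keras.ops.softmax(x, axis=(1, 2))
+# keras.ops.sum(p, axis=(1, 2)) is ~1.0 for each of the 2 leading entries
+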
backend.nn.max_pool(inputs, self.pool_size, self.strides, self.padding, self.data_format) + + def compute_output_spec(self, inputs): + output_shape = operation_utils.compute_pooling_output_shape(inputs.shape, self.pool_size, self.strides, self.padding, self.data_format) + return KerasTensor(output_shape, dtype=inputs.dtype) + +@keras_export(['keras.ops.max_pool', 'keras.ops.nn.max_pool']) +def max_pool(inputs, pool_size, strides=None, padding='valid', data_format=None): + data_format = standardize_data_format(data_format) + padding = padding.lower() + if any_symbolic_tensors((inputs,)): + return MaxPool(pool_size, strides, padding, data_format).symbolic_call(inputs) + return backend.nn.max_pool(inputs, pool_size, strides, padding, data_format) + +class AveragePool(Operation): + + def __init__(self, pool_size, strides=None, padding='valid', data_format=None): + super().__init__() + self.pool_size = pool_size + self.strides = strides + self.padding = padding.lower() + self.data_format = data_format + + def call(self, inputs): + return backend.nn.average_pool(inputs, self.pool_size, self.strides, self.padding, self.data_format) + + def compute_output_spec(self, inputs): + output_shape = operation_utils.compute_pooling_output_shape(inputs.shape, self.pool_size, self.strides, self.padding, self.data_format) + return KerasTensor(output_shape, dtype=inputs.dtype) + +@keras_export(['keras.ops.average_pool', 'keras.ops.nn.average_pool']) +def average_pool(inputs, pool_size, strides=None, padding='valid', data_format=None): + data_format = standardize_data_format(data_format) + padding = padding.lower() + if any_symbolic_tensors((inputs,)): + return AveragePool(pool_size, strides, padding, data_format).symbolic_call(inputs) + return backend.nn.average_pool(inputs, pool_size, strides, padding, data_format) + +class Conv(Operation): + + def __init__(self, strides=1, padding='valid', data_format=None, dilation_rate=1): + super().__init__() + self.strides = strides + self.padding = padding.lower() + self.data_format = data_format + self.dilation_rate = dilation_rate + + def call(self, inputs, kernel): + return backend.nn.conv(inputs, kernel, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate) + + def compute_output_spec(self, inputs, kernel): + output_shape = operation_utils.compute_conv_output_shape(inputs.shape, kernel.shape[-1], kernel.shape[:-2], self.strides, self.padding, self.data_format, self.dilation_rate) + return KerasTensor(output_shape, dtype=inputs.dtype) + +@keras_export(['keras.ops.conv', 'keras.ops.nn.conv']) +def conv(inputs, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + data_format = standardize_data_format(data_format) + padding = padding.lower() + if any_symbolic_tensors((inputs,)): + return Conv(strides, padding, data_format, dilation_rate).symbolic_call(inputs, kernel) + return backend.nn.conv(inputs, kernel, strides, padding, data_format, dilation_rate) + +class DepthwiseConv(Operation): + + def __init__(self, strides=1, padding='valid', data_format=None, dilation_rate=1): + super().__init__() + self.strides = strides + self.padding = padding.lower() + self.data_format = data_format + self.dilation_rate = dilation_rate + + def call(self, inputs, kernel): + return backend.nn.depthwise_conv(inputs, kernel, self.strides, self.padding, self.data_format, self.dilation_rate) + + def compute_output_spec(self, inputs, kernel): + output_shape = operation_utils.compute_conv_output_shape(inputs.shape, 
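+
+# Shape sketch for `conv` above (illustrative, assuming the default
+# 'channels_last' data format): the kernel layout is
+# (kernel_h, kernel_w, in_channels, out_channels).
+import numpy as np
+import keras
+inputs = np.random.normal(size=(1, 8, 8, 3)).astype('float32')
+kernel = np.random.normal(size=(3, 3, 3, 16)).astype('float32')
+out = keras.ops.conv(inputs, kernel, strides=1, padding='same')
+# out.shape == (1, 8, 8, 16); padding='valid' would give (1, 6, 6, 16)
+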
kernel.shape[-1] * kernel.shape[-2], kernel.shape[:-2], self.strides, self.padding, self.data_format, self.dilation_rate) + return KerasTensor(output_shape, dtype=inputs.dtype) + +@keras_export(['keras.ops.depthwise_conv', 'keras.ops.nn.depthwise_conv']) +def depthwise_conv(inputs, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + data_format = standardize_data_format(data_format) + padding = padding.lower() + if any_symbolic_tensors((inputs,)): + return DepthwiseConv(strides, padding, data_format, dilation_rate).symbolic_call(inputs, kernel) + return backend.nn.depthwise_conv(inputs, kernel, strides, padding, data_format, dilation_rate) + +class SeparableConv(Operation): + + def __init__(self, strides=1, padding='valid', data_format=None, dilation_rate=1): + super().__init__() + self.strides = strides + self.padding = padding.lower() + self.data_format = data_format + self.dilation_rate = dilation_rate + + def call(self, inputs, depthwise_kernel, pointwise_kernel): + return backend.nn.separable_conv(inputs, depthwise_kernel, pointwise_kernel, self.strides, self.padding, self.data_format, self.dilation_rate) + + def compute_output_spec(self, inputs, depthwise_kernel, pointwise_kernel): + output_shape = list(depthwise_conv(inputs, depthwise_kernel, self.strides, self.padding, self.data_format, self.dilation_rate).shape) + if self.data_format == 'channels_last': + output_shape[-1] = pointwise_kernel.shape[-1] + else: + output_shape[1] = pointwise_kernel.shape[-1] + return KerasTensor(output_shape, dtype=inputs.dtype) + +@keras_export(['keras.ops.separable_conv', 'keras.ops.nn.separable_conv']) +def separable_conv(inputs, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): + data_format = standardize_data_format(data_format) + padding = padding.lower() + if any_symbolic_tensors((inputs,)): + return SeparableConv(strides, padding, data_format, dilation_rate).symbolic_call(inputs, depthwise_kernel, pointwise_kernel) + return backend.nn.separable_conv(inputs, depthwise_kernel, pointwise_kernel, strides, padding, data_format, dilation_rate) + +class ConvTranspose(Operation): + + def __init__(self, strides, padding='valid', output_padding=None, data_format=None, dilation_rate=1): + super().__init__() + self.strides = strides + self.output_padding = output_padding + self.padding = padding.lower() + self.data_format = data_format + self.dilation_rate = dilation_rate + + def call(self, inputs, kernel): + return backend.nn.conv_transpose(inputs, kernel, self.strides, self.output_padding, self.padding, self.data_format, self.dilation_rate) + + def compute_output_spec(self, inputs, kernel): + kernel_size = kernel.shape[:-2] + filters = kernel.shape[-2] + output_shape = compute_conv_transpose_output_shape(inputs.shape, kernel_size, filters, self.strides, self.padding, self.output_padding, self.data_format, self.dilation_rate) + return KerasTensor(output_shape, dtype=inputs.dtype) + +@keras_export(['keras.ops.conv_transpose', 'keras.ops.nn.conv_transpose']) +def conv_transpose(inputs, kernel, strides, padding='valid', output_padding=None, data_format=None, dilation_rate=1): + data_format = standardize_data_format(data_format) + padding = padding.lower() + if any_symbolic_tensors((inputs,)): + return ConvTranspose(strides, padding, output_padding, data_format, dilation_rate).symbolic_call(inputs, kernel) + return backend.nn.conv_transpose(inputs, kernel, strides, padding, output_padding, data_format, dilation_rate) + +class 
OneHot(Operation): + + def __init__(self, num_classes, axis=-1, dtype=None, sparse=False): + super().__init__() + self.num_classes = num_classes + self.axis = axis + self.dtype = dtype or backend.floatx() + self.sparse = sparse + + def call(self, x): + return backend.nn.one_hot(x, self.num_classes, axis=self.axis, dtype=self.dtype, sparse=self.sparse) + + def compute_output_spec(self, x): + x_shape = list(getattr(x, 'shape', [])) + if self.axis == -1: + x_shape.append(self.num_classes) + elif self.axis >= 0 and self.axis < len(x_shape): + x_shape.insert(self.axis, self.num_classes) + else: + raise ValueError(f'axis must be -1 or between [0, {len(x.shape)}), but received {self.axis}.') + return KerasTensor(x_shape, dtype=self.dtype, sparse=self.sparse) + +@keras_export(['keras.ops.one_hot', 'keras.ops.nn.one_hot']) +def one_hot(x, num_classes, axis=-1, dtype=None, sparse=False): + if any_symbolic_tensors((x,)): + return OneHot(num_classes, axis=axis, dtype=dtype, sparse=sparse).symbolic_call(x) + return backend.nn.one_hot(x, num_classes, axis=axis, dtype=dtype or backend.floatx(), sparse=sparse) + +class BinaryCrossentropy(Operation): + + def __init__(self, from_logits=False): + super().__init__() + self.from_logits = from_logits + + def call(self, target, output): + return backend.nn.binary_crossentropy(target, output, from_logits=self.from_logits) + + def compute_output_spec(self, target, output): + if target.shape != output.shape: + raise ValueError(f'Arguments `target` and `output` must have the same shape. Received: target.shape={target.shape}, output.shape={output.shape}') + return KerasTensor(output.shape, dtype=output.dtype) + +@keras_export(['keras.ops.binary_crossentropy', 'keras.ops.nn.binary_crossentropy']) +def binary_crossentropy(target, output, from_logits=False): + if any_symbolic_tensors((target, output)): + return BinaryCrossentropy(from_logits=from_logits).symbolic_call(target, output) + return backend.nn.binary_crossentropy(target, output, from_logits=from_logits) + +class CategoricalCrossentropy(Operation): + + def __init__(self, from_logits=False, axis=-1): + super().__init__() + self.from_logits = from_logits + self.axis = axis + + def call(self, target, output): + return backend.nn.categorical_crossentropy(target, output, from_logits=self.from_logits, axis=self.axis) + + def compute_output_spec(self, target, output): + if target.shape != output.shape: + raise ValueError(f'Arguments `target` and `output` must have the same shape. Received: target.shape={target.shape}, output.shape={output.shape}') + if len(target.shape) < 1: + raise ValueError(f'Arguments `target` and `output` must be at least rank 1. 
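+
+# Usage sketch for `one_hot` above (illustrative): with axis=-1 the class
+# dimension is appended, so an input of shape (3,) becomes (3, num_classes).
+import numpy as np
+import keras
+labels = np.array([0, 2, 1], dtype='int32')
+onehot = keras.ops.one_hot(labels, num_classes=3)
+# [[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]
+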
Received: target.shape={target.shape}, output.shape={output.shape}') + return KerasTensor(output.shape[:-1], dtype=output.dtype) + +@keras_export(['keras.ops.categorical_crossentropy', 'keras.ops.nn.categorical_crossentropy']) +def categorical_crossentropy(target, output, from_logits=False, axis=-1): + if any_symbolic_tensors((target, output)): + return CategoricalCrossentropy(from_logits=from_logits, axis=axis).symbolic_call(target, output) + return backend.nn.categorical_crossentropy(target, output, from_logits=from_logits, axis=axis) + +class SparseCategoricalCrossentropy(Operation): + + def __init__(self, from_logits=False, axis=-1): + super().__init__() + self.from_logits = from_logits + self.axis = axis + + def call(self, target, output): + return backend.nn.sparse_categorical_crossentropy(target, output, from_logits=self.from_logits, axis=self.axis) + + def compute_output_spec(self, target, output): + if len(output.shape) < 1: + raise ValueError(f'Argument `output` must be at least rank 1. Received: output.shape={output.shape}') + target_shape = target.shape + if len(target_shape) == len(output.shape) and target_shape[-1] == 1: + target_shape = target_shape[:-1] + if target_shape != output.shape[:-1]: + raise ValueError(f'Arguments `target` and `output` must have the same shape up until the last dimension: target.shape={target.shape}, output.shape={output.shape}') + return KerasTensor(output.shape[:-1], dtype=output.dtype) + +@keras_export(['keras.ops.sparse_categorical_crossentropy', 'keras.ops.nn.sparse_categorical_crossentropy']) +def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): + if any_symbolic_tensors((target, output)): + return SparseCategoricalCrossentropy(from_logits=from_logits, axis=axis).symbolic_call(target, output) + return backend.nn.sparse_categorical_crossentropy(target, output, from_logits=from_logits, axis=axis) + +class MultiHot(Operation): + + def __init__(self, num_classes=None, axis=-1, dtype=None, sparse=False, **kwargs): + if num_classes is None and 'num_tokens' in kwargs: + num_classes = kwargs.pop('num_tokens') + if num_classes is None: + raise ValueError('Argument `num_classes` must be specified.') + super().__init__(**kwargs) + self.num_classes = num_classes + self.axis = axis + self.dtype = dtype or backend.floatx() + self.sparse = sparse + + def call(self, inputs): + return backend.nn.multi_hot(inputs, num_classes=self.num_classes, axis=self.axis, dtype=self.dtype) + + def compute_output_spec(self, inputs): + x_shape = list(getattr(inputs, 'shape', [])) + if self.axis == -1: + x_shape.append(self.num_classes) + elif self.axis >= 0 and self.axis < len(x_shape): + x_shape.insert(self.axis, self.num_classes) + else: + raise ValueError(f'axis must be -1 or between [0, {len(inputs.shape)}), but received {self.axis}.') + if len(x_shape) == 2: + x_shape = [x_shape[-1]] + else: + x_shape = [x_shape[0]] + x_shape[2:] + return KerasTensor(x_shape, dtype=inputs.dtype, sparse=self.sparse) + +@keras_export(['keras.ops.multi_hot', 'keras.ops.nn.multi_hot']) +def multi_hot(inputs, num_classes=None, axis=-1, dtype=None, sparse=False, **kwargs): + if num_classes is None and 'num_tokens' in kwargs: + num_classes = kwargs.pop('num_tokens') + if num_classes is None: + raise ValueError('Argument `num_classes` must be specified.') + if any_symbolic_tensors((inputs,)): + return MultiHot(num_classes, axis, dtype, sparse).symbolic_call(inputs) + return backend.nn.multi_hot(inputs, num_classes, axis, dtype, sparse) + +class Moments(Operation): + 
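+
+# Usage sketch for the crossentropy ops above (illustrative): sparse targets
+# are integer class ids, so `target` matches `output` minus its class axis.
+import numpy as np
+import keras
+target = np.array([0, 1], dtype='int32')
+logits = np.array([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0]], dtype='float32')
+loss = keras.ops.sparse_categorical_crossentropy(target, logits, from_logits=True)
+# loss.shape == (2,): one value per sample
+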
+ def __init__(self, axes, keepdims=False, synchronized=False): + super().__init__() + self.axes = axes + self.keepdims = keepdims + self.synchronized = synchronized + + def call(self, x): + return backend.nn.moments(x, axes=self.axes, keepdims=self.keepdims, synchronized=self.synchronized) + + def compute_output_spec(self, x): + return (KerasTensor(reduce_shape(x.shape, axis=self.axes, keepdims=self.keepdims), dtype=x.dtype), KerasTensor(reduce_shape(x.shape, axis=self.axes, keepdims=self.keepdims), dtype=x.dtype)) + +@keras_export(['keras.ops.moments', 'keras.ops.nn.moments']) +def moments(x, axes, keepdims=False, synchronized=False): + if any_symbolic_tensors((x,)): + return Moments(axes, keepdims, synchronized=synchronized).symbolic_call(x) + return backend.nn.moments(x, axes, keepdims, synchronized=synchronized) + +class BatchNorm(Operation): + + def __init__(self, axis, epsilon): + super().__init__() + self.axis = axis + self.epsilon = epsilon + + def _check_shape(self, name, shape, expected_shape): + if shape != expected_shape: + raise ValueError(f'Argument `{name}` must be a vector of length `x.shape[axis]`. Expected: `{expected_shape}`. Received: `{shape}`.') + + def compute_output_spec(self, x, mean, variance, offset, scale): + shape = (x.shape[self.axis],) + self._check_shape('mean', tuple(mean.shape), shape) + self._check_shape('variance', tuple(variance.shape), shape) + if offset is not None: + self._check_shape('offset', tuple(offset.shape), shape) + if scale is not None: + self._check_shape('scale', tuple(scale.shape), shape) + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.batch_normalization', 'keras.ops.nn.batch_normalization']) +def batch_normalization(x, mean, variance, axis, offset=None, scale=None, epsilon=0.001): + if any_symbolic_tensors((x, mean, variance, offset, scale)): + return BatchNorm(axis, epsilon).symbolic_call(x, mean, variance, offset, scale) + return backend.nn.batch_normalization(x, mean, variance, axis, offset, scale, epsilon) + +class CTCLoss(Operation): + + def __init__(self, mask_index=0): + super().__init__() + self.mask_index = mask_index + + def call(self, target, output, target_length, output_length): + return backend.nn.ctc_loss(target, output, target_length, output_length, self.mask_index) + + def _check_shape_first_dim(self, name1, shape1, name2, shape2): + if shape1[0] != shape2[0]: + raise ValueError(f'Arguments `{name1}` and `{name2}` must have the same first dimension. 
Received shapes: `{shape1}` and `{shape2}`.') + + def compute_output_spec(self, target, output, target_length, output_length): + self._check_shape_first_dim('target', target.shape, 'output', output.shape) + self._check_shape_first_dim('target_length', target_length.shape, 'target', target.shape) + self._check_shape_first_dim('output_length', output_length.shape, 'output', output.shape) + dtype = backend.result_type(output.dtype, 'float32') + return KerasTensor((target.shape[0],), dtype=dtype) + +@keras_export(['keras.ops.ctc_loss', 'keras.ops.nn.ctc_loss']) +def ctc_loss(target, output, target_length, output_length, mask_index=0): + if any_symbolic_tensors((target, output, target_length, output_length)): + return CTCLoss(mask_index).symbolic_call(target, output, target_length, output_length) + return backend.nn.ctc_loss(target, output, target_length, output_length, mask_index) + +class CTCDecode(Operation): + + def __init__(self, strategy='greedy', beam_width=100, top_paths=1, merge_repeated=True, mask_index=0): + super().__init__() + self.strategy = strategy + self.beam_width = beam_width + self.top_paths = top_paths + self.merge_repeated = merge_repeated + self.mask_index = mask_index + + def call(self, inputs, sequence_lengths): + return backend.nn.ctc_decode(inputs, sequence_lengths, strategy=self.strategy, beam_width=self.beam_width, top_paths=self.top_paths, merge_repeated=self.merge_repeated, mask_index=self.mask_index) + + def compute_output_spec(self, inputs, sequence_lengths): + inputs_shape = inputs.shape + if self.strategy == 'greedy': + top_paths = 1 + else: + top_paths = self.top_paths + dtype = backend.result_type(inputs.dtype, 'float32') + return (KerasTensor((top_paths, inputs_shape[0], inputs_shape[1]), dtype='int32'), KerasTensor((inputs_shape[0], top_paths), dtype=dtype)) + +@keras_export(['keras.ops.ctc_decode', 'keras.ops.nn.ctc_decode']) +def ctc_decode(inputs, sequence_lengths, strategy='greedy', beam_width=100, top_paths=1, merge_repeated=True, mask_index=0): + if any_symbolic_tensors((inputs, sequence_lengths)): + return CTCDecode(strategy=strategy, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated, mask_index=mask_index).symbolic_call(inputs, sequence_lengths) + return backend.nn.ctc_decode(inputs=inputs, sequence_lengths=sequence_lengths, strategy=strategy, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated, mask_index=mask_index) + +class Normalize(Operation): + + def __init__(self, axis=-1, order=2, epsilon=None): + super().__init__() + self.axis = axis + self.order = order + self.epsilon = epsilon + + def compute_output_spec(self, x): + return KerasTensor(shape=x.shape) + + def call(self, x): + return _normalize(x, axis=self.axis, order=self.order, epsilon=self.epsilon) + +@keras_export(['keras.ops.normalize', 'keras.ops.nn.normalize']) +def normalize(x, axis=-1, order=2, epsilon=None): + if any_symbolic_tensors((x,)): + return Normalize(axis=axis, order=order, epsilon=epsilon).symbolic_call(x) + return _normalize(x, axis=axis, order=order, epsilon=epsilon) + +def _normalize(x, axis=-1, order=2, epsilon=None): + if not isinstance(order, int) or not order >= 1: + raise ValueError(f'Argument `order` must be an int >= 1. 
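+
+# Usage sketch for `normalize` above (illustrative): with the default order=2,
+# each slice along `axis` is rescaled to unit L2 norm (epsilon-guarded for
+# near-zero slices).
+import numpy as np
+import keras
+x = np.array([[3.0, 4.0], [0.0, 2.0]], dtype='float32')
+unit = keras.ops.normalize(x, axis=-1)  # rows ~[0.6, 0.8] and [0., 1.]
+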
Received: order={order}') + x = backend.convert_to_tensor(x) + if len(x.shape) == 0: + x = backend.numpy.expand_dims(x, axis=0) + if epsilon is None: + epsilon = backend.epsilon() + if order == 2: + square_sum = backend.numpy.sum(backend.numpy.square(x), axis=axis, keepdims=True) + inv_norm = backend.math.rsqrt(square_sum) + inv_norm = backend.numpy.minimum(inv_norm, 1.0 / epsilon) + return x * inv_norm + norm = backend.linalg.norm(x, ord=order, axis=axis, keepdims=True) + denom = backend.numpy.maximum(norm, epsilon) + return backend.numpy.divide(x, denom) + +class PSNR(Operation): + + def __init__(self, max_val): + super().__init__() + self.max_val = max_val + + def call(self, x1, x2): + return backend.nn.psnr(x1=x1, x2=x2, max_val=self.max_val) + + def compute_output_spec(self, x1, x2): + if len(x1.shape) != len(x2.shape): + raise ValueError('Inputs must have the same rank') + return KerasTensor(shape=()) + +@keras_export(['keras.ops.psnr', 'keras.ops.nn.psnr']) +def psnr(x1, x2, max_val): + if any_symbolic_tensors((x1, x2)): + return PSNR(max_val).symbolic_call(x1, x2) + return backend.nn.psnr(x1, x2, max_val) + +# File: keras-master/keras/src/ops/node.py +import collections +from keras.src import tree +from keras.src.backend import KerasTensor +from keras.src.ops.symbolic_arguments import SymbolicArguments + +class Node: + + def __init__(self, operation, call_args=None, call_kwargs=None, outputs=None): + self.operation = operation + self.arguments = SymbolicArguments(*call_args, **call_kwargs) + self.outputs = [] if outputs is None else tree.flatten(outputs) + for x in self.outputs: + if not isinstance(x, KerasTensor): + raise ValueError(f'All operation outputs must be tensors. Operation {operation} returned a non-tensor. Non-tensor received: {x}') + zero_history = any((not x.record_history for x in self.arguments.keras_tensors)) + if not zero_history: + for tensor in self.arguments.keras_tensors: + if not hasattr(tensor, '_keras_history'): + tensor._keras_history = KerasHistory(operation=None, node_index=0, tensor_index=0) + self.operation._inbound_nodes.append(self) + for kt in self.arguments.keras_tensors: + inbound_op = kt._keras_history.operation + if inbound_op is not None: + inbound_op._outbound_nodes.append(self) + if not zero_history: + node_index = len(self.operation._inbound_nodes) - 1 + for (i, tensor) in enumerate(self.outputs): + tensor._keras_history = KerasHistory(operation=operation, node_index=node_index, tensor_index=i) + self.is_input = not self.arguments.keras_tensors + + def __repr__(self): + return f'<Node operation={self.operation}, id={id(self)}>' + + @property + def input_tensors(self): + return self.arguments.keras_tensors + + @property + def output_tensors(self): + return self.outputs + + @property + def parent_nodes(self): + node_deps = [] + for kt in self.arguments.keras_tensors: + op = kt._keras_history.operation + node_index = kt._keras_history.node_index + if op is not None: + node_deps.append(op._inbound_nodes[node_index]) + return node_deps + +class KerasHistory(collections.namedtuple('KerasHistory', ['operation', 'node_index', 'tensor_index'])): + __slots__ = () + +def is_keras_tensor(obj): + return hasattr(obj, '_keras_history') + +# File: keras-master/keras/src/ops/numpy.py +import builtins +import re +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.backend.common import dtypes +from keras.src.backend.common.backend_utils import 
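+
+# Sketch of how Node/KerasHistory above are populated during functional
+# tracing (illustrative, relying on internals of the functional API): calling
+# a layer on a symbolic tensor records a Node, and each output KerasTensor
+# carries a `_keras_history` (operation, node_index, tensor_index) triple.
+import keras
+inputs = keras.Input(shape=(4,))
+outputs = keras.layers.Dense(2)(inputs)
+(op, node_index, tensor_index) = outputs._keras_history  # op is the Dense layer
+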
canonicalize_axis +from keras.src.backend.common.backend_utils import to_tuple_or_list +from keras.src.ops import operation_utils +from keras.src.ops.operation import Operation +from keras.src.ops.operation_utils import broadcast_shapes +from keras.src.ops.operation_utils import reduce_shape + +def shape_equal(shape1, shape2, axis=None, allow_none=True): + if len(shape1) != len(shape2): + return False + shape1 = list(shape1) + shape2 = list(shape2) + if axis is not None: + if isinstance(axis, int): + axis = [axis] + for ax in axis: + shape1[ax] = -1 + shape2[ax] = -1 + if allow_none: + for i in range(len(shape1)): + if shape1[i] is None: + shape1[i] = shape2[i] + if shape2[i] is None: + shape2[i] = shape1[i] + return shape1 == shape2 + +class Absolute(Operation): + + def call(self, x): + return backend.numpy.absolute(x) + + def compute_output_spec(self, x): + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.absolute', 'keras.ops.numpy.absolute']) +def absolute(x): + if any_symbolic_tensors((x,)): + return Absolute().symbolic_call(x) + return backend.numpy.absolute(x) + +class Abs(Absolute): + pass + +@keras_export(['keras.ops.abs', 'keras.ops.numpy.abs']) +def abs(x): + return absolute(x) + +class Add(Operation): + + def call(self, x1, x2): + return backend.numpy.add(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + output_dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1_sparse = getattr(x1, 'sparse', False) + x2_sparse = getattr(x2, 'sparse', False) + output_sparse = x1_sparse and x2_sparse + return KerasTensor(output_shape, dtype=output_dtype, sparse=output_sparse) + +@keras_export(['keras.ops.add', 'keras.ops.numpy.add']) +def add(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Add().symbolic_call(x1, x2) + return backend.numpy.add(x1, x2) + +class All(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + if isinstance(axis, int): + self.axis = [axis] + else: + self.axis = axis + self.keepdims = keepdims + + def call(self, x): + return backend.numpy.all(x, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x): + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype='bool') + +@keras_export(['keras.ops.all', 'keras.ops.numpy.all']) +def all(x, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return All(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.all(x, axis=axis, keepdims=keepdims) + +class Any(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + if isinstance(axis, int): + self.axis = [axis] + else: + self.axis = axis + self.keepdims = keepdims + + def call(self, x): + return backend.numpy.any(x, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x): + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype='bool') + +@keras_export(['keras.ops.any', 'keras.ops.numpy.any']) +def any(x, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return Any(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.any(x, axis=axis, keepdims=keepdims) + +class Amax(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + if isinstance(axis, int): + axis = [axis] + self.axis = 
axis + self.keepdims = keepdims + + def call(self, x): + return backend.numpy.amax(x, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x): + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=x.dtype) + +@keras_export(['keras.ops.amax', 'keras.ops.numpy.amax']) +def amax(x, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return Amax(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.amax(x, axis=axis, keepdims=keepdims) + +class Amin(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + if isinstance(axis, int): + axis = [axis] + self.axis = axis + self.keepdims = keepdims + + def call(self, x): + return backend.numpy.amin(x, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x): + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=x.dtype) + +@keras_export(['keras.ops.amin', 'keras.ops.numpy.amin']) +def amin(x, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return Amin(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.amin(x, axis=axis, keepdims=keepdims) + +class Append(Operation): + + def __init__(self, axis=None): + super().__init__() + self.axis = axis + + def call(self, x1, x2): + return backend.numpy.append(x1, x2, axis=self.axis) + + def compute_output_spec(self, x1, x2): + x1_shape = x1.shape + x2_shape = x2.shape + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + if self.axis is None: + if None in x1_shape or None in x2_shape: + output_shape = [None] + else: + output_shape = [int(np.prod(x1_shape) + np.prod(x2_shape))] + return KerasTensor(output_shape, dtype=dtype) + if not shape_equal(x1_shape, x2_shape, [self.axis]): + raise ValueError(f'`append` requires inputs to have the same shape except the `axis={self.axis}`, but received shape {x1_shape} and {x2_shape}.') + output_shape = list(x1_shape) + output_shape[self.axis] = x1_shape[self.axis] + x2_shape[self.axis] + return KerasTensor(output_shape, dtype=dtype) + +@keras_export(['keras.ops.append', 'keras.ops.numpy.append']) +def append(x1, x2, axis=None): + if any_symbolic_tensors((x1, x2)): + return Append(axis=axis).symbolic_call(x1, x2) + return backend.numpy.append(x1, x2, axis=axis) + +class Arange(Operation): + + def call(self, start, stop=None, step=1, dtype=None): + return backend.numpy.arange(start, stop, step=step, dtype=dtype) + + def compute_output_spec(self, start, stop=None, step=1, dtype=None): + if stop is None: + (start, stop) = (0, start) + output_shape = [int(np.ceil((stop - start) / step))] + if dtype is None: + dtypes_to_resolve = [getattr(start, 'dtype', type(start)), getattr(step, 'dtype', type(step))] + if stop is not None: + dtypes_to_resolve.append(getattr(stop, 'dtype', type(stop))) + dtype = dtypes.result_type(*dtypes_to_resolve) + return KerasTensor(output_shape, dtype=dtype) + +@keras_export(['keras.ops.arange', 'keras.ops.numpy.arange']) +def arange(start, stop=None, step=1, dtype=None): + return backend.numpy.arange(start, stop, step=step, dtype=dtype) + +class Arccos(Operation): + + def call(self, x): + return backend.numpy.arccos(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.arccos', 
'keras.ops.numpy.arccos']) +def arccos(x): + if any_symbolic_tensors((x,)): + return Arccos().symbolic_call(x) + return backend.numpy.arccos(x) + +class Arccosh(Operation): + + def call(self, x): + return backend.numpy.arccosh(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.arccosh', 'keras.ops.numpy.arccosh']) +def arccosh(x): + if any_symbolic_tensors((x,)): + return Arccosh().symbolic_call(x) + return backend.numpy.arccosh(x) + +class Arcsin(Operation): + + def call(self, x): + return backend.numpy.arcsin(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.arcsin', 'keras.ops.numpy.arcsin']) +def arcsin(x): + if any_symbolic_tensors((x,)): + return Arcsin().symbolic_call(x) + return backend.numpy.arcsin(x) + +class Arcsinh(Operation): + + def call(self, x): + return backend.numpy.arcsinh(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.arcsinh', 'keras.ops.numpy.arcsinh']) +def arcsinh(x): + if any_symbolic_tensors((x,)): + return Arcsinh().symbolic_call(x) + return backend.numpy.arcsinh(x) + +class Arctan(Operation): + + def call(self, x): + return backend.numpy.arctan(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.arctan', 'keras.ops.numpy.arctan']) +def arctan(x): + if any_symbolic_tensors((x,)): + return Arctan().symbolic_call(x) + return backend.numpy.arctan(x) + +class Arctan2(Operation): + + def call(self, x1, x2): + return backend.numpy.arctan2(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + outputs_shape = broadcast_shapes(x1_shape, x2_shape) + x1_dtype = backend.standardize_dtype(getattr(x1, 'dtype', backend.floatx())) + x2_dtype = backend.standardize_dtype(getattr(x2, 'dtype', backend.floatx())) + dtype = dtypes.result_type(x1_dtype, x2_dtype, float) + return KerasTensor(outputs_shape, dtype=dtype) + +@keras_export(['keras.ops.arctan2', 'keras.ops.numpy.arctan2']) +def arctan2(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Arctan2().symbolic_call(x1, x2) + return backend.numpy.arctan2(x1, x2) + +class Arctanh(Operation): + + def call(self, x): + return backend.numpy.arctanh(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + 
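+# Usage sketch (illustration only, not part of the Keras sources): every op in
+# this module dispatches eagerly on concrete tensors and symbolically on
+# `KerasTensor`s, and the inverse-trig ops above promote integer inputs to a
+# float dtype. Assuming the public entry points `keras.ops` and
+# `keras.KerasTensor`:
+#
+#   import numpy as np
+#   import keras
+#   from keras import ops
+#
+#   ops.arctanh(np.array([0.0, 0.5]))   # eager: the backend computes values
+#   y = ops.arctanh(keras.KerasTensor((None, 3), dtype="int32"))  # symbolic
+#   print(y.shape, y.dtype)  # (None, 3) "float32" under the default floatx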
+@keras_export(['keras.ops.arctanh', 'keras.ops.numpy.arctanh']) +def arctanh(x): + if any_symbolic_tensors((x,)): + return Arctanh().symbolic_call(x) + return backend.numpy.arctanh(x) + +class Argmax(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + self.axis = axis + self.keepdims = keepdims + + def call(self, x): + return backend.numpy.argmax(x, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x): + if self.keepdims: + return KerasTensor(x.shape, dtype='int32') + if self.axis is None: + return KerasTensor([], dtype='int32') + return KerasTensor(reduce_shape(x.shape, axis=[self.axis]), dtype='int32') + +@keras_export(['keras.ops.argmax', 'keras.ops.numpy.argmax']) +def argmax(x, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return Argmax(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.argmax(x, axis=axis, keepdims=keepdims) + +class Argmin(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + self.axis = axis + self.keepdims = keepdims + + def call(self, x): + return backend.numpy.argmin(x, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x): + if self.keepdims: + return KerasTensor(x.shape, dtype='int32') + if self.axis is None: + return KerasTensor([], dtype='int32') + return KerasTensor(reduce_shape(x.shape, axis=[self.axis]), dtype='int32') + +@keras_export(['keras.ops.argmin', 'keras.ops.numpy.argmin']) +def argmin(x, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return Argmin(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.argmin(x, axis=axis, keepdims=keepdims) + +class Argsort(Operation): + + def __init__(self, axis=-1): + super().__init__() + self.axis = axis + + def call(self, x): + return backend.numpy.argsort(x, axis=self.axis) + + def compute_output_spec(self, x): + if self.axis is None: + return KerasTensor([int(np.prod(x.shape))], dtype='int32') + return KerasTensor(x.shape, dtype='int32') + +@keras_export(['keras.ops.argsort', 'keras.ops.numpy.argsort']) +def argsort(x, axis=-1): + if any_symbolic_tensors((x,)): + return Argsort(axis=axis).symbolic_call(x) + return backend.numpy.argsort(x, axis=axis) + +class Array(Operation): + + def call(self, x, dtype=None): + return backend.numpy.array(x, dtype=dtype) + + def compute_output_spec(self, x, dtype=None): + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.array', 'keras.ops.numpy.array']) +def array(x, dtype=None): + if any_symbolic_tensors((x,)): + return Array().symbolic_call(x, dtype=dtype) + return backend.numpy.array(x, dtype=dtype) + +class Average(Operation): + + def __init__(self, axis=None): + super().__init__() + self.axis = axis + + def call(self, x, weights=None): + return backend.numpy.average(x, weights=weights, axis=self.axis) + + def compute_output_spec(self, x, weights=None): + dtypes_to_resolve = [getattr(x, 'dtype', type(x)), float] + if weights is not None: + shape_match = shape_equal(x.shape, weights.shape, allow_none=True) + if self.axis is not None: + shape_match_on_axis = shape_equal([x.shape[self.axis]], weights.shape, allow_none=True) + dtypes_to_resolve.append(getattr(weights, 'dtype', type(weights))) + dtype = dtypes.result_type(*dtypes_to_resolve) + if self.axis is None: + if weights is None or shape_match: + return KerasTensor([], dtype=dtype) + else: + raise ValueError(f'`weights` must have the same shape as `x` when `axis=None`, but received `weights.shape={weights.shape}` and 
`x.shape={x.shape}`.') + if weights is None or shape_match_on_axis or shape_match: + return KerasTensor(reduce_shape(x.shape, axis=[self.axis]), dtype=dtype) + else: + raise ValueError(f'`weights` must have the same size as `x` at `axis={self.axis}` but received `weights.shape={weights.shape}` while x.shape at `{self.axis}` is `{x.shape[self.axis]}`.') + +@keras_export(['keras.ops.average', 'keras.ops.numpy.average']) +def average(x, axis=None, weights=None): + if any_symbolic_tensors((x,)): + return Average(axis=axis).symbolic_call(x, weights=weights) + return backend.numpy.average(x, weights=weights, axis=axis) + +class Bincount(Operation): + + def __init__(self, weights=None, minlength=0, sparse=False): + super().__init__() + self.weights = weights + self.minlength = minlength + self.sparse = sparse + + def call(self, x): + return backend.numpy.bincount(x, weights=self.weights, minlength=self.minlength, sparse=self.sparse) + + def compute_output_spec(self, x): + dtypes_to_resolve = [x.dtype] + if self.weights is not None: + weights = backend.convert_to_tensor(self.weights) + dtypes_to_resolve.append(weights.dtype) + dtype = dtypes.result_type(*dtypes_to_resolve) + else: + dtype = 'int32' + x_sparse = getattr(x, 'sparse', False) + return KerasTensor(list(x.shape[:-1]) + [None], dtype=dtype, sparse=x_sparse or self.sparse) + +@keras_export(['keras.ops.bincount', 'keras.ops.numpy.bincount']) +def bincount(x, weights=None, minlength=0, sparse=False): + if any_symbolic_tensors((x,)): + return Bincount(weights=weights, minlength=minlength, sparse=sparse).symbolic_call(x) + return backend.numpy.bincount(x, weights=weights, minlength=minlength, sparse=sparse) + +class BitwiseAnd(Operation): + + def __init__(self): + super().__init__() + + def call(self, x, y): + return backend.numpy.bitwise_and(x, y) + + def compute_output_spec(self, x, y): + dtype = dtypes.result_type(x.dtype, y.dtype) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.bitwise_and', 'keras.ops.numpy.bitwise_and']) +def bitwise_and(x, y): + if any_symbolic_tensors((x, y)): + return BitwiseAnd().symbolic_call(x, y) + return backend.numpy.bitwise_and(x, y) + +class BitwiseInvert(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return backend.numpy.bitwise_invert(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.bitwise_invert', 'keras.ops.numpy.bitwise_invert']) +def bitwise_invert(x): + if any_symbolic_tensors((x,)): + return BitwiseInvert().symbolic_call(x) + return backend.numpy.bitwise_invert(x) + +class BitwiseNot(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return backend.numpy.bitwise_not(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.bitwise_not', 'keras.ops.numpy.bitwise_not']) +def bitwise_not(x): + if any_symbolic_tensors((x,)): + return BitwiseNot().symbolic_call(x) + return backend.numpy.bitwise_not(x) + +class BitwiseOr(Operation): + + def __init__(self): + super().__init__() + + def call(self, x, y): + return backend.numpy.bitwise_or(x, y) + + def compute_output_spec(self, x, y): + dtype = dtypes.result_type(x.dtype, y.dtype) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.bitwise_or', 'keras.ops.numpy.bitwise_or']) +def bitwise_or(x, y): + if any_symbolic_tensors((x, y)): + return BitwiseOr().symbolic_call(x, y) + return backend.numpy.bitwise_or(x, y) + +class 
BitwiseXor(Operation): + + def __init__(self): + super().__init__() + + def call(self, x, y): + return backend.numpy.bitwise_xor(x, y) + + def compute_output_spec(self, x, y): + dtype = dtypes.result_type(x.dtype, y.dtype) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.bitwise_xor', 'keras.ops.numpy.bitwise_xor']) +def bitwise_xor(x, y): + if any_symbolic_tensors((x, y)): + return BitwiseXor().symbolic_call(x, y) + return backend.numpy.bitwise_xor(x, y) + +class BitwiseLeftShift(Operation): + + def __init__(self): + super().__init__() + + def call(self, x, y): + return backend.numpy.bitwise_left_shift(x, y) + + def compute_output_spec(self, x, y): + dtype = dtypes.result_type(x.dtype, y.dtype) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.bitwise_left_shift', 'keras.ops.numpy.bitwise_left_shift']) +def bitwise_left_shift(x, y): + if any_symbolic_tensors((x, y)): + return BitwiseLeftShift().symbolic_call(x, y) + return backend.numpy.bitwise_left_shift(x, y) + +class LeftShift(Operation): + + def __init__(self): + super().__init__() + + def call(self, x, y): + return backend.numpy.left_shift(x, y) + + def compute_output_spec(self, x, y): + dtype = dtypes.result_type(x.dtype, y.dtype) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.left_shift', 'keras.ops.numpy.left_shift']) +def left_shift(x, y): + if any_symbolic_tensors((x, y)): + return LeftShift().symbolic_call(x, y) + return backend.numpy.left_shift(x, y) + +class BitwiseRightShift(Operation): + + def __init__(self): + super().__init__() + + def call(self, x, y): + return backend.numpy.bitwise_right_shift(x, y) + + def compute_output_spec(self, x, y): + dtype = dtypes.result_type(x.dtype, y.dtype) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.bitwise_right_shift', 'keras.ops.numpy.bitwise_right_shift']) +def bitwise_right_shift(x, y): + if any_symbolic_tensors((x, y)): + return BitwiseRightShift().symbolic_call(x, y) + return backend.numpy.bitwise_right_shift(x, y) + +class RightShift(Operation): + + def __init__(self): + super().__init__() + + def call(self, x, y): + return backend.numpy.right_shift(x, y) + + def compute_output_spec(self, x, y): + dtype = dtypes.result_type(x.dtype, y.dtype) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.right_shift', 'keras.ops.numpy.right_shift']) +def right_shift(x, y): + if any_symbolic_tensors((x, y)): + return RightShift().symbolic_call(x, y) + return backend.numpy.right_shift(x, y) + +class BroadcastTo(Operation): + + def __init__(self, shape): + super().__init__() + self.shape = shape + + def call(self, x): + return backend.numpy.broadcast_to(x, self.shape) + + def compute_output_spec(self, x): + broadcast_shapes(x.shape, self.shape) + return KerasTensor(self.shape, dtype=x.dtype) + +@keras_export(['keras.ops.broadcast_to', 'keras.ops.numpy.broadcast_to']) +def broadcast_to(x, shape): + if any_symbolic_tensors((x,)): + return BroadcastTo(shape=shape).symbolic_call(x) + return backend.numpy.broadcast_to(x, shape) + +class Ceil(Operation): + + def call(self, x): + return backend.numpy.ceil(x) + + def compute_output_spec(self, x): + if backend.standardize_dtype(x.dtype) == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.ceil', 'keras.ops.numpy.ceil']) +def ceil(x): + if any_symbolic_tensors((x,)): + return 
Ceil().symbolic_call(x) + return backend.numpy.ceil(x) + +class Clip(Operation): + + def __init__(self, x_min, x_max): + super().__init__() + self.x_min = x_min + self.x_max = x_max + + def call(self, x): + return backend.numpy.clip(x, self.x_min, self.x_max) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(x.dtype) + if dtype == 'bool': + dtype = 'int32' + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.clip', 'keras.ops.numpy.clip']) +def clip(x, x_min, x_max): + if any_symbolic_tensors((x,)): + return Clip(x_min, x_max).symbolic_call(x) + return backend.numpy.clip(x, x_min, x_max) + +class Concatenate(Operation): + + def __init__(self, axis=0): + super().__init__() + if axis is None: + raise ValueError('`axis` cannot be None for `concatenate`.') + self.axis = axis + + def call(self, xs): + return backend.numpy.concatenate(xs, axis=self.axis) + + def compute_output_spec(self, xs): + first_shape = xs[0].shape + total_size_on_axis = 0 + all_sparse = True + dtypes_to_resolve = [] + for x in xs: + if not shape_equal(x.shape, first_shape, axis=[self.axis], allow_none=True): + raise ValueError(f"Every value in `xs` must have the same shape except on the `axis` dim. But found element of shape {x.shape}, which is different from the first element's shape {first_shape}.") + if total_size_on_axis is None or x.shape[self.axis] is None: + total_size_on_axis = None + else: + total_size_on_axis += x.shape[self.axis] + all_sparse = all_sparse and getattr(x, 'sparse', False) + dtypes_to_resolve.append(getattr(x, 'dtype', type(x))) + output_shape = list(first_shape) + output_shape[self.axis] = total_size_on_axis + dtype = dtypes.result_type(*dtypes_to_resolve) + return KerasTensor(output_shape, dtype=dtype, sparse=all_sparse) + +@keras_export(['keras.ops.concatenate', 'keras.ops.numpy.concatenate']) +def concatenate(xs, axis=0): + if any_symbolic_tensors(xs): + return Concatenate(axis=axis).symbolic_call(xs) + return backend.numpy.concatenate(xs, axis=axis) + +class Conjugate(Operation): + + def call(self, x): + return backend.numpy.conjugate(x) + + def compute_output_spec(self, x): + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.conjugate', 'keras.ops.numpy.conjugate']) +def conjugate(x): + if any_symbolic_tensors((x,)): + return Conjugate().symbolic_call(x) + return backend.numpy.conjugate(x) + +class Conj(Conjugate): + pass + +@keras_export(['keras.ops.conj', 'keras.ops.numpy.conj']) +def conj(x): + return conjugate(x) + +class Copy(Operation): + + def call(self, x): + return backend.numpy.copy(x) + + def compute_output_spec(self, x): + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.copy', 'keras.ops.numpy.copy']) +def copy(x): + if any_symbolic_tensors((x,)): + return Copy().symbolic_call(x) + return backend.numpy.copy(x) + +class Cos(Operation): + + def call(self, x): + return backend.numpy.cos(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.cos', 'keras.ops.numpy.cos']) +def cos(x): + if any_symbolic_tensors((x,)): + return Cos().symbolic_call(x) + return backend.numpy.cos(x) + +class Cosh(Operation): + + def call(self, x): + return backend.numpy.cosh(x) + + 
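+    # Descriptive note (added comment): like the other element-wise
+    # trigonometric ops in this module, the spec below promotes `int64`
+    # inputs to `backend.floatx()` and resolves every other dtype against
+    # `float` via `dtypes.result_type`, so integer inputs yield float outputs.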
def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.cosh', 'keras.ops.numpy.cosh']) +def cosh(x): + if any_symbolic_tensors((x,)): + return Cosh().symbolic_call(x) + return backend.numpy.cosh(x) + +class CountNonzero(Operation): + + def __init__(self, axis=None): + super().__init__() + if isinstance(axis, int): + self.axis = (axis,) + else: + self.axis = axis + + def call(self, x): + return backend.numpy.count_nonzero(x, axis=self.axis) + + def compute_output_spec(self, x): + return KerasTensor(reduce_shape(x.shape, axis=self.axis), dtype='int32') + +@keras_export(['keras.ops.count_nonzero', 'keras.ops.numpy.count_nonzero']) +def count_nonzero(x, axis=None): + if any_symbolic_tensors((x,)): + return CountNonzero(axis=axis).symbolic_call(x) + return backend.numpy.count_nonzero(x, axis=axis) + +class Cross(Operation): + + def __init__(self, axisa=-1, axisb=-1, axisc=-1, axis=None): + super().__init__() + if axis is not None: + self.axisa = axis + self.axisb = axis + self.axisc = axis + else: + self.axisa = axisa + self.axisb = axisb + self.axisc = axisc + + def call(self, x1, x2): + return backend.numpy.cross(x1, x2, self.axisa, self.axisb, self.axisc) + + def compute_output_spec(self, x1, x2): + x1_shape = list(x1.shape) + x2_shape = list(x2.shape) + x1_value_size = x1_shape[self.axisa] + x2_value_size = x2_shape[self.axisa] + del x1_shape[self.axisa] + del x2_shape[self.axisb] + output_shape = broadcast_shapes(x1_shape, x2_shape) + if x1_value_size is not None and x1_value_size not in (2, 3): + raise ValueError(f"`x1`'s dim on `axis={{axisa}}` must be either 2 or 3, but received: {x1_value_size}") + if x2_value_size is not None and x2_value_size not in (2, 3): + raise ValueError(f"`x2`'s dim on `axis={{axisb}}` must be either 2 or 3, but received: {x2_value_size}") + if x1_value_size == 3 or x2_value_size == 3: + value_size = [3] + else: + value_size = [] + output_shape = output_shape[:self.axisc] + value_size + output_shape[self.axisc:] + dtype = dtypes.result_type(x1.dtype, x2.dtype) + return KerasTensor(output_shape, dtype=dtype) + +@keras_export(['keras.ops.cross', 'keras.ops.numpy.cross']) +def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None): + if any_symbolic_tensors((x1, x2)): + return Cross(axisa=axisa, axisb=axisb, axisc=axisc, axis=axis).symbolic_call(x1, x2) + return backend.numpy.cross(x1, x2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis) + +class Cumprod(Operation): + + def __init__(self, axis=None, dtype=None): + super().__init__() + self.axis = axis + self.dtype = dtype + + def call(self, x): + return backend.numpy.cumprod(x, axis=self.axis, dtype=self.dtype) + + def compute_output_spec(self, x): + if self.axis is None: + if None in x.shape: + output_shape = (None,) + else: + output_shape = (int(np.prod(x.shape)),) + else: + output_shape = x.shape + output_dtype = backend.standardize_dtype(self.dtype or x.dtype) + if output_dtype == 'bool': + output_dtype = 'int32' + return KerasTensor(output_shape, output_dtype) + +@keras_export(['keras.ops.cumprod', 'keras.ops.numpy.cumprod']) +def cumprod(x, axis=None, dtype=None): + return Cumprod(axis=axis, dtype=dtype)(x) + +class Cumsum(Operation): + + def __init__(self, axis=None, dtype=None): + super().__init__() + self.axis = axis + self.dtype = dtype + + def call(self, x): + return 
backend.numpy.cumsum(x, axis=self.axis, dtype=self.dtype) + + def compute_output_spec(self, x): + if self.axis is None: + if None in x.shape: + output_shape = (None,) + else: + output_shape = (int(np.prod(x.shape)),) + else: + output_shape = x.shape + output_dtype = backend.standardize_dtype(self.dtype or x.dtype) + if output_dtype == 'bool': + output_dtype = 'int32' + return KerasTensor(output_shape, output_dtype) + +@keras_export(['keras.ops.cumsum', 'keras.ops.numpy.cumsum']) +def cumsum(x, axis=None, dtype=None): + return Cumsum(axis=axis, dtype=dtype)(x) + +class Diag(Operation): + + def __init__(self, k=0): + super().__init__() + self.k = k + + def call(self, x): + return backend.numpy.diag(x, k=self.k) + + def compute_output_spec(self, x): + x_shape = x.shape + if len(x_shape) == 1: + if x_shape[0] is None: + output_shape = [None, None] + else: + output_shape = [x_shape[0] + int(np.abs(self.k)), x_shape[0] + int(np.abs(self.k))] + elif len(x_shape) == 2: + if None in x_shape: + output_shape = [None] + else: + shorter_side = np.minimum(x_shape[0], x_shape[1]) + if self.k > 0: + remaining = x_shape[1] - self.k + else: + remaining = x_shape[0] + self.k + output_shape = [int(np.maximum(0, np.minimum(remaining, shorter_side)))] + else: + raise ValueError(f'`x` must be 1-D or 2-D, but received shape {x.shape}.') + return KerasTensor(output_shape, dtype=x.dtype) + +@keras_export(['keras.ops.diag', 'keras.ops.numpy.diag']) +def diag(x, k=0): + if any_symbolic_tensors((x,)): + return Diag(k=k).symbolic_call(x) + return backend.numpy.diag(x, k=k) + +class Diagonal(Operation): + + def __init__(self, offset=0, axis1=0, axis2=1): + super().__init__() + self.offset = offset + self.axis1 = axis1 + self.axis2 = axis2 + + def call(self, x): + return backend.numpy.diagonal(x, offset=self.offset, axis1=self.axis1, axis2=self.axis2) + + def compute_output_spec(self, x): + x_shape = list(x.shape) + if len(x_shape) < 2: + raise ValueError(f'`diagonal` requires an array of at least two dimensions, but `x` is of shape {x.shape}.') + shape_2d = [x_shape[self.axis1], x_shape[self.axis2]] + x_shape[self.axis1] = -1 + x_shape[self.axis2] = -1 + output_shape = list(filter((-1).__ne__, x_shape)) + if None in shape_2d: + diag_shape = [None] + else: + shorter_side = np.minimum(shape_2d[0], shape_2d[1]) + if self.offset > 0: + remaining = shape_2d[1] - self.offset + else: + remaining = shape_2d[0] + self.offset + diag_shape = [int(np.maximum(0, np.minimum(remaining, shorter_side)))] + output_shape = output_shape + diag_shape + return KerasTensor(output_shape, dtype=x.dtype) + +@keras_export(['keras.ops.diagonal', 'keras.ops.numpy.diagonal']) +def diagonal(x, offset=0, axis1=0, axis2=1): + if any_symbolic_tensors((x,)): + return Diagonal(offset=offset, axis1=axis1, axis2=axis2).symbolic_call(x) + return backend.numpy.diagonal(x, offset=offset, axis1=axis1, axis2=axis2) + +class Diff(Operation): + + def __init__(self, n=1, axis=-1): + super().__init__() + self.n = n + self.axis = axis + + def call(self, a): + return backend.numpy.diff(a, n=self.n, axis=self.axis) + + def compute_output_spec(self, a): + shape = list(a.shape) + size = shape[self.axis] + if size is not None: + shape[self.axis] = builtins.max(size - self.n, 0) + return KerasTensor(shape, dtype=a.dtype) + +@keras_export(['keras.ops.diff', 'keras.ops.numpy.diff']) +def diff(a, n=1, axis=-1): + return Diff(n=n, axis=axis)(a) + +class Digitize(Operation): + + def call(self, x, bins): + return backend.numpy.digitize(x, bins) + + def compute_output_spec(self, 
x, bins): + bins_shape = bins.shape + if len(bins_shape) > 1: + raise ValueError(f'`bins` must be a 1D array. Received: bins={bins} with shape bins.shape={bins_shape}') + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype='int32', sparse=sparse) + +@keras_export(['keras.ops.digitize', 'keras.ops.numpy.digitize']) +def digitize(x, bins): + if any_symbolic_tensors((x, bins)): + return Digitize().symbolic_call(x, bins) + return backend.numpy.digitize(x, bins) + +class Dot(Operation): + + def call(self, x1, x2): + return backend.numpy.dot(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = list(getattr(x1, 'shape', [])) + x2_shape = list(getattr(x2, 'shape', [])) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + if x1_shape == [] or x2_shape == []: + return multiply(x1, x2) + if len(x1_shape) == 1 and len(x2_shape) == 1: + return KerasTensor([], dtype=dtype) + if len(x2_shape) == 1: + if x1_shape[-1] != x2_shape[0]: + raise ValueError(f'Shape must match on the last axis of `x1` and `x2` when `x1` is N-d array while `x2` is 1-D, but receive shape `x1.shape={x1.shape}` and x2.shape=`{x2.shape}`.') + return KerasTensor(x1_shape[:-1], dtype=dtype) + if x1_shape[-1] is None or x2_shape[-2] is None or x1_shape[-1] == x2_shape[-2]: + del x1_shape[-1] + del x2_shape[-2] + return KerasTensor(x1_shape + x2_shape, dtype=dtype) + raise ValueError(f'Shape must match on the last axis of `x1` and second last axis of `x2` when `x1` is N-d array while `x2` is M-D, but received `x1.shape={x1.shape}` and x2.shape=`{x2.shape}`.') + +@keras_export(['keras.ops.dot', 'keras.ops.numpy.dot']) +def dot(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Dot().symbolic_call(x1, x2) + return backend.numpy.dot(x1, x2) + +class Einsum(Operation): + + def __init__(self, subscripts): + super().__init__() + self.subscripts = subscripts + + def call(self, *operands): + return backend.numpy.einsum(self.subscripts, *operands) + + def compute_output_spec(self, *operands): + split_subscripts = self.subscripts.split('->') + if len(split_subscripts) > 2: + raise ValueError(f"At most one '->' is supported in `einsum` subscripts, but received {self.subscripts}.") + if len(split_subscripts) == 2: + subscripts = split_subscripts[0] + output_spec = split_subscripts[1] + else: + subscripts = self.subscripts + output_spec = None + input_specs = subscripts.split(',') + if len(input_specs) != len(operands): + raise ValueError(f'Number of operands ({len(operands)}) does not match the number of input specs ({len(input_specs)}) in `einsum`, received subscripts={self.subscripts}.') + reduced_dims = set() + kept_dims = set() + for s in subscripts: + if not s.isalpha(): + continue + if s not in reduced_dims and s not in kept_dims: + kept_dims.add(s) + elif s in kept_dims: + kept_dims.remove(s) + reduced_dims.add(s) + if output_spec is not None: + kept_dims_copy = kept_dims.copy() + reduced_dims_copy = reduced_dims.copy() + for dim in kept_dims: + if dim not in output_spec: + kept_dims_copy.remove(dim) + reduced_dims_copy.add(dim) + for dim in reduced_dims: + if dim in output_spec: + reduced_dims_copy.remove(dim) + kept_dims_copy.add(dim) + kept_dims = kept_dims_copy + reduced_dims = reduced_dims_copy + reduced_dims = sorted(reduced_dims) + kept_dims = sorted(kept_dims) + if output_spec is None: + target_broadcast_spec = '...' 
+ ''.join(kept_dims) + else: + target_broadcast_spec = output_spec + expanded_operands_shapes = [] + for (x, spec) in zip(operands, input_specs): + x_shape = getattr(x, 'shape', []) + x_shape = [-1 if size is None else size for size in x_shape] + split_spec = spec.split('...') + expanded_shape = target_broadcast_spec + if len(split_spec) == 1: + if len(x_shape) != len(split_spec[0]): + raise ValueError(f'Number of dimensions in the subscript does not match the number of dimensions in the operand, received subscript `{spec}` and operand of shape {x_shape}.') + for (size, s) in zip(x_shape, split_spec[0]): + expanded_shape = expanded_shape.replace(s, str(size) + ' ') + expanded_shape = expanded_shape.replace('...', '') + else: + for i in range(len(split_spec[0])): + expanded_shape = expanded_shape.replace(split_spec[0][i], str(x_shape[i]) + ' ') + for i in range(len(split_spec[1])): + expanded_shape = expanded_shape.replace(split_spec[1][-i - 1], str(x_shape[-i - 1]) + ' ') + wildcard_shape_start_index = len(split_spec[0]) + wildcard_shape_end_index = len(x_shape) if len(split_spec[1]) == 0 else -len(split_spec[1]) + wildcard_shape = x_shape[wildcard_shape_start_index:wildcard_shape_end_index] + wildcard_shape_str = ' '.join([str(size) for size in wildcard_shape]) + ' ' + expanded_shape = expanded_shape.replace('...', wildcard_shape_str) + expanded_shape = re.sub('[a-z]', '1 ', expanded_shape) + expanded_shape = expanded_shape.split() + expanded_shape = [None if size == '-1' else int(size) for size in expanded_shape] + expanded_operands_shapes.append(expanded_shape) + output_shape = expanded_operands_shapes[0] + for shape in expanded_operands_shapes[1:]: + output_shape = broadcast_shapes(output_shape, shape) + dtypes_to_resolve = list(set((backend.standardize_dtype(getattr(x, 'dtype', type(x))) for x in operands))) + if len(dtypes_to_resolve) == 1 and dtypes_to_resolve[0] == 'int8': + dtype = 'int32' + else: + dtype = dtypes.result_type(*dtypes_to_resolve) + return KerasTensor(output_shape, dtype=dtype) + +@keras_export(['keras.ops.einsum', 'keras.ops.numpy.einsum']) +def einsum(subscripts, *operands): + if any_symbolic_tensors(operands): + return Einsum(subscripts).symbolic_call(*operands) + return backend.numpy.einsum(subscripts, *operands) + +class Empty(Operation): + + def call(self, shape, dtype=None): + return backend.numpy.empty(shape, dtype=dtype) + + def compute_output_spec(self, shape, dtype=None): + dtype = dtype or backend.floatx() + return KerasTensor(shape, dtype=dtype) + +@keras_export(['keras.ops.empty', 'keras.ops.numpy.empty']) +def empty(shape, dtype=None): + return backend.numpy.empty(shape, dtype=dtype) + +class Equal(Operation): + + def call(self, x1, x2): + return backend.numpy.equal(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + return KerasTensor(output_shape, dtype='bool') + +@keras_export(['keras.ops.equal', 'keras.ops.numpy.equal']) +def equal(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Equal().symbolic_call(x1, x2) + return backend.numpy.equal(x1, x2) + +class Exp(Operation): + + def call(self, x): + return backend.numpy.exp(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(x.dtype) + if 'int' in dtype or dtype == 'bool': + dtype = backend.floatx() + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.exp', 'keras.ops.numpy.exp']) +def exp(x): + if 
any_symbolic_tensors((x,)): + return Exp().symbolic_call(x) + return backend.numpy.exp(x) + +class ExpandDims(Operation): + + def __init__(self, axis): + super().__init__() + if not isinstance(axis, (int, tuple, list)): + raise ValueError(f'The `axis` argument to `expand_dims` should be an integer, tuple or list. Received axis={axis}') + self.axis = axis + + def call(self, x): + return backend.numpy.expand_dims(x, self.axis) + + def compute_output_spec(self, x): + output_shape = operation_utils.compute_expand_dims_output_shape(x.shape, self.axis) + sparse = getattr(x, 'sparse', False) + return KerasTensor(output_shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.expand_dims', 'keras.ops.numpy.expand_dims']) +def expand_dims(x, axis): + if any_symbolic_tensors((x,)): + return ExpandDims(axis=axis).symbolic_call(x) + return backend.numpy.expand_dims(x, axis) + +class Expm1(Operation): + + def call(self, x): + return backend.numpy.expm1(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(x.dtype) + if 'int' in dtype or dtype == 'bool': + dtype = backend.floatx() + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.expm1', 'keras.ops.numpy.expm1']) +def expm1(x): + if any_symbolic_tensors((x,)): + return Expm1().symbolic_call(x) + return backend.numpy.expm1(x) + +class Flip(Operation): + + def __init__(self, axis=None): + super().__init__() + self.axis = axis + + def call(self, x): + return backend.numpy.flip(x, axis=self.axis) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.flip', 'keras.ops.numpy.flip']) +def flip(x, axis=None): + if any_symbolic_tensors((x,)): + return Flip(axis=axis).symbolic_call(x) + return backend.numpy.flip(x, axis=axis) + +class Floor(Operation): + + def call(self, x): + return backend.numpy.floor(x) + + def compute_output_spec(self, x): + sparse = getattr(x, 'sparse', False) + dtype = backend.floatx() if backend.standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.floor', 'keras.ops.numpy.floor']) +def floor(x): + if any_symbolic_tensors((x,)): + return Floor().symbolic_call(x) + return backend.numpy.floor(x) + +class Full(Operation): + + def call(self, shape, fill_value, dtype=None): + return backend.numpy.full(shape, fill_value, dtype=dtype) + + def compute_output_spec(self, shape, fill_value, dtype=None): + dtype = dtype or backend.floatx() + return KerasTensor(shape, dtype=dtype) + +@keras_export(['keras.ops.full', 'keras.ops.numpy.full']) +def full(shape, fill_value, dtype=None): + return backend.numpy.full(shape, fill_value, dtype=dtype) + +class FullLike(Operation): + + def call(self, x, fill_value, dtype=None): + return backend.numpy.full_like(x, fill_value, dtype=dtype) + + def compute_output_spec(self, x, fill_value, dtype=None): + dtype = dtype or x.dtype + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.full_like', 'keras.ops.numpy.full_like']) +def full_like(x, fill_value, dtype=None): + if any_symbolic_tensors((x,)): + return FullLike().symbolic_call(x, fill_value, dtype=dtype) + return backend.numpy.full_like(x, fill_value, dtype=dtype) + +class GetItem(Operation): + + def call(self, x, key): + if isinstance(key, list): + key = tuple(key) + return x[key] + + def compute_output_spec(self, x, key): + remaining_shape = list(x.shape) + new_shape = [] 
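+        # Shape-inference walk (added comment): normalize `key` into a
+        # mutable list, allow at most one `Ellipsis` (appending one if absent
+        # so trailing dims are preserved), then consume `remaining_shape` one
+        # entry per subkey: an int drops the dim (bounds-checked when the
+        # length is static), `np.newaxis` inserts a 1, and a slice maps the
+        # dim through `len(range(*subkey.indices(length)))`.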
+ if isinstance(key, int): + remaining_key = [key] + elif isinstance(key, tuple): + remaining_key = list(key) + elif isinstance(key, list): + remaining_key = key.copy() + else: + raise ValueError(f'Unsupported key type for array slice. Received: `{key}`') + num_ellipses = remaining_key.count(Ellipsis) + if num_ellipses > 1: + raise ValueError(f'Slice should only have one ellipsis. Received: `{key}`') + elif num_ellipses == 0: + remaining_key.append(Ellipsis) + while True: + if not remaining_key: + break + subkey = remaining_key.pop(0) + if subkey == Ellipsis: + needed = len(remaining_key) - remaining_key.count(np.newaxis) + consumed = len(remaining_shape) - needed + new_shape += remaining_shape[:consumed] + remaining_shape = remaining_shape[consumed:] + continue + if subkey == np.newaxis: + new_shape.append(1) + continue + if not remaining_shape: + raise ValueError(f'Array has shape {x.shape} but slice has too many indices. Received: `{key}`') + length = remaining_shape.pop(0) + if isinstance(subkey, int): + if length is not None: + index = subkey if subkey >= 0 else subkey + length + if index < 0 or index >= length: + raise ValueError(f'Array has shape {x.shape} but out-of-bounds index {key} was requested.') + elif isinstance(subkey, slice): + if length is not None: + new_length = len(range(*subkey.indices(length))) + new_shape.append(new_length) + else: + new_shape.append(length) + else: + raise ValueError(f'Unsupported key type for array slice. Received: `{key}`') + return KerasTensor(tuple(new_shape), dtype=x.dtype) + +@keras_export(['keras.ops.get_item', 'keras.ops.numpy.get_item']) +def get_item(x, key): + if any_symbolic_tensors((x,)): + return GetItem().symbolic_call(x, key) + return x[key] + +class Greater(Operation): + + def call(self, x1, x2): + return backend.numpy.greater(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + return KerasTensor(output_shape, dtype='bool') + +@keras_export(['keras.ops.greater', 'keras.ops.numpy.greater']) +def greater(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Greater().symbolic_call(x1, x2) + return backend.numpy.greater(x1, x2) + +class GreaterEqual(Operation): + + def call(self, x1, x2): + return backend.numpy.greater_equal(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + return KerasTensor(output_shape, dtype='bool') + +@keras_export(['keras.ops.greater_equal', 'keras.ops.numpy.greater_equal']) +def greater_equal(x1, x2): + if any_symbolic_tensors((x1, x2)): + return GreaterEqual().symbolic_call(x1, x2) + return backend.numpy.greater_equal(x1, x2) + +class Hstack(Operation): + + def call(self, xs): + return backend.numpy.hstack(xs) + + def compute_output_spec(self, xs): + first_shape = xs[0].shape + total_size_on_axis = 0 + dtypes_to_resolve = [] + for x in xs: + if not shape_equal(x.shape, first_shape, axis=[1], allow_none=True): + raise ValueError(f"Every value in `xs` must have the same shape except on the `axis` dim. 
But found element of shape {x.shape}, which is different from the first element's shape {first_shape}.") + if total_size_on_axis is None or x.shape[1] is None: + total_size_on_axis = None + else: + total_size_on_axis += x.shape[1] + dtypes_to_resolve.append(getattr(x, 'dtype', type(x))) + output_shape = list(first_shape) + output_shape[1] = total_size_on_axis + dtype = dtypes.result_type(*dtypes_to_resolve) + return KerasTensor(output_shape, dtype=dtype) + +@keras_export(['keras.ops.hstack', 'keras.ops.numpy.hstack']) +def hstack(xs): + if any_symbolic_tensors((xs,)): + return Hstack().symbolic_call(xs) + return backend.numpy.hstack(xs) + +class Identity(Operation): + + def call(self, n, dtype=None): + return backend.numpy.identity(n, dtype=dtype) + + def compute_output_spec(self, n, dtype=None): + dtype = dtype or backend.floatx() + return KerasTensor([n, n], dtype=dtype) + +@keras_export(['keras.ops.identity', 'keras.ops.numpy.identity']) +def identity(n, dtype=None): + return backend.numpy.identity(n, dtype=dtype) + +class Imag(Operation): + + def call(self, x): + return backend.numpy.imag(x) + + def compute_output_spec(self, x): + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.imag', 'keras.ops.numpy.imag']) +def imag(x): + if any_symbolic_tensors((x,)): + return Imag().symbolic_call(x) + return backend.numpy.imag(x) + +class Isclose(Operation): + + def call(self, x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False): + return backend.numpy.isclose(x1, x2, rtol, atol, equal_nan) + + def compute_output_spec(self, x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + return KerasTensor(output_shape, dtype='bool') + +@keras_export(['keras.ops.isclose', 'keras.ops.numpy.isclose']) +def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False): + if any_symbolic_tensors((x1, x2)): + return Isclose().symbolic_call(x1, x2, rtol, atol, equal_nan) + return backend.numpy.isclose(x1, x2, rtol, atol, equal_nan) + +class Isfinite(Operation): + + def call(self, x): + return backend.numpy.isfinite(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype='bool') + +@keras_export(['keras.ops.isfinite', 'keras.ops.numpy.isfinite']) +def isfinite(x): + if any_symbolic_tensors((x,)): + return Isfinite().symbolic_call(x) + return backend.numpy.isfinite(x) + +class Isinf(Operation): + + def call(self, x): + return backend.numpy.isinf(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype='bool') + +@keras_export(['keras.ops.isinf', 'keras.ops.numpy.isinf']) +def isinf(x): + if any_symbolic_tensors((x,)): + return Isinf().symbolic_call(x) + return backend.numpy.isinf(x) + +class Isnan(Operation): + + def call(self, x): + return backend.numpy.isnan(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype='bool') + +@keras_export(['keras.ops.isnan', 'keras.ops.numpy.isnan']) +def isnan(x): + if any_symbolic_tensors((x,)): + return Isnan().symbolic_call(x) + return backend.numpy.isnan(x) + +class Less(Operation): + + def call(self, x1, x2): + return backend.numpy.less(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + return KerasTensor(output_shape, dtype='bool') + +@keras_export(['keras.ops.less', 'keras.ops.numpy.less']) 
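+# Note (added comment): like the other comparison ops in this file (Greater,
+# GreaterEqual, Equal, NotEqual), `less` broadcasts its two input shapes and
+# its symbolic spec always reports a `bool` output dtype.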
+def less(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Less().symbolic_call(x1, x2) + return backend.numpy.less(x1, x2) + +class LessEqual(Operation): + + def call(self, x1, x2): + return backend.numpy.less_equal(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + return KerasTensor(output_shape, dtype='bool') + +@keras_export(['keras.ops.less_equal', 'keras.ops.numpy.less_equal']) +def less_equal(x1, x2): + if any_symbolic_tensors((x1, x2)): + return LessEqual().symbolic_call(x1, x2) + return backend.numpy.less_equal(x1, x2) + +class Linspace(Operation): + + def __init__(self, num=50, endpoint=True, retstep=False, dtype=float, axis=0): + super().__init__() + self.num = num + self.endpoint = endpoint + self.retstep = retstep + self.dtype = dtype + self.axis = axis + + def call(self, start, stop): + return backend.numpy.linspace(start, stop, num=self.num, endpoint=self.endpoint, retstep=self.retstep, dtype=self.dtype, axis=self.axis) + + def compute_output_spec(self, start, stop): + start_shape = getattr(start, 'shape', []) + stop_shape = getattr(stop, 'shape', []) + output_shape = broadcast_shapes(start_shape, stop_shape) + if self.axis == -1: + output_shape = output_shape + [self.num] + elif self.axis >= 0: + output_shape = output_shape[:self.axis] + [self.num] + output_shape[self.axis:] + else: + output_shape = output_shape[:self.axis + 1] + [self.num] + output_shape[self.axis + 1:] + dtype = self.dtype if self.dtype is not None else getattr(start, 'dtype', type(start)) + dtype = backend.result_type(dtype, float) + if self.retstep: + return (KerasTensor(output_shape, dtype=dtype), None) + return KerasTensor(output_shape, dtype=dtype) + +@keras_export(['keras.ops.linspace', 'keras.ops.numpy.linspace']) +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): + if any_symbolic_tensors((start, stop)): + return Linspace(num, endpoint, retstep, dtype, axis)(start, stop) + return backend.numpy.linspace(start, stop, num=num, endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis) + +class Log(Operation): + + def call(self, x): + return backend.numpy.log(x) + + def compute_output_spec(self, x): + dtype = backend.floatx() if backend.standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.log', 'keras.ops.numpy.log']) +def log(x): + if any_symbolic_tensors((x,)): + return Log().symbolic_call(x) + return backend.numpy.log(x) + +class Log10(Operation): + + def call(self, x): + return backend.numpy.log10(x) + + def compute_output_spec(self, x): + dtype = backend.floatx() if backend.standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.log10', 'keras.ops.numpy.log10']) +def log10(x): + if any_symbolic_tensors((x,)): + return Log10().symbolic_call(x) + return backend.numpy.log10(x) + +class Log1p(Operation): + + def call(self, x): + return backend.numpy.log1p(x) + + def compute_output_spec(self, x): + dtype = backend.floatx() if backend.standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.log1p', 'keras.ops.numpy.log1p']) +def log1p(x): + if any_symbolic_tensors((x,)): + return 
Log1p().symbolic_call(x) + return backend.numpy.log1p(x) + +class Log2(Operation): + + def call(self, x): + return backend.numpy.log2(x) + + def compute_output_spec(self, x): + dtype = backend.floatx() if backend.standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.log2', 'keras.ops.numpy.log2']) +def log2(x): + if any_symbolic_tensors((x,)): + return Log2().symbolic_call(x) + return backend.numpy.log2(x) + +class Logaddexp(Operation): + + def call(self, x1, x2): + return backend.numpy.logaddexp(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2)), float) + return KerasTensor(output_shape, dtype=dtype) + +@keras_export(['keras.ops.logaddexp', 'keras.ops.numpy.logaddexp']) +def logaddexp(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Logaddexp().symbolic_call(x1, x2) + return backend.numpy.logaddexp(x1, x2) + +class LogicalAnd(Operation): + + def call(self, x1, x2): + return backend.numpy.logical_and(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + return KerasTensor(output_shape, dtype='bool') + +@keras_export(['keras.ops.logical_and', 'keras.ops.numpy.logical_and']) +def logical_and(x1, x2): + if any_symbolic_tensors((x1, x2)): + return LogicalAnd().symbolic_call(x1, x2) + return backend.numpy.logical_and(x1, x2) + +class LogicalNot(Operation): + + def call(self, x): + return backend.numpy.logical_not(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype='bool') + +@keras_export(['keras.ops.logical_not', 'keras.ops.numpy.logical_not']) +def logical_not(x): + if any_symbolic_tensors((x,)): + return LogicalNot().symbolic_call(x) + return backend.numpy.logical_not(x) + +class LogicalOr(Operation): + + def call(self, x1, x2): + return backend.numpy.logical_or(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + return KerasTensor(output_shape, dtype='bool') + +@keras_export(['keras.ops.logical_or', 'keras.ops.numpy.logical_or']) +def logical_or(x1, x2): + if any_symbolic_tensors((x1, x2)): + return LogicalOr().symbolic_call(x1, x2) + return backend.numpy.logical_or(x1, x2) + +class Logspace(Operation): + + def __init__(self, num=50, endpoint=True, base=10, dtype=float, axis=0): + super().__init__() + self.num = num + self.endpoint = endpoint + self.base = base + self.dtype = dtype + self.axis = axis + + def call(self, start, stop): + return backend.numpy.logspace(start, stop, num=self.num, endpoint=self.endpoint, base=self.base, dtype=self.dtype, axis=self.axis) + + def compute_output_spec(self, start, stop): + start_shape = getattr(start, 'shape', []) + stop_shape = getattr(stop, 'shape', []) + output_shape = broadcast_shapes(start_shape, stop_shape) + if self.axis == -1: + output_shape = output_shape + [self.num] + elif self.axis >= 0: + output_shape = output_shape[:self.axis] + [self.num] + output_shape[self.axis:] + else: + output_shape = output_shape[:self.axis + 1] + [self.num] + output_shape[self.axis + 1:] + dtype = self.dtype if self.dtype is not None else getattr(start, 'dtype', 
type(start)) + dtype = backend.result_type(dtype, float) + return KerasTensor(output_shape, dtype=dtype) + +@keras_export(['keras.ops.logspace', 'keras.ops.numpy.logspace']) +def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0): + if any_symbolic_tensors((start, stop)): + return Logspace(num, endpoint, base, dtype, axis)(start, stop) + return backend.numpy.logspace(start, stop, num=num, endpoint=endpoint, base=base, dtype=dtype, axis=axis) + +class Matmul(Operation): + + def call(self, x1, x2): + return backend.numpy.matmul(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = operation_utils.compute_matmul_output_shape(x1_shape, x2_shape) + x1_sparse = getattr(x1, 'sparse', True) + x2_sparse = getattr(x2, 'sparse', True) + output_sparse = x1_sparse and x2_sparse + x1_dtype = backend.standardize_dtype(getattr(x1, 'dtype', type(x1))) + x2_dtype = backend.standardize_dtype(getattr(x2, 'dtype', type(x2))) + if x1_dtype == 'int8' and x2_dtype == 'int8': + dtype = 'int32' + else: + dtype = dtypes.result_type(x1_dtype, x2_dtype) + return KerasTensor(output_shape, dtype=dtype, sparse=output_sparse) + +@keras_export(['keras.ops.matmul', 'keras.ops.numpy.matmul']) +def matmul(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Matmul().symbolic_call(x1, x2) + return backend.numpy.matmul(x1, x2) + +class Max(Operation): + + def __init__(self, axis=None, keepdims=False, initial=None): + super().__init__() + if isinstance(axis, int): + self.axis = [axis] + else: + self.axis = axis + self.keepdims = keepdims + self.initial = initial + + def call(self, x): + return backend.numpy.max(x, axis=self.axis, keepdims=self.keepdims, initial=self.initial) + + def compute_output_spec(self, x): + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=x.dtype) + +@keras_export(['keras.ops.max', 'keras.ops.numpy.max']) +def max(x, axis=None, keepdims=False, initial=None): + if any_symbolic_tensors((x,)): + return Max(axis=axis, keepdims=keepdims, initial=initial).symbolic_call(x) + return backend.numpy.max(x, axis=axis, keepdims=keepdims, initial=initial) + +class Maximum(Operation): + + def call(self, x1, x2): + return backend.numpy.maximum(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + output_dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1_sparse = getattr(x1, 'sparse', False) + x2_sparse = getattr(x2, 'sparse', False) + output_sparse = x1_sparse and x2_sparse + return KerasTensor(output_shape, dtype=output_dtype, sparse=output_sparse) + +@keras_export(['keras.ops.maximum', 'keras.ops.numpy.maximum']) +def maximum(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Maximum().symbolic_call(x1, x2) + return backend.numpy.maximum(x1, x2) + +class Median(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + if isinstance(axis, int): + axis = [axis] + self.axis = axis + self.keepdims = keepdims + + def call(self, x): + return backend.numpy.median(x, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x): + output_shape = reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims) + if backend.standardize_dtype(x.dtype) == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + return 
KerasTensor(output_shape, dtype=dtype) + +@keras_export(['keras.ops.median', 'keras.ops.numpy.median']) +def median(x, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return Median(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.median(x, axis=axis, keepdims=keepdims) + +class Meshgrid(Operation): + + def __init__(self, indexing='xy'): + super().__init__() + if indexing not in ('xy', 'ij'): + raise ValueError(f"Valid values for `indexing` are 'xy' and 'ij', but received {indexing}.") + self.indexing = indexing + + def call(self, *x): + return backend.numpy.meshgrid(*x, indexing=self.indexing) + + def compute_output_spec(self, *x): + output_shape = [] + for xi in x: + if len(xi.shape) == 0: + size = 1 + elif None in xi.shape: + size = None + else: + size = int(np.prod(xi.shape)) + output_shape.append(size) + if self.indexing == 'ij': + return [KerasTensor(output_shape) for _ in range(len(x))] + tmp = output_shape[0] + output_shape[0] = output_shape[1] + output_shape[1] = tmp + return [KerasTensor(output_shape, dtype=xi.dtype) for _ in range(len(x))] + +@keras_export(['keras.ops.meshgrid', 'keras.ops.numpy.meshgrid']) +def meshgrid(*x, indexing='xy'): + if any_symbolic_tensors(x): + return Meshgrid(indexing=indexing).symbolic_call(*x) + return backend.numpy.meshgrid(*x, indexing=indexing) + +class Min(Operation): + + def __init__(self, axis=None, keepdims=False, initial=None): + super().__init__() + if isinstance(axis, int): + self.axis = [axis] + else: + self.axis = axis + self.keepdims = keepdims + self.initial = initial + + def call(self, x): + return backend.numpy.min(x, axis=self.axis, keepdims=self.keepdims, initial=self.initial) + + def compute_output_spec(self, x): + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=x.dtype) + +@keras_export(['keras.ops.min', 'keras.ops.numpy.min']) +def min(x, axis=None, keepdims=False, initial=None): + if any_symbolic_tensors((x,)): + return Min(axis=axis, keepdims=keepdims, initial=initial).symbolic_call(x) + return backend.numpy.min(x, axis=axis, keepdims=keepdims, initial=initial) + +class Minimum(Operation): + + def call(self, x1, x2): + return backend.numpy.minimum(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + output_dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + x1_sparse = getattr(x1, 'sparse', False) + x2_sparse = getattr(x2, 'sparse', False) + output_sparse = x1_sparse and x2_sparse + return KerasTensor(output_shape, dtype=output_dtype, sparse=output_sparse) + +@keras_export(['keras.ops.minimum', 'keras.ops.numpy.minimum']) +def minimum(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Minimum().symbolic_call(x1, x2) + return backend.numpy.minimum(x1, x2) + +class Mod(Operation): + + def call(self, x1, x2): + return backend.numpy.mod(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + output_dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + if output_dtype == 'bool': + output_dtype = 'int32' + return KerasTensor(output_shape, dtype=output_dtype) + +@keras_export(['keras.ops.mod', 'keras.ops.numpy.mod']) +def mod(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Mod().symbolic_call(x1, x2) + return 
backend.numpy.mod(x1, x2) + +class Moveaxis(Operation): + + def __init__(self, source, destination): + super().__init__() + if isinstance(source, int): + self.source = [source] + else: + self.source = source + if isinstance(destination, int): + self.destination = [destination] + else: + self.destination = destination + if len(self.source) != len(self.destination): + raise ValueError(f'`source` and `destination` arguments must have the same number of elements, but received `source={source}` and `destination={destination}`.') + + def call(self, x): + return backend.numpy.moveaxis(x, self.source, self.destination) + + def compute_output_spec(self, x): + x_shape = list(x.shape) + output_shape = [-1 for _ in range(len(x.shape))] + for (sc, dst) in zip(self.source, self.destination): + output_shape[dst] = x_shape[sc] + x_shape[sc] = -1 + (i, j) = (0, 0) + while i < len(output_shape): + while i < len(output_shape) and output_shape[i] != -1: + i += 1 + while j < len(output_shape) and x_shape[j] == -1: + j += 1 + if i == len(output_shape): + break + output_shape[i] = x_shape[j] + i += 1 + j += 1 + return KerasTensor(output_shape, dtype=x.dtype) + +@keras_export(['keras.ops.moveaxis', 'keras.ops.numpy.moveaxis']) +def moveaxis(x, source, destination): + if any_symbolic_tensors((x,)): + return Moveaxis(source, destination).symbolic_call(x) + return backend.numpy.moveaxis(x, source=source, destination=destination) + +class NanToNum(Operation): + + def __init__(self, nan=0.0, posinf=None, neginf=None): + super().__init__() + self.nan = nan + self.posinf = posinf + self.neginf = neginf + + def call(self, x): + return backend.numpy.nan_to_num(x, nan=self.nan, posinf=self.posinf, neginf=self.neginf) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.nan_to_num', 'keras.ops.numpy.nan_to_num']) +def nan_to_num(x, nan=0.0, posinf=None, neginf=None): + if any_symbolic_tensors((x,)): + return NanToNum(nan=nan, posinf=posinf, neginf=neginf).symbolic_call(x) + return backend.numpy.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) + +class Ndim(Operation): + + def call(self, x): + return backend.numpy.ndim(x) + + def compute_output_spec(self, x): + return KerasTensor([len(x.shape)]) + +@keras_export(['keras.ops.ndim', 'keras.ops.numpy.ndim']) +def ndim(x): + if any_symbolic_tensors((x,)): + return Ndim().symbolic_call(x) + return backend.numpy.ndim(x) + +class Nonzero(Operation): + + def call(self, x): + return backend.numpy.nonzero(x) + + def compute_output_spec(self, x): + return tuple([KerasTensor((None,), dtype='int32') for _ in range(len(x.shape))]) + +@keras_export(['keras.ops.nonzero', 'keras.ops.numpy.nonzero']) +def nonzero(x): + if any_symbolic_tensors((x,)): + return Nonzero().symbolic_call(x) + return backend.numpy.nonzero(x) + +class NotEqual(Operation): + + def call(self, x1, x2): + return backend.numpy.not_equal(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + return KerasTensor(output_shape, dtype='bool') + +@keras_export(['keras.ops.not_equal', 'keras.ops.numpy.not_equal']) +def not_equal(x1, x2): + if any_symbolic_tensors((x1, x2)): + return NotEqual().symbolic_call(x1, x2) + return backend.numpy.not_equal(x1, x2) + +class OnesLike(Operation): + + def call(self, x, dtype=None): + return backend.numpy.ones_like(x, dtype=dtype) + + def compute_output_spec(self, x, dtype=None): + if dtype is None: + 
dtype = x.dtype + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.ones_like', 'keras.ops.numpy.ones_like']) +def ones_like(x, dtype=None): + if any_symbolic_tensors((x,)): + return OnesLike().symbolic_call(x, dtype=dtype) + return backend.numpy.ones_like(x, dtype=dtype) + +class ZerosLike(Operation): + + def call(self, x, dtype=None): + return backend.numpy.zeros_like(x, dtype=dtype) + + def compute_output_spec(self, x, dtype=None): + if dtype is None: + dtype = x.dtype + return KerasTensor(x.shape, dtype=dtype) + +@keras_export(['keras.ops.zeros_like', 'keras.ops.numpy.zeros_like']) +def zeros_like(x, dtype=None): + if any_symbolic_tensors((x,)): + return ZerosLike().symbolic_call(x, dtype=dtype) + return backend.numpy.zeros_like(x, dtype=dtype) + +class Outer(Operation): + + def call(self, x1, x2): + return backend.numpy.outer(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', [1]) + x2_shape = getattr(x2, 'shape', [1]) + if None in x1_shape: + x1_flatten_shape = None + else: + x1_flatten_shape = int(np.prod(x1_shape)) + if None in x2_shape: + x2_flatten_shape = None + else: + x2_flatten_shape = int(np.prod(x2_shape)) + output_shape = [x1_flatten_shape, x2_flatten_shape] + output_dtype = backend.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + return KerasTensor(output_shape, dtype=output_dtype) + +@keras_export(['keras.ops.outer', 'keras.ops.numpy.outer']) +def outer(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Outer().symbolic_call(x1, x2) + return backend.numpy.outer(x1, x2) + +class Pad(Operation): + + def __init__(self, pad_width, mode='constant'): + super().__init__() + self.pad_width = self._process_pad_width(pad_width) + self.mode = mode + + def _process_pad_width(self, pad_width): + if isinstance(pad_width, int): + return ((pad_width, pad_width),) + if isinstance(pad_width, (tuple, list)) and isinstance(pad_width[0], int): + return (pad_width,) + first_len = len(pad_width[0]) + for (i, pw) in enumerate(pad_width): + if len(pw) != first_len: + raise ValueError(f'`pad_width` should be a list of tuples of length 1 or 2. Received: pad_width={pad_width}') + if len(pw) == 1: + pad_width[i] = (pw[0], pw[0]) + return pad_width + + def call(self, x, constant_values=None): + return backend.numpy.pad(x, pad_width=self.pad_width, mode=self.mode, constant_values=constant_values) + + def compute_output_spec(self, x, constant_values=None): + output_shape = list(x.shape) + if len(self.pad_width) == 1: + pad_width = [self.pad_width[0] for _ in range(len(output_shape))] + elif len(self.pad_width) == len(output_shape): + pad_width = self.pad_width + else: + raise ValueError(f'`pad_width` must have the same length as `x.shape`. 
Received: pad_width={self.pad_width} (of length {len(self.pad_width)}) and x.shape={x.shape} (of length {len(x.shape)})') + for i in range(len(output_shape)): + if output_shape[i] is None: + output_shape[i] = None + else: + output_shape[i] += pad_width[i][0] + pad_width[i][1] + return KerasTensor(output_shape, dtype=x.dtype) + +@keras_export(['keras.ops.pad', 'keras.ops.numpy.pad']) +def pad(x, pad_width, mode='constant', constant_values=None): + return Pad(pad_width, mode=mode)(x, constant_values=constant_values) + +class Prod(Operation): + + def __init__(self, axis=None, keepdims=False, dtype=None): + super().__init__() + if isinstance(axis, int): + self.axis = [axis] + else: + self.axis = axis + self.keepdims = keepdims + self.dtype = dtype + + def call(self, x): + return backend.numpy.prod(x, axis=self.axis, keepdims=self.keepdims, dtype=self.dtype) + + def compute_output_spec(self, x): + if self.dtype is not None: + dtype = self.dtype + else: + dtype = backend.result_type(x.dtype) + if dtype == 'bool': + dtype = 'int32' + elif dtype in ('int8', 'int16'): + dtype = 'int32' + elif dtype in ('uint8', 'uint16'): + dtype = 'uint32' + if backend.backend() == 'torch' and dtype == 'uint32': + dtype = 'int32' + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=dtype) + +@keras_export(['keras.ops.prod', 'keras.ops.numpy.prod']) +def prod(x, axis=None, keepdims=False, dtype=None): + if any_symbolic_tensors((x,)): + return Prod(axis=axis, keepdims=keepdims, dtype=dtype).symbolic_call(x) + return backend.numpy.prod(x, axis=axis, keepdims=keepdims, dtype=dtype) + +class Quantile(Operation): + + def __init__(self, axis=None, method='linear', keepdims=False): + super().__init__() + if isinstance(axis, int): + axis = [axis] + self.axis = axis + self.method = method + self.keepdims = keepdims + + def call(self, x, q): + return backend.numpy.quantile(x, q, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x, q): + output_shape = reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims) + if hasattr(q, 'shape'): + if len(q.shape) > 0: + output_shape = (q.shape[0],) + output_shape + if backend.standardize_dtype(x.dtype) == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(x.dtype, float) + return KerasTensor(output_shape, dtype=dtype) + +@keras_export(['keras.ops.quantile', 'keras.ops.numpy.quantile']) +def quantile(x, q, axis=None, method='linear', keepdims=False): + if any_symbolic_tensors((x, q)): + return Quantile(axis=axis, method=method, keepdims=keepdims).symbolic_call(x, q) + return backend.numpy.quantile(x, q, axis=axis, method=method, keepdims=keepdims) + +class Ravel(Operation): + + def call(self, x): + return backend.numpy.ravel(x) + + def compute_output_spec(self, x): + if None in x.shape: + output_shape = [None] + else: + output_shape = [int(np.prod(x.shape))] + return KerasTensor(output_shape, dtype=x.dtype) + +@keras_export(['keras.ops.ravel', 'keras.ops.numpy.ravel']) +def ravel(x): + if any_symbolic_tensors((x,)): + return Ravel().symbolic_call(x) + return backend.numpy.ravel(x) + +class Real(Operation): + + def call(self, x): + return backend.numpy.real(x) + + def compute_output_spec(self, x): + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.real', 'keras.ops.numpy.real']) +def real(x): + if any_symbolic_tensors((x,)): + return Real().symbolic_call(x) + return backend.numpy.real(x) + +class Reciprocal(Operation): + + def 
call(self, x): + return backend.numpy.reciprocal(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape) + +@keras_export(['keras.ops.reciprocal', 'keras.ops.numpy.reciprocal']) +def reciprocal(x): + if any_symbolic_tensors((x,)): + return Reciprocal().symbolic_call(x) + return backend.numpy.reciprocal(x) + +class Repeat(Operation): + + def __init__(self, repeats, axis=None): + super().__init__() + self.axis = axis + self.repeats = repeats + + def call(self, x): + return backend.numpy.repeat(x, self.repeats, axis=self.axis) + + def compute_output_spec(self, x): + x_shape = list(x.shape) + repeats = self.repeats + if isinstance(repeats, int): + repeats = [repeats] + repeats_size = len(repeats) + broadcast = repeats_size == 1 + if self.axis is None: + if None in x_shape: + return KerasTensor([None], dtype=x.dtype) + x_flatten_size = int(np.prod(x_shape)) + if broadcast: + output_shape = [x_flatten_size * repeats[0]] + elif repeats_size != x_flatten_size: + raise ValueError(f'Size of `repeats` and dimensions of `x` after flattening should be compatible. Received: {repeats_size} and {x_flatten_size}') + else: + output_shape = [int(np.sum(repeats))] + return KerasTensor(output_shape, dtype=x.dtype) + size_on_ax = x_shape[self.axis] + if size_on_ax is None: + return KerasTensor(x_shape, dtype=x.dtype) + output_shape = x_shape + if broadcast: + output_shape[self.axis] = size_on_ax * repeats[0] + elif size_on_ax != repeats_size: + raise ValueError(f'Size of `repeats` and dimensions of `axis {self.axis} of x` should be compatible. Received: {repeats_size} and {x_shape}') + else: + output_shape[self.axis] = int(np.sum(repeats)) + return KerasTensor(output_shape, dtype=x.dtype) + +@keras_export(['keras.ops.repeat', 'keras.ops.numpy.repeat']) +def repeat(x, repeats, axis=None): + if any_symbolic_tensors((x,)): + return Repeat(repeats, axis=axis).symbolic_call(x) + return backend.numpy.repeat(x, repeats, axis=axis) + +class Reshape(Operation): + + def __init__(self, newshape): + super().__init__() + self.newshape = newshape + + def call(self, x): + return backend.numpy.reshape(x, self.newshape) + + def compute_output_spec(self, x): + output_shape = operation_utils.compute_reshape_output_shape(x.shape, self.newshape, 'newshape') + sparse = getattr(x, 'sparse', False) + return KerasTensor(output_shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.reshape', 'keras.ops.numpy.reshape']) +def reshape(x, newshape): + if any_symbolic_tensors((x,)): + return Reshape(newshape).symbolic_call(x) + return backend.numpy.reshape(x, newshape) + +class Roll(Operation): + + def __init__(self, shift, axis=None): + super().__init__() + self.shift = shift + self.axis = axis + + def call(self, x): + return backend.numpy.roll(x, self.shift, self.axis) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.roll', 'keras.ops.numpy.roll']) +def roll(x, shift, axis=None): + if any_symbolic_tensors((x,)): + return Roll(shift, axis=axis).symbolic_call(x) + return backend.numpy.roll(x, shift, axis=axis) + +class Round(Operation): + + def __init__(self, decimals=0): + super().__init__() + self.decimals = decimals + + def call(self, x): + return backend.numpy.round(x, self.decimals) + + def compute_output_spec(self, x): + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.round', 'keras.ops.numpy.round']) +def round(x, decimals=0): + if any_symbolic_tensors((x,)): + return 
Round(decimals).symbolic_call(x) + return backend.numpy.round(x, decimals) + +class SearchSorted(Operation): + + def call(self, sorted_sequence, values, side='left'): + sorted_sequence = backend.convert_to_tensor(sorted_sequence) + values = backend.convert_to_tensor(values) + return backend.numpy.searchsorted(sorted_sequence, values, side=side) + + def compute_output_spec(self, sorted_sequence, values, side='left'): + if len(sorted_sequence.shape) != 1: + raise ValueError('searchsorted only supports 1-D sorted sequences. Use keras.ops.vectorized_map to extend to N-D sequences.') + out_type = 'int32' if sorted_sequence.shape[0] <= np.iinfo(np.int32).max else 'int64' + return KerasTensor(values.shape, dtype=out_type) + +@keras_export(['keras.ops.searchsorted']) +def searchsorted(sorted_sequence, values, side='left'): + if any_symbolic_tensors((sorted_sequence, values)): + return SearchSorted().symbolic_call(sorted_sequence, values, side=side) + sorted_sequence = backend.convert_to_tensor(sorted_sequence) + values = backend.convert_to_tensor(values) + return backend.numpy.searchsorted(sorted_sequence, values, side=side) + +class Sign(Operation): + + def call(self, x): + return backend.numpy.sign(x) + + def compute_output_spec(self, x): + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.sign', 'keras.ops.numpy.sign']) +def sign(x): + if any_symbolic_tensors((x,)): + return Sign().symbolic_call(x) + return backend.numpy.sign(x) + +class Sin(Operation): + + def call(self, x): + return backend.numpy.sin(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.sin', 'keras.ops.numpy.sin']) +def sin(x): + if any_symbolic_tensors((x,)): + return Sin().symbolic_call(x) + return backend.numpy.sin(x) + +class Sinh(Operation): + + def call(self, x): + return backend.numpy.sinh(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.sinh', 'keras.ops.numpy.sinh']) +def sinh(x): + if any_symbolic_tensors((x,)): + return Sinh().symbolic_call(x) + return backend.numpy.sinh(x) + +class Size(Operation): + + def call(self, x): + return backend.numpy.size(x) + + def compute_output_spec(self, x): + return KerasTensor([], dtype='int32') + +@keras_export(['keras.ops.size', 'keras.ops.numpy.size']) +def size(x): + if any_symbolic_tensors((x,)): + return Size().symbolic_call(x) + return backend.numpy.size(x) + +class Sort(Operation): + + def __init__(self, axis=-1): + super().__init__() + self.axis = axis + + def call(self, x): + return backend.numpy.sort(x, axis=self.axis) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, x.dtype) + +@keras_export(['keras.ops.sort', 'keras.ops.numpy.sort']) +def sort(x, axis=-1): + if any_symbolic_tensors((x,)): + return Sort(axis=axis).symbolic_call(x) + return backend.numpy.sort(x, axis=axis) + 
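# --- Illustrative example (added for this write-up; not part of the upstream file). ---
# `searchsorted` returns the insertion indices that keep the sequence sorted; the
# symbolic path above only picks the output dtype (int32 vs int64) from the sequence
# length. A minimal sketch, assuming the public `keras.ops` namespace:
#
#   from keras import ops
#   ops.searchsorted([1, 3, 5, 7], [0, 4, 7], side='left')  # -> [0, 2, 3]
#   ops.searchsorted([1, 3, 5, 7], [7], side='right')       # -> [4]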
+class Split(Operation): + + def __init__(self, indices_or_sections, axis=0): + super().__init__() + if not isinstance(indices_or_sections, int): + indices_or_sections = tuple(indices_or_sections) + self.indices_or_sections = indices_or_sections + self.axis = axis + + def call(self, x): + return backend.numpy.split(x, self.indices_or_sections, axis=self.axis) + + def compute_output_spec(self, x): + x_shape = list(x.shape) + x_size_on_axis = x_shape[self.axis] + if isinstance(self.indices_or_sections, int): + if x_size_on_axis is None: + x_shape[self.axis] = None + return [KerasTensor(x_shape, dtype=x.dtype) for _ in range(self.indices_or_sections)] + if np.mod(x_size_on_axis, self.indices_or_sections) != 0: + raise ValueError(f'`x` size on given `axis` must be divisible by `indices_or_sections` when `indices_or_sections` is an int. But received {x_size_on_axis} and {self.indices_or_sections}.') + size = x_size_on_axis // self.indices_or_sections + x_shape[self.axis] = size + return [KerasTensor(x_shape, dtype=x.dtype) for _ in range(self.indices_or_sections)] + indices_or_sections = (0, *self.indices_or_sections, x_size_on_axis) + output_size = np.diff(indices_or_sections) + outputs = [] + for i in range(len(output_size)): + output_shape = list(x_shape) + output_shape[self.axis] = int(output_size[i]) + outputs.append(KerasTensor(output_shape, dtype=x.dtype)) + return outputs + +@keras_export(['keras.ops.split', 'keras.ops.numpy.split']) +def split(x, indices_or_sections, axis=0): + if any_symbolic_tensors((x,)): + return Split(indices_or_sections, axis=axis).symbolic_call(x) + return backend.numpy.split(x, indices_or_sections, axis=axis) + +class Stack(Operation): + + def __init__(self, axis=0): + super().__init__() + self.axis = axis + + def call(self, xs): + return backend.numpy.stack(xs, axis=self.axis) + + def compute_output_spec(self, xs): + first_shape = xs[0].shape + dtypes_to_resolve = [] + for x in xs: + if not shape_equal(x.shape, first_shape, axis=[], allow_none=True): + raise ValueError(f"Every value in `xs` must have the same shape. 
But found element of shape {x.shape}, which is different from the first element's shape {first_shape}.") + dtypes_to_resolve.append(getattr(x, 'dtype', type(x))) + size_on_axis = len(xs) + output_shape = list(first_shape) + if self.axis == -1: + output_shape = output_shape + [size_on_axis] + elif self.axis >= 0: + output_shape.insert(self.axis, size_on_axis) + else: + output_shape.insert(self.axis + 1, size_on_axis) + output_dtype = dtypes.result_type(*dtypes_to_resolve) + return KerasTensor(output_shape, dtype=output_dtype) + +@keras_export(['keras.ops.stack', 'keras.ops.numpy.stack']) +def stack(x, axis=0): + if any_symbolic_tensors((x,)): + return Stack(axis=axis).symbolic_call(x) + return backend.numpy.stack(x, axis=axis) + +class Std(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + if isinstance(axis, int): + self.axis = [axis] + else: + self.axis = axis + self.keepdims = keepdims + + def call(self, x): + return backend.numpy.std(x, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x): + output_dtype = backend.standardize_dtype(x.dtype) + if 'int' in output_dtype or output_dtype == 'bool': + output_dtype = backend.floatx() + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=output_dtype) + +@keras_export(['keras.ops.std', 'keras.ops.numpy.std']) +def std(x, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return Std(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.std(x, axis=axis, keepdims=keepdims) + +class Swapaxes(Operation): + + def __init__(self, axis1, axis2): + super().__init__() + self.axis1 = axis1 + self.axis2 = axis2 + + def call(self, x): + return backend.numpy.swapaxes(x, self.axis1, self.axis2) + + def compute_output_spec(self, x): + x_shape = list(x.shape) + tmp = x_shape[self.axis1] + x_shape[self.axis1] = x_shape[self.axis2] + x_shape[self.axis2] = tmp + return KerasTensor(x_shape, dtype=x.dtype) + +@keras_export(['keras.ops.swapaxes', 'keras.ops.numpy.swapaxes']) +def swapaxes(x, axis1, axis2): + if any_symbolic_tensors((x,)): + return Swapaxes(axis1, axis2).symbolic_call(x) + return backend.numpy.swapaxes(x, axis1=axis1, axis2=axis2) + +class Take(Operation): + + def __init__(self, axis=None): + super().__init__() + self.axis = axis + + def call(self, x, indices): + return backend.numpy.take(x, indices, axis=self.axis) + + def compute_output_spec(self, x, indices): + x_shape = list(x.shape) + if isinstance(indices, KerasTensor): + indices_shape = list(indices.shape) + else: + indices_shape = list(getattr(np.array(indices), 'shape', [])) + if self.axis is None: + return KerasTensor(indices_shape, dtype=x.dtype) + axis = len(x_shape) + self.axis if self.axis < 0 else self.axis + output_shape = x_shape[:axis] + indices_shape + x_shape[axis + 1:] + return KerasTensor(output_shape, dtype=x.dtype) + +@keras_export(['keras.ops.take', 'keras.ops.numpy.take']) +def take(x, indices, axis=None): + if any_symbolic_tensors((x, indices)): + return Take(axis=axis).symbolic_call(x, indices) + return backend.numpy.take(x, indices, axis=axis) + +class TakeAlongAxis(Operation): + + def __init__(self, axis=None): + super().__init__() + self.axis = axis + + def call(self, x, indices): + return backend.numpy.take_along_axis(x, indices, axis=self.axis) + + def compute_output_spec(self, x, indices): + output_shape = operation_utils.compute_take_along_axis_output_shape(x.shape, indices.shape, self.axis) + return KerasTensor(output_shape, dtype=x.dtype) + 
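# --- Illustrative example (added for this write-up; not part of the upstream file). ---
# `Take` replaces the indexed axis with the full shape of `indices`, while
# `TakeAlongAxis` keeps the rank of `x` and matches `indices` along `axis`.
# A minimal sketch, assuming the public `keras.ops` namespace:
#
#   import numpy as np
#   from keras import ops
#   x = np.arange(12).reshape(3, 4)
#   ops.take(x, [0, 2], axis=0)          # shape (2, 4): axis 0 replaced by (2,)
#   idx = np.argsort(x, axis=1)
#   ops.take_along_axis(x, idx, axis=1)  # shape (3, 4): rank preserved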
+@keras_export(['keras.ops.take_along_axis', 'keras.ops.numpy.take_along_axis']) +def take_along_axis(x, indices, axis=None): + if any_symbolic_tensors((x, indices)): + return TakeAlongAxis(axis=axis).symbolic_call(x, indices) + return backend.numpy.take_along_axis(x, indices, axis=axis) + +class Tan(Operation): + + def call(self, x): + return backend.numpy.tan(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.tan', 'keras.ops.numpy.tan']) +def tan(x): + if any_symbolic_tensors((x,)): + return Tan().symbolic_call(x) + return backend.numpy.tan(x) + +class Tanh(Operation): + + def call(self, x): + return backend.numpy.tanh(x) + + def compute_output_spec(self, x): + dtype = backend.standardize_dtype(getattr(x, 'dtype', backend.floatx())) + if dtype == 'int64': + dtype = backend.floatx() + else: + dtype = dtypes.result_type(dtype, float) + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.tanh', 'keras.ops.numpy.tanh']) +def tanh(x): + if any_symbolic_tensors((x,)): + return Tanh().symbolic_call(x) + return backend.numpy.tanh(x) + +class Tensordot(Operation): + + def __init__(self, axes=2): + super().__init__() + self.axes = axes + + def call(self, x1, x2): + return backend.numpy.tensordot(x1, x2, axes=self.axes) + + def compute_output_spec(self, x1, x2): + x1_shape = list(getattr(x1, 'shape', [])) + x2_shape = list(getattr(x2, 'shape', [])) + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + if not isinstance(self.axes, int): + x1_select_shape = [x1_shape[ax] for ax in self.axes[0]] + x2_select_shape = [x2_shape[ax] for ax in self.axes[1]] + if not shape_equal(x1_select_shape, x2_select_shape, allow_none=True): + raise ValueError(f'Shape mismatch on `x1[axes[0]]` and `x2[axes[1]]`, received {x1_select_shape} and {x2_select_shape}.') + for ax in self.axes[0]: + x1_shape[ax] = -1 + for ax in self.axes[1]: + x2_shape[ax] = -1 + x1_shape = list(filter((-1).__ne__, x1_shape)) + x2_shape = list(filter((-1).__ne__, x2_shape)) + output_shape = x1_shape + x2_shape + return KerasTensor(output_shape, dtype=dtype) + if self.axes <= 0: + output_shape = x1_shape + x2_shape + else: + output_shape = x1_shape[:-self.axes] + x2_shape[self.axes:] + return KerasTensor(output_shape, dtype=dtype) + +@keras_export(['keras.ops.tensordot', 'keras.ops.numpy.tensordot']) +def tensordot(x1, x2, axes=2): + if any_symbolic_tensors((x1, x2)): + return Tensordot(axes=axes).symbolic_call(x1, x2) + return backend.numpy.tensordot(x1, x2, axes=axes) + +class Tile(Operation): + + def __init__(self, repeats): + super().__init__() + self.repeats = repeats + + def call(self, x): + return backend.numpy.tile(x, self.repeats) + + def compute_output_spec(self, x): + x_shape = list(x.shape) + repeats = self.repeats + if isinstance(repeats, int): + repeats = [repeats] + if len(x_shape) > len(repeats): + repeats = [1] * (len(x_shape) - len(repeats)) + repeats + else: + x_shape = [1] * (len(repeats) - len(x_shape)) + x_shape + output_shape = [] + for (x_size, repeat) in zip(x_shape, repeats): + if x_size is None: + output_shape.append(None) + else: + output_shape.append(x_size * repeat) + return KerasTensor(output_shape, dtype=x.dtype) + 
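# --- Illustrative example (added for this write-up; not part of the upstream file). ---
# `Tile.compute_output_spec` mirrors NumPy semantics: `repeats` is left-padded with
# ones (or `x` with size-1 dims) until the ranks match, then each output dim is
# `x_size * repeat`. A minimal sketch, assuming the public `keras.ops` namespace:
#
#   from keras import ops
#   ops.tile(ops.ones((2, 3)), 2)          # repeats -> (1, 2), output shape (2, 6)
#   ops.tile(ops.ones((2, 3)), (2, 2, 2))  # x -> (1, 2, 3), output shape (2, 4, 6)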
+@keras_export(['keras.ops.tile', 'keras.ops.numpy.tile']) +def tile(x, repeats): + if any_symbolic_tensors((x,)): + return Tile(repeats).symbolic_call(x) + return backend.numpy.tile(x, repeats) + +class Trace(Operation): + + def __init__(self, offset=0, axis1=0, axis2=1): + super().__init__() + self.offset = offset + self.axis1 = axis1 + self.axis2 = axis2 + + def call(self, x): + return backend.numpy.trace(x, offset=self.offset, axis1=self.axis1, axis2=self.axis2) + + def compute_output_spec(self, x): + x_shape = list(x.shape) + x_shape[self.axis1] = -1 + x_shape[self.axis2] = -1 + output_shape = list(filter((-1).__ne__, x_shape)) + output_dtype = backend.standardize_dtype(x.dtype) + if output_dtype not in ('int64', 'uint32', 'uint64'): + output_dtype = dtypes.result_type(output_dtype, 'int32') + return KerasTensor(output_shape, dtype=output_dtype) + +@keras_export(['keras.ops.trace', 'keras.ops.numpy.trace']) +def trace(x, offset=0, axis1=0, axis2=1): + if any_symbolic_tensors((x,)): + return Trace(offset, axis1, axis2).symbolic_call(x) + return backend.numpy.trace(x, offset=offset, axis1=axis1, axis2=axis2) + +class Tri(Operation): + + def __init__(self, k=0, dtype=None): + super().__init__() + self.k = k + self.dtype = dtype or backend.floatx() + + def call(self, N, M=None): + return backend.numpy.tri(N=N, M=M, k=self.k, dtype=self.dtype) + + def compute_output_spec(self, N, M=None): + if M is None: + M = N + return KerasTensor((N, M), dtype=self.dtype) + +@keras_export(['keras.ops.tri', 'keras.ops.numpy.tri']) +def tri(N, M=None, k=0, dtype=None): + return backend.numpy.tri(N, M=M, k=k, dtype=dtype) + +class Tril(Operation): + + def __init__(self, k=0): + super().__init__() + self.k = k + + def call(self, x): + return backend.numpy.tril(x, k=self.k) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.tril', 'keras.ops.numpy.tril']) +def tril(x, k=0): + if any_symbolic_tensors((x,)): + return Tril(k=k).symbolic_call(x) + return backend.numpy.tril(x, k=k) + +class Triu(Operation): + + def __init__(self, k=0): + super().__init__() + self.k = k + + def call(self, x): + return backend.numpy.triu(x, k=self.k) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.triu', 'keras.ops.numpy.triu']) +def triu(x, k=0): + if any_symbolic_tensors((x,)): + return Triu(k=k).symbolic_call(x) + return backend.numpy.triu(x, k=k) + +class Trunc(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return backend.numpy.trunc(x) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) + +@keras_export(['keras.ops.trunc', 'keras.ops.numpy.trunc']) +def trunc(x): + if any_symbolic_tensors((x,)): + return Trunc().symbolic_call(x) + return backend.numpy.trunc(x) + +class Vdot(Operation): + + def call(self, x1, x2): + return backend.numpy.vdot(x1, x2) + + def compute_output_spec(self, x1, x2): + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + return KerasTensor([], dtype=dtype) + +@keras_export(['keras.ops.vdot', 'keras.ops.numpy.vdot']) +def vdot(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Vdot().symbolic_call(x1, x2) + return backend.numpy.vdot(x1, x2) + +@keras_export(['keras.ops.vectorize', 'keras.ops.numpy.vectorize']) +def vectorize(pyfunc, *, excluded=None, signature=None): + if not callable(pyfunc): + raise ValueError(f'Expected argument `pyfunc` to be a callable. 
Received: pyfunc={pyfunc}') + return backend.numpy.vectorize(pyfunc, excluded=excluded, signature=signature) + +class Vstack(Operation): + + def call(self, xs): + return backend.numpy.vstack(xs) + + def compute_output_spec(self, xs): + first_shape = xs[0].shape + total_size_on_axis = 0 + dtypes_to_resolve = [] + for x in xs: + if not shape_equal(x.shape, first_shape, axis=[0], allow_none=True): + raise ValueError(f"Every value in `xs` must have the same shape except on the `axis` dim. But found element of shape {x.shape}, which is different from the first element's shape {first_shape}.") + if total_size_on_axis is None or x.shape[0] is None: + total_size_on_axis = None + else: + total_size_on_axis += x.shape[0] + dtypes_to_resolve.append(getattr(x, 'dtype', type(x))) + output_shape = list(first_shape) + output_shape[0] = total_size_on_axis + output_dtype = dtypes.result_type(*dtypes_to_resolve) + return KerasTensor(output_shape, output_dtype) + +@keras_export(['keras.ops.vstack', 'keras.ops.numpy.vstack']) +def vstack(xs): + if any_symbolic_tensors((xs,)): + return Vstack().symbolic_call(xs) + return backend.numpy.vstack(xs) + +class Where(Operation): + + def call(self, condition, x1=None, x2=None): + return backend.numpy.where(condition, x1, x2) + + def compute_output_spec(self, condition, x1, x2): + condition_shape = getattr(condition, 'shape', []) + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(condition_shape, x1_shape) + output_shape = broadcast_shapes(output_shape, x2_shape) + output_dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1) if x1 is not None else 'int'), getattr(x2, 'dtype', type(x2) if x2 is not None else 'int')) + return KerasTensor(output_shape, dtype=output_dtype) + +@keras_export(['keras.ops.where', 'keras.ops.numpy.where']) +def where(condition, x1=None, x2=None): + if x1 is None and x2 is not None or (x1 is not None and x2 is None): + raise ValueError('`x1` and `x2` either both should be `None` or both should have non-None value.') + if any_symbolic_tensors((condition, x1, x2)): + return Where().symbolic_call(condition, x1, x2) + return backend.numpy.where(condition, x1, x2) + +class Subtract(Operation): + + def call(self, x1, x2): + return backend.numpy.subtract(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + x1_sparse = getattr(x1, 'sparse', False) + x2_sparse = getattr(x2, 'sparse', False) + output_sparse = x1_sparse and x2_sparse + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + return KerasTensor(output_shape, dtype=dtype, sparse=output_sparse) + +@keras_export(['keras.ops.subtract', 'keras.ops.numpy.subtract']) +def subtract(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Subtract().symbolic_call(x1, x2) + return backend.numpy.subtract(x1, x2) + +class Multiply(Operation): + + def call(self, x1, x2): + return backend.numpy.multiply(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + x1_sparse = getattr(x1, 'sparse', True) + x2_sparse = getattr(x2, 'sparse', True) + output_sparse = x1_sparse or x2_sparse + dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + return KerasTensor(output_shape, dtype=dtype, sparse=output_sparse) + 
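# --- Illustrative note (added for this write-up; not part of the upstream file). ---
# The symbolic `sparse` flag follows the algebra of each op: `subtract` (like `add`)
# densifies unless both operands are sparse, `multiply` stays sparse if either
# operand is (zeros propagate through products), and `divide` further below stays
# sparse only when the numerator is sparse and the denominator dense. A minimal
# sketch:
#
#   from keras import KerasTensor
#   a = KerasTensor((3,), sparse=True)
#   b = KerasTensor((3,), sparse=False)
#   multiply(a, b).sparse  # -> True  (or-rule)
#   subtract(a, b).sparse  # -> False (and-rule)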
+@keras_export(['keras.ops.multiply', 'keras.ops.numpy.multiply']) +def multiply(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Multiply().symbolic_call(x1, x2) + return backend.numpy.multiply(x1, x2) + +class Divide(Operation): + + def call(self, x1, x2): + return backend.numpy.divide(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + output_dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2)), float) + x1_sparse = getattr(x1, 'sparse', False) + x2_sparse = getattr(x2, 'sparse', False) + output_sparse = x1_sparse and (not x2_sparse) + return KerasTensor(output_shape, dtype=output_dtype, sparse=output_sparse) + +@keras_export(['keras.ops.divide', 'keras.ops.numpy.divide']) +def divide(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Divide().symbolic_call(x1, x2) + return backend.numpy.divide(x1, x2) + +class DivideNoNan(Operation): + + def call(self, x1, x2): + return backend.numpy.divide_no_nan(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + output_dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2)), float) + x1_sparse = getattr(x1, 'sparse', False) + x2_sparse = getattr(x2, 'sparse', False) + output_sparse = x1_sparse and (not x2_sparse) + return KerasTensor(output_shape, dtype=output_dtype, sparse=output_sparse) + +@keras_export(['keras.ops.divide_no_nan', 'keras.ops.numpy.divide_no_nan']) +def divide_no_nan(x1, x2): + if any_symbolic_tensors((x1, x2)): + return DivideNoNan().symbolic_call(x1, x2) + return backend.numpy.divide_no_nan(x1, x2) + +class TrueDivide(Operation): + + def call(self, x1, x2): + return backend.numpy.true_divide(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + output_dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2)), float) + x1_sparse = getattr(x1, 'sparse', False) + x2_sparse = getattr(x2, 'sparse', False) + output_sparse = x1_sparse and (not x2_sparse) + return KerasTensor(output_shape, dtype=output_dtype, sparse=output_sparse) + +@keras_export(['keras.ops.true_divide', 'keras.ops.numpy.true_divide']) +def true_divide(x1, x2): + if any_symbolic_tensors((x1, x2)): + return TrueDivide().symbolic_call(x1, x2) + return backend.numpy.true_divide(x1, x2) + +class Power(Operation): + + def call(self, x1, x2): + return backend.numpy.power(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + output_dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + return KerasTensor(output_shape, dtype=output_dtype) + +@keras_export(['keras.ops.power', 'keras.ops.numpy.power']) +def power(x1, x2): + if any_symbolic_tensors((x1, x2)): + return Power().symbolic_call(x1, x2) + return backend.numpy.power(x1, x2) + +class Negative(Operation): + + def call(self, x): + return backend.numpy.negative(x) + + def compute_output_spec(self, x): + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.negative', 'keras.ops.numpy.negative']) 
+def negative(x): + if any_symbolic_tensors((x,)): + return Negative().symbolic_call(x) + return backend.numpy.negative(x) + +class Square(Operation): + + def call(self, x): + return backend.numpy.square(x) + + def compute_output_spec(self, x): + sparse = getattr(x, 'sparse', False) + dtype = backend.standardize_dtype(x.dtype) + if dtype == 'bool': + dtype = 'int32' + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.square', 'keras.ops.numpy.square']) +def square(x): + if any_symbolic_tensors((x,)): + return Square().symbolic_call(x) + return backend.numpy.square(x) + +class Sqrt(Operation): + + def call(self, x): + x = backend.convert_to_tensor(x) + return backend.numpy.sqrt(x) + + def compute_output_spec(self, x): + dtype = backend.floatx() if backend.standardize_dtype(x.dtype) == 'int64' else dtypes.result_type(x.dtype, float) + sparse = getattr(x, 'sparse', False) + return KerasTensor(x.shape, dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.sqrt', 'keras.ops.numpy.sqrt']) +def sqrt(x): + if any_symbolic_tensors((x,)): + return Sqrt().symbolic_call(x) + x = backend.convert_to_tensor(x) + return backend.numpy.sqrt(x) + +class Squeeze(Operation): + + def __init__(self, axis=None): + super().__init__() + self.axis = axis + + def call(self, x): + return backend.numpy.squeeze(x, axis=self.axis) + + def compute_output_spec(self, x): + input_shape = list(x.shape) + sparse = getattr(x, 'sparse', False) + axis = to_tuple_or_list(self.axis) + if axis is None: + output_shape = list(filter(1 .__ne__, input_shape)) + return KerasTensor(output_shape, dtype=x.dtype, sparse=sparse) + else: + for a in axis: + if input_shape[a] != 1: + raise ValueError(f'Cannot squeeze axis {a}, because the dimension is not 1.') + axis = [canonicalize_axis(a, len(input_shape)) for a in axis] + for a in sorted(axis, reverse=True): + del input_shape[a] + return KerasTensor(input_shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.squeeze', 'keras.ops.numpy.squeeze']) +def squeeze(x, axis=None): + if any_symbolic_tensors((x,)): + return Squeeze(axis=axis).symbolic_call(x) + return backend.numpy.squeeze(x, axis=axis) + +class Transpose(Operation): + + def __init__(self, axes=None): + super().__init__() + self.axes = axes + + def call(self, x): + return backend.numpy.transpose(x, axes=self.axes) + + def compute_output_spec(self, x): + output_shape = operation_utils.compute_transpose_output_shape(x.shape, self.axes) + sparse = getattr(x, 'sparse', False) + return KerasTensor(output_shape, dtype=x.dtype, sparse=sparse) + +@keras_export(['keras.ops.transpose', 'keras.ops.numpy.transpose']) +def transpose(x, axes=None): + if any_symbolic_tensors((x,)): + return Transpose(axes=axes).symbolic_call(x) + return backend.numpy.transpose(x, axes=axes) + +class Mean(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + if isinstance(axis, int): + axis = [axis] + self.axis = axis + self.keepdims = keepdims + + def call(self, x): + return backend.numpy.mean(x, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x): + ori_dtype = backend.standardize_dtype(x.dtype) + compute_dtype = dtypes.result_type(x.dtype, 'float32') + if 'int' in ori_dtype or ori_dtype == 'bool': + result_dtype = compute_dtype + else: + result_dtype = ori_dtype + sparse = getattr(x, 'sparse', False) + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=result_dtype, sparse=sparse) + +@keras_export(['keras.ops.mean', 
'keras.ops.numpy.mean']) +def mean(x, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return Mean(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.mean(x, axis=axis, keepdims=keepdims) + +class Var(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + if isinstance(axis, int): + axis = [axis] + self.axis = axis + self.keepdims = keepdims + + def call(self, x): + return backend.numpy.var(x, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x): + output_dtype = backend.result_type(getattr(x, 'dtype', type(x)), float) + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=output_dtype) + +@keras_export(['keras.ops.var', 'keras.ops.numpy.var']) +def var(x, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return Var(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.var(x, axis=axis, keepdims=keepdims) + +class Sum(Operation): + + def __init__(self, axis=None, keepdims=False): + super().__init__() + if isinstance(axis, int): + axis = [axis] + self.axis = axis + self.keepdims = keepdims + + def call(self, x): + return backend.numpy.sum(x, axis=self.axis, keepdims=self.keepdims) + + def compute_output_spec(self, x): + dtype = dtypes.result_type(getattr(x, 'dtype', backend.floatx())) + if dtype in ('bool', 'int8', 'int16'): + dtype = 'int32' + elif dtype in ('uint8', 'uint16'): + dtype = 'uint32' + if backend.backend() == 'torch' and dtype == 'uint32': + dtype = 'int32' + sparse = getattr(x, 'sparse', False) + return KerasTensor(reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=dtype, sparse=sparse) + +@keras_export(['keras.ops.sum', 'keras.ops.numpy.sum']) +def sum(x, axis=None, keepdims=False): + if any_symbolic_tensors((x,)): + return Sum(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.sum(x, axis=axis, keepdims=keepdims) + +class Zeros(Operation): + + def call(self, shape, dtype=None): + return backend.numpy.zeros(shape, dtype=dtype) + + def compute_output_spec(self, shape, dtype=None): + dtype = dtype or backend.floatx() + return KerasTensor(shape, dtype=dtype) + +@keras_export(['keras.ops.zeros', 'keras.ops.numpy.zeros']) +def zeros(shape, dtype=None): + return backend.numpy.zeros(shape, dtype=dtype) + +class Ones(Operation): + + def call(self, shape, dtype=None): + return backend.numpy.ones(shape, dtype=dtype) + + def compute_output_spec(self, shape, dtype=None): + dtype = dtype or backend.floatx() + return KerasTensor(shape, dtype=dtype) + +@keras_export(['keras.ops.ones', 'keras.ops.numpy.ones']) +def ones(shape, dtype=None): + return backend.numpy.ones(shape, dtype=dtype) + +class Eye(Operation): + + def __init__(self, k=0, dtype=None): + super().__init__() + self.k = k + self.dtype = dtype or backend.floatx() + + def call(self, N, M=None): + return backend.numpy.eye(N, M=M, k=self.k, dtype=self.dtype) + + def compute_output_spec(self, N, M=None): + if M is None: + M = N + return KerasTensor((N, M), dtype=self.dtype) + +@keras_export(['keras.ops.eye', 'keras.ops.numpy.eye']) +def eye(N, M=None, k=0, dtype=None): + return backend.numpy.eye(N, M=M, k=k, dtype=dtype) + +class FloorDivide(Operation): + + def call(self, x1, x2): + return backend.numpy.floor_divide(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + output_dtype = dtypes.result_type(getattr(x1, 
'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + return KerasTensor(output_shape, dtype=output_dtype) + +@keras_export(['keras.ops.floor_divide', 'keras.ops.numpy.floor_divide']) +def floor_divide(x1, x2): + if any_symbolic_tensors((x1, x2)): + return FloorDivide().symbolic_call(x1, x2) + return backend.numpy.floor_divide(x1, x2) + +class LogicalXor(Operation): + + def call(self, x1, x2): + return backend.numpy.logical_xor(x1, x2) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + output_shape = broadcast_shapes(x1_shape, x2_shape) + return KerasTensor(output_shape, dtype='bool') + +@keras_export(['keras.ops.logical_xor', 'keras.ops.numpy.logical_xor']) +def logical_xor(x1, x2): + if any_symbolic_tensors((x1, x2)): + return LogicalXor().symbolic_call(x1, x2) + return backend.numpy.logical_xor(x1, x2) + +class Correlate(Operation): + + def __init__(self, mode='valid'): + super().__init__() + self.mode = mode + + def call(self, x1, x2): + return backend.numpy.correlate(x1, x2, mode=self.mode) + + def compute_output_spec(self, x1, x2): + x1_shape = getattr(x1, 'shape', []) + x2_shape = getattr(x2, 'shape', []) + if len(x1_shape) != 1: + raise ValueError(f'`x1` must be a 1-dimensional tensor, but received shape {x1_shape}') + if len(x2_shape) != 1: + raise ValueError(f'`x2` must be a 1-dimensional tensor, but received shape {x2_shape}') + (x1_len, x2_len) = (x1_shape[0], x2_shape[0]) + output_shape = (np.maximum(x1_len, x2_len) - np.minimum(x1_len, x2_len) + 1,) + if self.mode == 'same': + output_shape = (np.maximum(x1_len, x2_len),) + elif self.mode == 'full': + output_shape = (x1_len + x2_len - 1,) + if self.mode not in ('valid', 'same', 'full'): + raise ValueError(f'`mode` must be either `valid`, `same`, or `full`, but received: {self.mode}') + output_dtype = dtypes.result_type(getattr(x1, 'dtype', type(x1)), getattr(x2, 'dtype', type(x2))) + if output_dtype == 'int64': + output_dtype = 'float64' + elif output_dtype not in ['bfloat16', 'float16', 'float64']: + output_dtype = 'float32' + return KerasTensor(output_shape, dtype=output_dtype) + +@keras_export(['keras.ops.correlate', 'keras.ops.numpy.correlate']) +def correlate(x1, x2, mode='valid'): + if any_symbolic_tensors((x1, x2)): + return Correlate(mode=mode).symbolic_call(x1, x2) + return backend.numpy.correlate(x1, x2, mode=mode) + +class Select(Operation): + + def __init__(self): + super().__init__() + + def call(self, condlist, choicelist, default=0): + return backend.numpy.select(condlist, choicelist, default) + + def compute_output_spec(self, condlist, choicelist, default=0): + first_element = choicelist[0] + return KerasTensor(first_element.shape, dtype=first_element.dtype) + +@keras_export(['keras.ops.select', 'keras.ops.numpy.select']) +def select(condlist, choicelist, default=0): + if not isinstance(condlist, (list, tuple)) or not isinstance(choicelist, (list, tuple)): + raise ValueError(f'condlist and choicelist must be lists. Received: type(condlist) = {type(condlist)}, type(choicelist) = {type(choicelist)}') + condlist = list(condlist) + choicelist = list(choicelist) + if not condlist or not choicelist: + raise ValueError(f'condlist and choicelist must not be empty. Received: condlist = {condlist}, choicelist = {choicelist}') + if any_symbolic_tensors(condlist + choicelist + [default]): + return Select().symbolic_call(condlist, choicelist, default) + return backend.numpy.select(condlist, choicelist, default) + 
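# --- Illustrative example (added for this write-up; not part of the upstream file). ---
# `select` picks, element-wise, the first choice whose condition is True, falling
# back to `default`. A minimal sketch, assuming the public `keras.ops` namespace:
#
#   from keras import ops
#   x = ops.arange(6)
#   ops.select([x < 2, x > 3], [x, x ** 2], default=-1)
#   # -> [0, 1, -1, -1, 16, 25]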
+class Slogdet(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return backend.numpy.slogdet(x) + + def compute_output_spec(self, x): + sign = KerasTensor((), dtype=x.dtype) + logabsdet = KerasTensor(x.shape[:-2], dtype=x.dtype) + return (sign, logabsdet) + +@keras_export(['keras.ops.slogdet', 'keras.ops.numpy.slogdet']) +def slogdet(x): + if any_symbolic_tensors((x,)): + return Slogdet().symbolic_call(x) + return backend.numpy.slogdet(x) + +class Argpartition(Operation): + + def __init__(self, kth, axis=-1): + super().__init__() + if not isinstance(kth, int): + raise ValueError(f'kth must be an integer. Received: kth={kth}') + self.kth = kth + self.axis = axis + + def call(self, x): + return backend.numpy.argpartition(x, kth=self.kth, axis=self.axis) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype='int32') + +@keras_export(['keras.ops.argpartition', 'keras.ops.numpy.argpartition']) +def argpartition(x, kth, axis=-1): + if any_symbolic_tensors((x,)): + return Argpartition(kth, axis).symbolic_call(x) + return backend.numpy.argpartition(x, kth, axis) + +# File: keras-master/keras/src/ops/operation.py +import inspect +import textwrap +from keras.src import backend +from keras.src import dtype_policies +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend.common.keras_tensor import any_symbolic_tensors +from keras.src.ops.node import Node +from keras.src.utils import python_utils +from keras.src.utils import traceback_utils +from keras.src.utils.naming import auto_name + +@keras_export('keras.Operation') +class Operation: + + def __init__(self, dtype=None, name=None): + if name is None: + name = auto_name(self.__class__.__name__) + if not isinstance(name, str) or '/' in name: + raise ValueError(f'Argument `name` must be a string and cannot contain character `/`. 
Received: name={name} (of type {type(name)})') + self._dtype_policy = dtype_policies.get(dtype) + self.name = name + self._inbound_nodes = [] + self._outbound_nodes = [] + + @traceback_utils.filter_traceback + def __call__(self, *args, **kwargs): + if traceback_utils.is_traceback_filtering_enabled(): + if any_symbolic_tensors(args, kwargs): + call_fn = self.symbolic_call + elif getattr(self, 'quantization_mode', None) is not None: + call_fn = self.quantized_call + else: + call_fn = self.call + call_fn = traceback_utils.inject_argument_info_in_traceback(call_fn, object_name=f'{self.__class__.__name__}.call()') + return call_fn(*args, **kwargs) + if any_symbolic_tensors(args, kwargs): + return self.symbolic_call(*args, **kwargs) + if getattr(self, 'quantization_mode', None) is not None: + return self.quantized_call(*args, **kwargs) + else: + return self.call(*args, **kwargs) + + def symbolic_call(self, *args, **kwargs): + outputs = self.compute_output_spec(*args, **kwargs) + Node(operation=self, call_args=args, call_kwargs=kwargs, outputs=outputs) + return outputs + + def call(self, *args, **kwargs): + raise NotImplementedError + + def quantized_call(self, *args, **kwargs): + raise NotImplementedError + + def compute_output_spec(self, *args, **kwargs): + try: + return backend.compute_output_spec(self.call, *args, **kwargs) + except Exception as e: + new_e = e.__class__(f"Could not automatically infer the output shape / dtype of '{self.name}' (of type {self.__class__.__name__}). Either the `{self.__class__.__name__}.call()` method is incorrect, or you need to implement the `{self.__class__.__name__}.compute_output_spec() / compute_output_shape()` method. Error encountered:\n\n{e}") + raise new_e.with_traceback(e.__traceback__) from None + + def __new__(cls, *args, **kwargs): + instance = super(Operation, cls).__new__(cls) + arg_names = inspect.getfullargspec(cls.__init__).args + kwargs.update(dict(zip(arg_names[1:len(args) + 1], args))) + dtype = kwargs.get('dtype', None) + if dtype is not None and isinstance(dtype, dtype_policies.DTypePolicy): + if dtype.quantization_mode is None: + kwargs['dtype'] = dtype.name + else: + kwargs['dtype'] = dtype_policies.serialize(dtype) + supported_types = (str, int, float, bool, type(None)) + try: + flat_arg_values = tree.flatten(kwargs) + auto_config = True + for value in flat_arg_values: + if not isinstance(value, supported_types): + auto_config = False + break + except TypeError: + auto_config = False + try: + instance._lock = False + if auto_config: + from keras.src.saving import serialization_lib + instance._auto_config = serialization_lib.SerializableDict(**kwargs) + else: + instance._auto_config = None + instance._lock = True + except RecursionError: + pass + return instance + + @python_utils.default + def get_config(self): + config = {'name': self.name} + if not python_utils.is_default(self.get_config): + return config + if getattr(self, '_auto_config', None) is not None: + xtra_args = set(config.keys()) + config.update(self._auto_config.config) + argspec = inspect.getfullargspec(self.__init__) + if argspec.varkw != 'kwargs': + for key in xtra_args - xtra_args.intersection(argspec.args[1:]): + config.pop(key, None) + return config + else: + raise NotImplementedError(textwrap.dedent(f'\n Object {self.__class__.__name__} was created by passing\n non-serializable argument values in `__init__()`,\n and therefore the object must override `get_config()` in\n order to be serializable. 
Please implement `get_config()`.\n\n Example:\n\n class CustomLayer(keras.layers.Layer):\n def __init__(self, arg1, arg2, **kwargs):\n super().__init__(**kwargs)\n self.arg1 = arg1\n self.arg2 = arg2\n\n def get_config(self):\n config = super().get_config()\n config.update({{\n "arg1": self.arg1,\n "arg2": self.arg2,\n }})\n return config')) + + @classmethod + def from_config(cls, config): + if 'dtype' in config and isinstance(config['dtype'], dict): + config = config.copy() + policy = dtype_policies.deserialize(config['dtype']) + if not isinstance(policy, dtype_policies.DTypePolicyMap) and policy.quantization_mode is None: + policy = policy.name + config['dtype'] = policy + try: + return cls(**config) + except Exception as e: + raise TypeError(f"Error when deserializing class '{cls.__name__}' using config={config}.\n\nException encountered: {e}") + + def __repr__(self): + return f'<Operation name={self.name}>' + + @property + def input(self): + return self._get_node_attribute_at_index(0, 'input_tensors', 'input') + + @property + def output(self): + return self._get_node_attribute_at_index(0, 'output_tensors', 'output') + + def _get_node_attribute_at_index(self, node_index, attr, attr_name): + if not self._inbound_nodes: + raise AttributeError(f'The layer {self.name} has never been called and thus has no defined {attr_name}.') + if not len(self._inbound_nodes) > node_index: + raise ValueError(f'Asked to get {attr_name} at node {node_index}, but the operation has only {len(self._inbound_nodes)} inbound nodes.') + values = getattr(self._inbound_nodes[node_index], attr) + if isinstance(values, list) and len(values) == 1: + return values[0] + else: + return values + + def _post_build(self): + pass + + def _setattr_hook(self, name, value): + return (name, value) + + def _post_track_variable(self, variable): + pass + + def _post_untrack_variable(self, variable): + pass + +# File: keras-master/keras/src/ops/operation_utils.py +import math +import numpy as np +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend.common.backend_utils import canonicalize_axis +from keras.src.backend.common.backend_utils import to_tuple_or_list + +def broadcast_shapes(shape1, shape2): + shape1 = list(shape1) + shape2 = list(shape2) + origin_shape1 = shape1 + origin_shape2 = shape2 + if len(shape1) > len(shape2): + shape2 = [1] * (len(shape1) - len(shape2)) + shape2 + if len(shape1) < len(shape2): + shape1 = [1] * (len(shape2) - len(shape1)) + shape1 + output_shape = list(shape1) + for i in range(len(shape1)): + if shape1[i] == 1: + output_shape[i] = shape2[i] + elif shape1[i] is None: + output_shape[i] = None if shape2[i] == 1 else shape2[i] + elif shape2[i] == 1 or shape2[i] is None or shape2[i] == shape1[i]: + output_shape[i] = shape1[i] + else: + raise ValueError(f'Cannot broadcast shape, the failure dim has value {shape1[i]}, which cannot be broadcast to {shape2[i]}. 
Input shapes are: {origin_shape1} and {origin_shape2}.') + return output_shape + +def compute_expand_dims_output_shape(input_shape, axis): + input_shape = list(input_shape) + if axis is None: + axis = len(input_shape) + axis = to_tuple_or_list(axis) + out_ndim = len(axis) + len(input_shape) + axis = [canonicalize_axis(a, out_ndim) for a in axis] + shape_iter = iter(input_shape) + new_shape = [1 if ax in axis else next(shape_iter) for ax in range(out_ndim)] + return tuple(new_shape) + +def compute_pooling_output_shape(input_shape, pool_size, strides, padding='valid', data_format='channels_last'): + strides = pool_size if strides is None else strides + input_shape_origin = list(input_shape) + input_shape = np.array(input_shape) + if data_format == 'channels_last': + spatial_shape = input_shape[1:-1] + else: + spatial_shape = input_shape[2:] + none_dims = [] + for i in range(len(spatial_shape)): + if spatial_shape[i] is None: + spatial_shape[i] = -1 + none_dims.append(i) + pool_size = np.array(pool_size) + if padding == 'valid': + output_spatial_shape = np.floor((spatial_shape - pool_size) / strides) + 1 + for i in range(len(output_spatial_shape)): + if i not in none_dims and output_spatial_shape[i] < 0: + raise ValueError(f'Computed output size would be negative. Received: `inputs.shape={input_shape}` and `pool_size={pool_size}`.') + elif padding == 'same': + output_spatial_shape = np.floor((spatial_shape - 1) / strides) + 1 + else: + raise ValueError(f"Argument `padding` must be either 'valid' or 'same'. Received: padding={padding}") + output_spatial_shape = [int(i) for i in output_spatial_shape] + for i in none_dims: + output_spatial_shape[i] = None + output_spatial_shape = tuple(output_spatial_shape) + if data_format == 'channels_last': + output_shape = (input_shape_origin[0],) + output_spatial_shape + (input_shape_origin[-1],) + else: + output_shape = (input_shape_origin[0], input_shape_origin[1]) + output_spatial_shape + return output_shape + +def compute_conv_output_shape(input_shape, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1): + if data_format == 'channels_last': + spatial_shape = input_shape[1:-1] + kernel_shape = kernel_size + (input_shape[-1], filters) + else: + spatial_shape = input_shape[2:] + kernel_shape = kernel_size + (input_shape[1], filters) + if len(kernel_shape) != len(input_shape): + raise ValueError(f'Kernel shape must have the same length as input, but received kernel of shape {kernel_shape} and input of shape {input_shape}.') + if isinstance(dilation_rate, int): + dilation_rate = (dilation_rate,) * len(spatial_shape) + if isinstance(strides, int): + strides = (strides,) * len(spatial_shape) + if len(dilation_rate) != len(spatial_shape): + raise ValueError(f"Dilation must be None, scalar or tuple/list of length of inputs' spatial shape, but received `dilation_rate={dilation_rate}` and input of shape {input_shape}.") + none_dims = [] + spatial_shape = np.array(spatial_shape) + for i in range(len(spatial_shape)): + if spatial_shape[i] is None: + spatial_shape[i] = -1 + none_dims.append(i) + kernel_spatial_shape = np.array(kernel_shape[:-2]) + dilation_rate = np.array(dilation_rate) + if padding == 'valid': + output_spatial_shape = np.floor((spatial_shape - dilation_rate * (kernel_spatial_shape - 1) - 1) / strides) + 1 + for i in range(len(output_spatial_shape)): + if i not in none_dims and output_spatial_shape[i] < 0: + raise ValueError(f'Computed output size would be negative. 
Received `inputs shape={input_shape}`, `kernel shape={kernel_shape}`, `dilation_rate={dilation_rate}`.') + elif padding == 'same' or padding == 'causal': + output_spatial_shape = np.floor((spatial_shape - 1) / strides) + 1 + else: + raise ValueError(f"`padding` must be either `'valid'` or `'same'`. Received {padding}.") + output_spatial_shape = [int(i) for i in output_spatial_shape] + for i in none_dims: + output_spatial_shape[i] = None + output_spatial_shape = tuple(output_spatial_shape) + if data_format == 'channels_last': + output_shape = (input_shape[0],) + output_spatial_shape + (kernel_shape[-1],) + else: + output_shape = (input_shape[0], kernel_shape[-1]) + output_spatial_shape + return output_shape + +def compute_matmul_output_shape(shape1, shape2): + if len(shape1) == 1: + shape1 = (1, shape1[0]) + if len(shape2) == 1: + shape2 = (shape2[0], 1) + if shape1[-1] is not None and shape2[-2] is not None and (shape1[-1] != shape2[-2]): + raise ValueError(f'Inner dimensions (`x1.shape[-1]` and `x2.shape[-2]`) must be equal, but received `x1.shape={shape1}` and `x2.shape={shape2}`.') + leading_shape = broadcast_shapes(shape1[:-2], shape2[:-2]) + last_2_dims_shape = [shape1[-2], shape2[-1]] + output_shape = leading_shape + last_2_dims_shape + if len(shape1) == 1: + del output_shape[-2] + if len(shape2) == 1: + del output_shape[-1] + return tuple(output_shape) + +def compute_reshape_output_shape(input_shape, newshape, newshape_arg_name): + unknown_dim_count = newshape.count(-1) + if unknown_dim_count > 1: + raise ValueError(f'There must be at most one unknown dimension (-1) in {newshape_arg_name}. Received: {newshape_arg_name}={newshape}.') + if None in input_shape: + return tuple((dim if dim != -1 else None for dim in newshape)) + input_size = math.prod(input_shape) + if unknown_dim_count == 0: + if input_size != math.prod(newshape): + raise ValueError(f'The total size of the tensor must be unchanged. Received: input_shape={input_shape}, {newshape_arg_name}={newshape}') + return newshape + known_output_size = 1 + unknown_dim_index = None + for (index, dim) in enumerate(newshape): + if dim == -1: + unknown_dim_index = index + else: + known_output_size *= dim + if known_output_size == 0 or input_size % known_output_size != 0: + raise ValueError(f'The total size of the tensor must be unchanged, however, the input size cannot be divided by the specified dimensions in {newshape_arg_name}. 
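The -1 resolution implemented above, in miniature (sketch, same import assumption):

from keras.src.ops.operation_utils import compute_reshape_output_shape

# One -1 is inferred so the element count is unchanged: 6 * 4 == 3 * 8.
assert compute_reshape_output_shape((6, 4), (-1, 8), "newshape") == (3, 8)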
Received: input_shape={input_shape}, {newshape_arg_name}={newshape}') + output_shape = list(newshape) + output_shape[unknown_dim_index] = input_size // known_output_size + return tuple(output_shape) + +def compute_transpose_output_shape(input_shape, axes): + input_shape = list(input_shape) + if axes is None: + return tuple(input_shape[::-1]) + if len(axes) != len(input_shape): + raise ValueError(f'axes must be a list of the same length as the input shape, expected {len(input_shape)}, but received {len(axes)}.') + return tuple((input_shape[ax] for ax in axes)) + +def compute_take_along_axis_output_shape(input_shape, indices_shape, axis): + input_shape = list(input_shape) + indices_shape = list(indices_shape) + if axis is None: + input_shape = [None] if None in input_shape else [int(np.prod(input_shape))] + if len(input_shape) != len(indices_shape): + raise ValueError(f'`x` and `indices` must have the same number of dimensions, but received shapes {input_shape} and {indices_shape}.') + input_shape[axis] = indices_shape[axis] + output_shape = broadcast_shapes(input_shape, indices_shape) + return output_shape + +def reduce_shape(shape, axis=None, keepdims=False): + shape = list(shape) + if axis is None: + if keepdims: + return tuple([1 for _ in shape]) + else: + return tuple([]) + if keepdims: + for ax in axis: + shape[ax] = 1 + return tuple(shape) + else: + for ax in sorted(axis, reverse=True): + del shape[ax] + return tuple(shape) + +@keras_export('keras.utils.get_source_inputs') +def get_source_inputs(tensor): + if not hasattr(tensor, '_keras_history'): + return tensor + (operation, node_index, _) = tensor._keras_history + if not operation or not operation._inbound_nodes: + return [tensor] + else: + node = operation._inbound_nodes[node_index] + if node.is_input: + return tree.flatten(node.output_tensors) + else: + source_tensors = [] + for tensor in node.input_tensors: + previous_sources = get_source_inputs(tensor) + for x in previous_sources: + if all((x is not t for t in source_tensors)): + source_tensors.append(x) + return source_tensors + +# File: keras-master/keras/src/ops/symbolic_arguments.py +from keras.src import tree +from keras.src.backend import KerasTensor + +class SymbolicArguments: + + def __init__(self, *args, **kwargs): + self.args = tree.map_structure(lambda x: x, args) + self.kwargs = tree.map_structure(lambda x: x, kwargs) + self._flat_arguments = tree.flatten((self.args, self.kwargs)) + if not self.kwargs and len(self.args) == 1 and isinstance(self.args[0], KerasTensor): + self._single_positional_tensor = self.args[0] + else: + self._single_positional_tensor = None + self.keras_tensors = [] + for arg in self._flat_arguments: + if isinstance(arg, KerasTensor): + self.keras_tensors.append(arg) + + def convert(self, conversion_fn): + args = tree.map_structure(conversion_fn, self.args) + kwargs = tree.map_structure(conversion_fn, self.kwargs) + return (args, kwargs) + + def fill_in(self, tensor_dict): + if self._single_positional_tensor is not None: + return ((tensor_dict[id(self._single_positional_tensor)],), {}) + + def switch_fn(x): + if isinstance(x, KerasTensor): + return tensor_dict.get(id(x), None) + return x + return self.convert(switch_fn) + +# File: keras-master/keras/src/optimizers/__init__.py +from keras.src.api_export import keras_export +from keras.src.optimizers.adadelta import Adadelta +from keras.src.optimizers.adafactor import Adafactor +from keras.src.optimizers.adagrad import Adagrad +from keras.src.optimizers.adam import Adam +from 
keras.src.optimizers.adamax import Adamax +from keras.src.optimizers.adamw import AdamW +from keras.src.optimizers.ftrl import Ftrl +from keras.src.optimizers.lion import Lion +from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer +from keras.src.optimizers.nadam import Nadam +from keras.src.optimizers.optimizer import Optimizer +from keras.src.optimizers.rmsprop import RMSprop +from keras.src.optimizers.sgd import SGD +from keras.src.saving import serialization_lib +ALL_OBJECTS = {Optimizer, Adam, SGD, RMSprop, Adadelta, AdamW, Adagrad, Adamax, Adafactor, Nadam, Ftrl, Lion, LossScaleOptimizer} +ALL_OBJECTS_DICT = {cls.__name__.lower(): cls for cls in ALL_OBJECTS} + +@keras_export('keras.optimizers.serialize') +def serialize(optimizer): + return serialization_lib.serialize_keras_object(optimizer) + +@keras_export('keras.optimizers.deserialize') +def deserialize(config, custom_objects=None): + if config['class_name'].lower() in ALL_OBJECTS_DICT: + config['class_name'] = config['class_name'].lower() + return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects) + +@keras_export('keras.optimizers.get') +def get(identifier): + if identifier is None: + return None + elif isinstance(identifier, dict): + obj = deserialize(identifier) + elif isinstance(identifier, str): + config = {'class_name': identifier, 'config': {}} + obj = deserialize(config) + else: + obj = identifier + if isinstance(obj, Optimizer): + return obj + raise ValueError(f'Could not interpret optimizer identifier: {identifier}') + +@keras_export(['keras.optimizers.legacy.Adagrad', 'keras.optimizers.legacy.Adam', 'keras.optimizers.legacy.Ftrl', 'keras.optimizers.legacy.RMSprop', 'keras.optimizers.legacy.SGD', 'keras.optimizers.legacy.Optimizer']) +class LegacyOptimizerWarning: + + def __init__(self, *args, **kwargs): + raise ImportError('`keras.optimizers.legacy` is not supported in Keras 3. 
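The registry above powers the usual string/config round trip; a minimal sketch, assuming a working Keras 3 install:

import keras

opt = keras.optimizers.get("adam")  # string lookup, lowercased via ALL_OBJECTS_DICT
cfg = keras.optimizers.serialize(opt)
clone = keras.optimizers.deserialize(cfg)
assert isinstance(clone, keras.optimizers.Adam)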
When using `tf.keras`, to continue using a `tf.keras.optimizers.legacy` optimizer, you can install the `tf_keras` package (Keras 2) and set the environment variable `TF_USE_LEGACY_KERAS=True` to configure TensorFlow to use `tf_keras` when accessing `tf.keras`.') + +# File: keras-master/keras/src/optimizers/adadelta.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer + +@keras_export(['keras.optimizers.Adadelta']) +class Adadelta(optimizer.Optimizer): + + def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-07, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='adadelta', **kwargs): + super().__init__(learning_rate=learning_rate, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, name=name, **kwargs) + self.rho = rho + self.epsilon = epsilon + + def build(self, var_list): + if self.built: + return + super().build(var_list) + self._accumulated_grads = [] + self._accumulated_delta_vars = [] + for var in var_list: + self._accumulated_grads.append(self.add_variable_from_reference(var, 'accumulated_grad')) + self._accumulated_delta_vars.append(self.add_variable_from_reference(var, 'accumulated_delta_var')) + + def update_step(self, grad, variable, learning_rate): + lr = ops.cast(learning_rate, variable.dtype) + grad = ops.cast(grad, variable.dtype) + rho = self.rho + accumulated_grad = self._accumulated_grads[self._get_variable_index(variable)] + accumulated_delta_var = self._accumulated_delta_vars[self._get_variable_index(variable)] + + def rms(x): + return ops.sqrt(ops.add(x, self.epsilon)) + self.assign(accumulated_grad, ops.add(rho * accumulated_grad, ops.multiply(1 - rho, ops.square(grad)))) + delta_var = ops.negative(ops.divide(ops.multiply(rms(accumulated_delta_var), grad), rms(accumulated_grad))) + self.assign(accumulated_delta_var, ops.add(ops.multiply(rho, accumulated_delta_var), ops.multiply(1 - rho, ops.square(delta_var)))) + self.assign_add(variable, ops.multiply(lr, delta_var)) + + def get_config(self): + config = super().get_config() + config.update({'rho': self.rho, 'epsilon': self.epsilon}) + return config +Adadelta.__doc__ = Adadelta.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/adafactor.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer + +@keras_export(['keras.optimizers.Adafactor']) +class Adafactor(optimizer.Optimizer): + + def __init__(self, learning_rate=0.001, beta_2_decay=-0.8, epsilon_1=1e-30, epsilon_2=0.001, clip_threshold=1.0, relative_step=True, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='adafactor', **kwargs): + super().__init__(learning_rate=learning_rate, name=name, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, 
loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, **kwargs) + self.beta_2_decay = beta_2_decay + self.epsilon_1 = epsilon_1 + self.epsilon_2 = epsilon_2 + self.clip_threshold = clip_threshold + self.relative_step = relative_step + + def build(self, var_list): + if self.built: + return + super().build(var_list) + self._r = [] + self._c = [] + self._v = [] + for var in var_list: + if len(var.shape) < 2: + with backend.name_scope(self.name, caller=self): + self._r.append(backend.Variable(0, name=var.name, trainable=False)) + self._c.append(backend.Variable(0, name=var.name, trainable=False)) + else: + r_shape = var.shape[:-1] + c_shape = var.shape[:-2] + (var.shape[-1],) + self._r.append(self.add_variable(shape=r_shape, dtype=var.dtype, name=var.name)) + self._c.append(self.add_variable(shape=c_shape, dtype=var.dtype, name=var.name)) + self._v.append(self.add_variable_from_reference(reference_variable=var, name='velocity')) + + def _rms(self, x): + return ops.sqrt(ops.mean(ops.square(x))) + + def update_step(self, gradient, variable, learning_rate): + lr = ops.cast(learning_rate, variable.dtype) + gradient = ops.cast(gradient, variable.dtype) + epsilon_2 = ops.cast(self.epsilon_2, variable.dtype) + one = ops.cast(1.0, variable.dtype) + local_step = ops.cast(self.iterations + 1, variable.dtype) + if not callable(self._learning_rate) and self.relative_step: + lr = ops.minimum(lr, 1 / ops.sqrt(local_step)) + r = self._r[self._get_variable_index(variable)] + c = self._c[self._get_variable_index(variable)] + v = self._v[self._get_variable_index(variable)] + rho_t = ops.minimum(lr, 1 / ops.sqrt(local_step)) + alpha_t = ops.maximum(epsilon_2, self._rms(variable)) * rho_t + regulated_grad_square = ops.add(ops.square(gradient), self.epsilon_1) + beta_2_t = 1 - ops.power(local_step, self.beta_2_decay) + if len(variable.shape) >= 2: + self.assign(r, beta_2_t * r + (1 - beta_2_t) * ops.mean(regulated_grad_square, axis=-1)) + self.assign(c, beta_2_t * c + (1 - beta_2_t) * ops.mean(regulated_grad_square, axis=-2)) + self.assign(v, ops.expand_dims(r / ops.mean(r, axis=-1, keepdims=True), axis=-1) * ops.expand_dims(c, -2)) + else: + self.assign(v, beta_2_t * v + (1 - beta_2_t) * regulated_grad_square) + u_t = ops.divide(gradient, ops.sqrt(v)) + u_t_hat = ops.divide(u_t, ops.maximum(one, ops.divide(self._rms(u_t), self.clip_threshold))) + self.assign_sub(variable, ops.multiply(alpha_t, u_t_hat)) + + def get_config(self): + config = super().get_config() + config.update({'beta_2_decay': self.beta_2_decay, 'epsilon_1': self.epsilon_1, 'epsilon_2': self.epsilon_2, 'clip_threshold': self.clip_threshold, 'relative_step': self.relative_step}) + return config +Adafactor.__doc__ = Adafactor.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/adagrad.py +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer + +@keras_export(['keras.optimizers.Adagrad']) +class Adagrad(optimizer.Optimizer): + + def __init__(self, learning_rate=0.001, initial_accumulator_value=0.1, epsilon=1e-07, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='adagrad', **kwargs): + super().__init__(learning_rate=learning_rate, weight_decay=weight_decay, clipnorm=clipnorm, 
clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, name=name, **kwargs) + self.initial_accumulator_value = initial_accumulator_value + self.epsilon = epsilon + + def build(self, var_list): + if self.built: + return + super().build(var_list) + self._accumulators = [] + initializer = initializers.Constant(self.initial_accumulator_value) + for var in var_list: + self._accumulators.append(self.add_variable(shape=var.shape, initializer=initializer, dtype=var.dtype, name='accumulator')) + + def update_step(self, gradient, variable, learning_rate): + lr = ops.cast(learning_rate, variable.dtype) + gradient = ops.cast(gradient, variable.dtype) + accumulator = self._accumulators[self._get_variable_index(variable)] + self.assign_add(accumulator, ops.square(gradient)) + self.assign_sub(variable, ops.divide(ops.multiply(lr, gradient), ops.sqrt(ops.add(accumulator, self.epsilon)))) + + def get_config(self): + config = super().get_config() + config.update({'initial_accumulator_value': self.initial_accumulator_value, 'epsilon': self.epsilon}) + return config +Adagrad.__doc__ = Adagrad.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/adam.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer + +@keras_export(['keras.optimizers.Adam']) +class Adam(optimizer.Optimizer): + + def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='adam', **kwargs): + super().__init__(learning_rate=learning_rate, name=name, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, **kwargs) + self.beta_1 = beta_1 + self.beta_2 = beta_2 + self.epsilon = epsilon + self.amsgrad = amsgrad + + def build(self, var_list): + if self.built: + return + super().build(var_list) + self._momentums = [] + self._velocities = [] + for var in var_list: + self._momentums.append(self.add_variable_from_reference(reference_variable=var, name='momentum')) + self._velocities.append(self.add_variable_from_reference(reference_variable=var, name='velocity')) + if self.amsgrad: + self._velocity_hats = [] + for var in var_list: + self._velocity_hats.append(self.add_variable_from_reference(reference_variable=var, name='velocity_hat')) + + def update_step(self, gradient, variable, learning_rate): + lr = ops.cast(learning_rate, variable.dtype) + gradient = ops.cast(gradient, variable.dtype) + local_step = ops.cast(self.iterations + 1, variable.dtype) + beta_1_power = ops.power(ops.cast(self.beta_1, variable.dtype), local_step) + beta_2_power = ops.power(ops.cast(self.beta_2, variable.dtype), local_step) + m = self._momentums[self._get_variable_index(variable)] + v = self._velocities[self._get_variable_index(variable)] + alpha = lr * ops.sqrt(1 - beta_2_power) / (1 - beta_1_power) + self.assign_add(m, ops.multiply(ops.subtract(gradient, m), 1 - self.beta_1)) + 
self.assign_add(v, ops.multiply(ops.subtract(ops.square(gradient), v), 1 - self.beta_2)) + if self.amsgrad: + v_hat = self._velocity_hats[self._get_variable_index(variable)] + self.assign(v_hat, ops.maximum(v_hat, v)) + v = v_hat + self.assign_sub(variable, ops.divide(ops.multiply(m, alpha), ops.add(ops.sqrt(v), self.epsilon))) + + def get_config(self): + config = super().get_config() + config.update({'beta_1': self.beta_1, 'beta_2': self.beta_2, 'epsilon': self.epsilon, 'amsgrad': self.amsgrad}) + return config +Adam.__doc__ = Adam.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/adamax.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer + +@keras_export(['keras.optimizers.Adamax']) +class Adamax(optimizer.Optimizer): + + def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='adamax', **kwargs): + super().__init__(learning_rate=learning_rate, name=name, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, **kwargs) + self.beta_1 = beta_1 + self.beta_2 = beta_2 + self.epsilon = epsilon + + def build(self, var_list): + if self.built: + return + super().build(var_list) + self._m = [] + self._u = [] + for var in var_list: + self._m.append(self.add_variable_from_reference(reference_variable=var, name='momentum')) + self._u.append(self.add_variable_from_reference(reference_variable=var, name='norm')) + + def update_step(self, gradient, variable, learning_rate): + lr = ops.cast(learning_rate, variable.dtype) + gradient = ops.cast(gradient, variable.dtype) + local_step = ops.cast(self.iterations + 1, variable.dtype) + beta_1_power = ops.power(ops.cast(self.beta_1, variable.dtype), local_step) + m = self._m[self._get_variable_index(variable)] + u = self._u[self._get_variable_index(variable)] + self.assign_add(m, ops.multiply(ops.subtract(gradient, m), 1 - self.beta_1)) + self.assign(u, ops.maximum(ops.multiply(self.beta_2, u), ops.abs(gradient))) + self.assign_sub(variable, ops.divide(ops.multiply(lr, m), ops.multiply(1 - beta_1_power, ops.add(u, self.epsilon)))) + + def get_config(self): + config = super().get_config() + config.update({'beta_1': self.beta_1, 'beta_2': self.beta_2, 'epsilon': self.epsilon}) + return config +Adamax.__doc__ = Adamax.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/adamw.py +from keras.src.api_export import keras_export +from keras.src.optimizers import adam +from keras.src.optimizers import optimizer + +@keras_export(['keras.optimizers.AdamW']) +class AdamW(adam.Adam): + + def __init__(self, learning_rate=0.001, weight_decay=0.004, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='adamw', **kwargs): + super().__init__(learning_rate=learning_rate, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, 
amsgrad=amsgrad, name=name, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, **kwargs) + if self.weight_decay is None: + raise ValueError('Argument `weight_decay` must be a float. Received: weight_decay=None') +AdamW.__doc__ = AdamW.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/base_optimizer.py +import re +import warnings +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.optimizers.schedules import learning_rate_schedule +from keras.src.saving import serialization_lib +from keras.src.saving.keras_saveable import KerasSaveable +from keras.src.utils import tracking +from keras.src.utils.naming import auto_name + +class BaseOptimizer(KerasSaveable): + + def __init__(self, learning_rate, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name=None, **kwargs): + self._lock = False + if kwargs.pop('decay', None) is not None: + warnings.warn('Argument `decay` is no longer supported and will be ignored.') + if kwargs: + raise ValueError(f'Argument(s) not recognized: {kwargs}') + if name is None: + name = auto_name(self.__class__.__name__) + self.name = name + self.weight_decay = weight_decay + self.clipnorm = clipnorm + self.global_clipnorm = global_clipnorm + self.clipvalue = clipvalue + self.use_ema = use_ema + self.loss_scale_factor = loss_scale_factor + self.gradient_accumulation_steps = gradient_accumulation_steps + if gradient_accumulation_steps: + if not gradient_accumulation_steps >= 2: + raise ValueError(f'`gradient_accumulation_steps` must be an integer >= 2. Received: gradient_accumulation_steps={gradient_accumulation_steps}') + if use_ema: + if ema_momentum > 1 or ema_momentum < 0: + raise ValueError(f'`ema_momentum` must be in the range [0, 1]. Received: ema_momentum={ema_momentum}') + if ema_overwrite_frequency and (not isinstance(ema_overwrite_frequency, int) or ema_overwrite_frequency < 1): + raise ValueError(f'`ema_overwrite_frequency` must be an integer >= 1 or None. Received: ema_overwrite_frequency={ema_overwrite_frequency}') + self.ema_momentum = ema_momentum + self.ema_overwrite_frequency = ema_overwrite_frequency + clip_args_sum = sum((a is not None for a in [clipnorm, clipvalue, global_clipnorm])) + if clip_args_sum > 1: + raise ValueError(f'Only one of `clipnorm`, `clipvalue` and `global_clipnorm` can be set. 
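To illustrate the mutual-exclusion check above (hedged example; any built-in optimizer behaves the same way):

import keras

keras.optimizers.SGD(clipnorm=1.0)  # fine: a single clipping mode
try:
    keras.optimizers.SGD(clipnorm=1.0, clipvalue=0.5)
except ValueError as err:
    print(err)  # only one of clipnorm/clipvalue/global_clipnorm may be set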
Received: clipnorm={clipnorm}, clipvalue={clipvalue}, global_clipnorm={global_clipnorm}') + self.built = False + self._variables = [] + self._trainable_variables = [] + self._tracker = tracking.Tracker({'variables': (lambda x: isinstance(x, backend.Variable), self._variables)}) + self._trainable_variables_indices = {} + with backend.name_scope(self.name, caller=self): + iterations = backend.Variable(0, name='iteration', dtype='int', trainable=False, aggregation='only_first_replica') + self._track_variable(iterations) + self._iterations = iterations + if isinstance(learning_rate, learning_rate_schedule.LearningRateSchedule): + self._learning_rate = learning_rate + elif callable(learning_rate): + self._learning_rate = learning_rate + else: + if not isinstance(learning_rate, float): + raise ValueError(f'Argument `learning_rate` should be float, or an instance of LearningRateSchedule, or a callable (that takes in the current iteration value and returns the corresponding learning rate value). Received instead: learning_rate={learning_rate}') + with backend.name_scope(self.name, caller=self): + learning_rate = backend.Variable(learning_rate, name='learning_rate', dtype=backend.floatx(), trainable=False, aggregation='only_first_replica') + self._track_variable(learning_rate) + self._learning_rate = learning_rate + + @property + def iterations(self): + if self.gradient_accumulation_steps: + return ops.floor_divide(self._iterations, self.gradient_accumulation_steps) + return self._iterations + + def _track_variable(self, variable): + self._tracker.add_to_store('variables', variable) + + @tracking.no_automatic_dependency_tracking + def build(self, variables): + if self.use_ema: + self._model_variables_moving_average = [] + if self.gradient_accumulation_steps: + self._accumulated_gradients = [] + for (i, variable) in enumerate(variables): + self._trainable_variables_indices[self._var_key(variable)] = i + if self.use_ema: + self._model_variables_moving_average.append(self.add_variable_from_reference(variable, name='average')) + if self.gradient_accumulation_steps: + self._accumulated_gradients.append(self.add_variable_from_reference(variable, name='gradient_accumulator')) + self._trainable_variables = variables[:] + self.built = True + + def _var_key(self, variable): + return id(variable) + + @property + def variables(self): + return self._variables[:] + + def _get_variable_index(self, variable): + return self._trainable_variables_indices[self._var_key(variable)] + + def add_variable(self, shape, initializer='zeros', dtype=None, aggregation='mean', name=None): + self._check_super_called() + initializer = initializers.get(initializer) + with backend.name_scope(self.name, caller=self): + variable = backend.Variable(initializer=initializer, shape=shape, dtype=dtype, trainable=False, aggregation=aggregation, name=name) + self._track_variable(variable) + return variable + + def add_variable_from_reference(self, reference_variable, name=None, initializer='zeros'): + name = name or 'var' + if hasattr(reference_variable, 'path'): + name = reference_variable.path.replace('/', '_') + '_' + name + else: + name = str(reference_variable.name).replace('/', '_').replace(':', '_') + '_' + name + return self.add_variable(shape=reference_variable.shape, initializer=initializer, dtype=reference_variable.dtype, name=name) + + def _check_variables_are_known(self, variables): + for v in variables: + if self._var_key(v) not in self._trainable_variables_indices: + raise ValueError(f'Unknown variable: {v}. 
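A sketch of building an optimizer by hand, which creates the iteration counter, the learning-rate variable, and the per-variable slots tracked above (assumes Keras 3):

import keras

v = keras.Variable([1.0, 2.0], name="w")
opt = keras.optimizers.Adam()
opt.build([v])
print(len(opt.variables))  # iteration + learning_rate + Adam's momentum/velocity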
This optimizer can only be called for the variables it was originally built with. When working with a new set of variables, you should recreate a new optimizer instance.') + + def assign(self, variable, value): + variable.assign(value) + + def assign_add(self, variable, value): + variable.assign_add(value) + + def assign_sub(self, variable, value): + variable.assign_sub(value) + + def update_step(self, gradient, variable, learning_rate): + raise NotImplementedError + + def apply_gradients(self, grads_and_vars): + (grads, trainable_variables) = zip(*grads_and_vars) + self.apply(grads, trainable_variables) + return self._iterations + + def apply(self, grads, trainable_variables=None): + if len(grads) == 0: + return + if trainable_variables is None: + if not self.built: + raise ValueError('When passing `grads` without `variables`, the optimizer must already be built on a list of variables. Call `optimizer.build(trainable_variables)` first.') + if len(grads) != len(self._trainable_variables_indices): + raise ValueError(f'When passing `grads` as a list of gradient tensors, the gradients must match `optimizer.variables` one-to-one. Received a list of {len(grads)} gradients, but the optimizer is tracking {len(self._trainable_variables)} trainable variables.') + trainable_variables = self._trainable_variables + else: + trainable_variables = list(trainable_variables) + if not self.built: + with backend.name_scope(self.name, caller=self): + self.build(trainable_variables) + self.built = True + self._check_variables_are_known(trainable_variables) + with backend.name_scope(self.name, caller=self): + (grads, trainable_variables) = self._overwrite_variables_directly_with_gradients(grads, trainable_variables) + (grads, trainable_variables) = self._filter_empty_gradients(grads, trainable_variables) + if len(list(grads)) == 0: + return + scale = self.loss_scale_factor + if scale is not None: + grads = [g if g is None else g / scale for g in grads] + self._backend_apply_gradients(grads, trainable_variables) + for variable in trainable_variables: + if variable.constraint is not None: + variable.assign(variable.constraint(variable)) + + def _backend_apply_gradients(self, grads, trainable_variables): + if self.gradient_accumulation_steps: + is_update_step = (self._iterations + 1) % self.gradient_accumulation_steps == 0 + acc_grads = [self._accumulated_gradients[self._get_variable_index(v)] for v in trainable_variables] + + def _update_step_fn(grads, trainable_variables): + steps = self.gradient_accumulation_steps + grads = [(g + acc_g) / steps for (g, acc_g) in zip(grads, acc_grads)] + grads = self._clip_gradients(grads) + self._apply_weight_decay(trainable_variables) + self._backend_update_step(grads, trainable_variables, self.learning_rate) + self._backend_reset_gradient_accumulators() + ops.cond(is_update_step, lambda : _update_step_fn(grads, trainable_variables), lambda : self._backend_increment_gradient_accumulators(grads, acc_grads)) + else: + grads = self._clip_gradients(grads) + self._apply_weight_decay(trainable_variables) + self._backend_update_step(grads, trainable_variables, self.learning_rate) + if self.use_ema: + self._update_model_variables_moving_average(self._trainable_variables) + if self.ema_overwrite_frequency: + should_overwrite_model_vars = (self.iterations + 1) % self.ema_overwrite_frequency == 0 + ops.cond(should_overwrite_model_vars, lambda : self._overwrite_model_variables_with_average_value(self._trainable_variables), lambda : None) + self._iterations.assign_add(1) + + def 
_backend_update_step(self, grads, trainable_variables, learning_rate): + for (grad, var) in zip(grads, trainable_variables): + self.update_step(grad, var, learning_rate) + + def _backend_reset_gradient_accumulators(self): + for g_acc in self._accumulated_gradients: + g_acc.assign(ops.zeros(g_acc.shape, dtype=g_acc.dtype)) + + def _backend_increment_gradient_accumulators(self, grads, acc_grads): + new_g_accs = [g + acc_g for (g, acc_g) in zip(grads, acc_grads)] + for (n_g_acc, g_acc) in zip(new_g_accs, acc_grads): + g_acc.assign(n_g_acc) + + def stateless_apply(self, optimizer_variables, grads, trainable_variables): + self._check_super_called() + if not self.built: + raise ValueError(f'To call `stateless_apply`, {self.__class__.__name__} must be built (i.e. its variables must have been created). You can build it via `optimizer.build(trainable_variables)`.') + if len(optimizer_variables) != len(self.variables): + raise ValueError(f'Argument `optimizer_variables` must be a list of tensors corresponding 1:1 to {self.__class__.__name__}().variables. Received list with length {len(optimizer_variables)}, but expected {len(self.variables)} variables.') + if len(trainable_variables) != len(self._trainable_variables): + raise ValueError(f'Argument `optimizer_variables` must be a list of tensors corresponding 1:1 to the trainable variables list that the optimizer was built with. Received len(trainable_variables) == {len(trainable_variables)} whereas the optimizer was built with {len(self._trainable_variables)} variables.') + mapping = list(zip(self._trainable_variables, trainable_variables)) + list(zip(self.variables, optimizer_variables)) + with backend.StatelessScope(state_mapping=mapping) as scope: + self.apply(grads) + trainable_variables = [] + for v in self._trainable_variables: + new_v = scope.get_current_value(v) + if new_v is not None: + trainable_variables.append(new_v) + else: + trainable_variables.append(v) + optimizer_variables = [] + for v in self.variables: + new_v = scope.get_current_value(v) + if new_v is not None: + optimizer_variables.append(new_v) + else: + optimizer_variables.append(v) + return (trainable_variables, optimizer_variables) + + def scale_loss(self, loss): + if self.loss_scale_factor is not None: + return loss * self.loss_scale_factor + return loss + + @property + def learning_rate(self): + return self._get_current_learning_rate() + + @learning_rate.setter + def learning_rate(self, learning_rate): + if isinstance(self._learning_rate, backend.Variable): + prev_lr_var = self._learning_rate + else: + prev_lr_var = None + if isinstance(learning_rate, learning_rate_schedule.LearningRateSchedule): + self._learning_rate = learning_rate + elif callable(learning_rate): + self._learning_rate = learning_rate + else: + if isinstance(self._learning_rate, learning_rate_schedule.LearningRateSchedule): + raise TypeError('This optimizer was created with a `LearningRateSchedule` object as its `learning_rate` constructor argument, hence its learning rate is not settable. If you need the learning rate to be settable, you should instantiate the optimizer with a float `learning_rate` argument.') + self._learning_rate.assign(learning_rate) + if prev_lr_var is not None and (not isinstance(self._learning_rate, backend.Variable)): + self._untrack_variable(prev_lr_var) + + def set_weights(self, weights): + if not self.built: + raise ValueError('You are calling `set_weights()` on an optimizer that has not yet been built. 
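A hedged sketch of the functional update path defined above (this is the route the JAX backend takes; `.value` is used to pass plain tensors rather than variables):

import keras
import numpy as np

v = keras.Variable(np.zeros((3,), "float32"))
opt = keras.optimizers.SGD(learning_rate=0.5)
opt.build([v])
grads = [np.ones((3,), "float32")]
new_train_vars, new_opt_vars = opt.stateless_apply(
    [ov.value for ov in opt.variables], grads, [v.value]
)  # returns updated values; nothing is mutated in place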
Please call `optimizer.build(trainable_variables)` to create the optimizer weights before calling `set_weights()`.') + for (variable, weight) in zip(self._variables, weights): + if variable.shape != weight.shape: + raise ValueError(f'Optimizer variable {self._var_key(variable)} has shape {str(variable.shape)} not compatible with provided weight shape {str(weight.shape)}.') + variable.assign(weight) + + def save_own_variables(self, store): + for (i, variable) in enumerate(self.variables): + store[str(i)] = variable.numpy() + + def load_own_variables(self, store): + if len(store.keys()) != len(self.variables): + msg = f"Skipping variable loading for optimizer '{self.name}', because it has {len(self.variables)} variables whereas the saved optimizer has {len(store.keys())} variables. " + if len(self.variables) == 0: + msg += 'This is likely because the optimizer has not been called/built yet.' + warnings.warn(msg, stacklevel=2) + return + for (i, variable) in enumerate(self.variables): + variable.assign(store[str(i)]) + + def _get_current_learning_rate(self): + if isinstance(self._learning_rate, learning_rate_schedule.LearningRateSchedule): + return self._learning_rate(self._iterations) + elif callable(self._learning_rate): + return self._learning_rate() + return self._learning_rate + + def _overwrite_variables_directly_with_gradients(self, grads, vars): + if not hasattr(vars[0], 'overwrite_with_gradient'): + return (grads, vars) + filtered_grads = list(grads) + filtered_vars = list(vars) + for i in range(len(filtered_grads) - 1, -1, -1): + (g, v) = (filtered_grads[i], filtered_vars[i]) + if v.overwrite_with_gradient: + if self.gradient_accumulation_steps: + steps = self.gradient_accumulation_steps + is_update_step = (self._iterations + 1) % steps == 0 + acc_g = self._accumulated_gradients[self._get_variable_index(v)] + new_g_acc = ops.cond(is_update_step, lambda : ops.zeros(g.shape, dtype=g.dtype), lambda : ops.maximum(g, acc_g)) + new_g = ops.cond(is_update_step, lambda : ops.maximum(g, acc_g), lambda : g) + new_v = ops.cond(is_update_step, lambda : new_g, lambda : v.value) + v.assign(new_v) + acc_g.assign(new_g_acc) + else: + v.assign(g) + filtered_grads.pop(i) + filtered_vars.pop(i) + return (filtered_grads, filtered_vars) + + def _filter_empty_gradients(self, grads, vars): + filtered_grads = list(grads) + filtered_vars = list(vars) + missing_grad_vars = [] + for i in range(len(filtered_grads) - 1, -1, -1): + if filtered_grads[i] is None: + filtered_grads.pop(i) + v = filtered_vars.pop(i) + missing_grad_vars.append(v.name) + if not filtered_grads: + raise ValueError('No gradients provided for any variable.') + if missing_grad_vars: + warnings.warn(f'Gradients do not exist for variables {list(reversed(missing_grad_vars))} when minimizing the loss. 
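The positional store format used by save_own_variables/load_own_variables above, as a small sketch (assumes Keras 3):

import keras

opt = keras.optimizers.Adam()
opt.build([keras.Variable([1.0])])
store = {}
opt.save_own_variables(store)  # keys "0", "1", ... mapped to numpy arrays
opt.load_own_variables(store)  # restores positionally; warns on count mismatch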
If using `model.compile()`, did you forget to provide a `loss` argument?') + return (filtered_grads, filtered_vars) + + def _clip_gradients(self, grads): + if self.clipnorm and self.clipnorm > 0: + return [self._clip_by_norm(g) if g is not None else g for g in grads] + elif self.global_clipnorm and self.global_clipnorm > 0: + return clip_by_global_norm(grads, self.global_clipnorm) + elif self.clipvalue and self.clipvalue > 0: + v = self.clipvalue + return [ops.clip(g, -v, v) if g is not None else g for g in grads] + else: + return grads + + def exclude_from_weight_decay(self, var_list=None, var_names=None): + if hasattr(self, '_built') and self._built: + raise ValueError('`exclude_from_weight_decay()` can only be configured before the optimizer is built.') + if var_list: + self._exclude_from_weight_decay = set((self._var_key(variable) for variable in var_list)) + else: + self._exclude_from_weight_decay = set() + if var_names and len(var_names) > 0: + self._exclude_from_weight_decay_pattern = re.compile('|'.join(set(var_names))) + else: + self._exclude_from_weight_decay_pattern = None + self._exclude_from_weight_decay_cache = dict() + + def _use_weight_decay(self, variable): + variable_id = self._var_key(variable) + if not hasattr(self, '_exclude_from_weight_decay_cache'): + self._exclude_from_weight_decay_cache = dict() + if variable_id in self._exclude_from_weight_decay_cache: + return self._exclude_from_weight_decay_cache[variable_id] + exclude_from_weight_decay = getattr(self, '_exclude_from_weight_decay', set()) + exclude_from_weight_decay_pattern = getattr(self, '_exclude_from_weight_decay_pattern', None) + if variable_id in exclude_from_weight_decay: + self._exclude_from_weight_decay_cache[variable_id] = False + return False + if exclude_from_weight_decay_pattern is not None: + if re.search(exclude_from_weight_decay_pattern, variable.name) is not None: + self._exclude_from_weight_decay_cache[variable_id] = False + return False + self._exclude_from_weight_decay_cache[variable_id] = True + return True + + def _apply_weight_decay(self, variables): + if self.weight_decay is None: + return + for variable in variables: + if self._use_weight_decay(variable): + lr = ops.cast(self.learning_rate, variable.dtype) + wd = ops.cast(self.weight_decay, variable.dtype) + variable.assign(variable - variable * wd * lr) + + def _check_super_called(self): + if not hasattr(self, '_lock'): + raise RuntimeError(f"In optimizer '{self.__class__.__name__}', you forgot to call `super().__init__()` as the first statement in the `__init__()` method. Go add it!") + + def _update_model_variables_moving_average(self, trainable_variables): + if self.use_ema: + for (var, average) in zip(trainable_variables, self._model_variables_moving_average): + not_first_step = ops.not_equal(self.iterations, 0) + momentum = ops.cast(not_first_step, var.dtype) * self.ema_momentum + average.assign(momentum * average + (1 - momentum) * var) + + def _overwrite_model_variables_with_average_value(self, trainable_variables): + if len(trainable_variables) != len(self._model_variables_moving_average): + raise ValueError(f'The length of model variables ({len(trainable_variables)}) to override does not match the length of model variables stored in the optimizer ({len(self._model_variables_moving_average)}). 
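Typical use of the exclusion hook above; note it must run before the optimizer is built, and the names are joined into a single regex (sketch):

import keras

opt = keras.optimizers.AdamW(weight_decay=0.01)
opt.exclude_from_weight_decay(var_names=["bias"])  # skip decay on matching names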
Please check if the optimizer was called on your model.') + for (var, average_var) in zip(trainable_variables, self._model_variables_moving_average): + var.assign(average_var) + + def finalize_variable_values(self, var_list): + if self.use_ema: + self._overwrite_model_variables_with_average_value(var_list) + + def _obj_type(self): + return 'Optimizer' + + def get_config(self): + if isinstance(self._learning_rate, learning_rate_schedule.LearningRateSchedule): + learning_rate = learning_rate_schedule.serialize(self._learning_rate) + elif isinstance(self._learning_rate, backend.Variable): + learning_rate = float(self._learning_rate.numpy()) + elif ops.is_tensor(self._learning_rate): + learning_rate = float(self._learning_rate) + elif callable(self._learning_rate): + learning_rate = serialization_lib.serialize_keras_object(self._learning_rate) + config = {'name': self.name, 'learning_rate': learning_rate, 'weight_decay': self.weight_decay, 'clipnorm': self.clipnorm, 'global_clipnorm': self.global_clipnorm, 'clipvalue': self.clipvalue, 'use_ema': self.use_ema, 'ema_momentum': self.ema_momentum, 'ema_overwrite_frequency': self.ema_overwrite_frequency, 'loss_scale_factor': self.loss_scale_factor, 'gradient_accumulation_steps': self.gradient_accumulation_steps} + return config + + @classmethod + def from_config(cls, config, custom_objects=None): + if 'learning_rate' in config: + if isinstance(config['learning_rate'], dict): + config['learning_rate'] = serialization_lib.deserialize_keras_object(config['learning_rate'], custom_objects=custom_objects) + return cls(**config) + + def __setattr__(self, name, value): + if name != '_lock': + self._check_super_called() + if hasattr(self, '_tracker'): + value = self._tracker.track(value) + return super().__setattr__(name, value) + + def _clip_by_norm(self, values, axes=None): + l2sum = ops.sum(ops.square(values), axes, keepdims=True) + pred = l2sum > 0 + l2sum_safe = ops.where(pred, l2sum, ops.ones_like(l2sum)) + l2norm = ops.where(pred, ops.sqrt(l2sum_safe), l2sum) + intermediate = ops.multiply(values, self.clipnorm) + values_clip = ops.convert_to_tensor(intermediate) / ops.maximum(l2norm, self.clipnorm) + return values_clip + + def _untrack_variable(self, variable): + previous_lock_state = self._tracker.locked + self._tracker.unlock() + self._tracker.untrack(variable) + if previous_lock_state is True: + self._tracker.lock() +base_optimizer_keyword_args = 'name: String. The name to use\n for momentum accumulator weights created by\n the optimizer.\n weight_decay: Float. If set, weight decay is applied.\n clipnorm: Float. If set, the gradient of each weight is individually\n clipped so that its norm is no higher than this value.\n clipvalue: Float. If set, the gradient of each weight is clipped to be\n no higher than this value.\n global_clipnorm: Float. If set, the gradient of all weights is clipped\n so that their global norm is no higher than this value.\n use_ema: Boolean, defaults to `False`.\n If `True`, exponential moving average\n (EMA) is applied. EMA consists of computing an exponential moving\n average of the weights of the model (as the weight values change\n after each training batch), and periodically overwriting the\n weights with their moving average.\n ema_momentum: Float, defaults to 0.99. 
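A minimal EMA round trip exercising the machinery above (hedged; the averages are folded back into the variables at the end):

import keras

v = keras.Variable([0.0])
opt = keras.optimizers.SGD(learning_rate=1.0, use_ema=True, ema_momentum=0.9)
opt.apply([keras.ops.ones((1,))], [v])
opt.finalize_variable_values([v])  # overwrite v with its moving average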
Only used if `use_ema=True`.\n This is the momentum to use when computing\n the EMA of the model\'s weights:\n `new_average = ema_momentum * old_average + (1 - ema_momentum) *\n current_variable_value`.\n ema_overwrite_frequency: Int or None, defaults to None. Only used if\n `use_ema=True`. Every `ema_overwrite_frequency` steps of iterations,\n we overwrite the model variable by its moving average.\n If None, the optimizer\n does not overwrite model variables in the middle of training,\n and you need to explicitly overwrite the variables\n at the end of training by calling\n `optimizer.finalize_variable_values()` (which updates the model\n variables in-place). When using the built-in `fit()` training loop,\n this happens automatically after the last epoch,\n and you don\'t need to do anything.\n loss_scale_factor: Float or `None`. If a float, the scale factor will\n be multiplied the loss before computing gradients, and the inverse\n of the scale factor will be multiplied by the gradients before\n updating variables. Useful for preventing underflow during\n mixed precision training. Alternately,\n `keras.optimizers.LossScaleOptimizer` will\n automatically set a loss scale factor.\n gradient_accumulation_steps: Int or `None`. If an int, model & optimizer\n variables will not be updated at every step; instead they will be\n updated every `gradient_accumulation_steps` steps, using the average\n value of the gradients since the last update. This is known as\n "gradient accumulation". This can be useful\n when your batch size is very small, in order to reduce gradient\n noise at each update step. EMA frequency will look at "accumulated"\n iterations value (optimizer steps // gradient_accumulation_steps).\n Learning rate schedules will look at "real" iterations value\n (optimizer steps).\n' + +def global_norm(value_list): + squared_norms = [ops.sum(ops.square(v)) for v in value_list if v is not None] + squared_norm = ops.sum(ops.stack(squared_norms)) + return ops.sqrt(squared_norm) + +def clip_by_global_norm(value_list, clip_norm): + use_norm = global_norm(value_list) + scale_for_finite = clip_norm * ops.minimum(1.0 / use_norm, 1.0 / clip_norm) + scale = scale_for_finite + (use_norm - use_norm) + return [v * scale if v is not None else v for v in value_list] + +# File: keras-master/keras/src/optimizers/ftrl.py +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer + +@keras_export(['keras.optimizers.Ftrl']) +class Ftrl(optimizer.Optimizer): + + def __init__(self, learning_rate=0.001, learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, l2_shrinkage_regularization_strength=0.0, beta=0.0, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='ftrl', **kwargs): + super().__init__(learning_rate=learning_rate, name=name, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, **kwargs) + if initial_accumulator_value < 0.0: + raise ValueError(f'`initial_accumulator_value` needs to be positive or zero. 
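The global-norm clipping math from clip_by_global_norm above, worked by hand in NumPy:

import numpy as np

g1, g2 = np.array([3.0, 0.0]), np.array([0.0, 4.0])
global_norm = np.sqrt(np.sum(g1**2) + np.sum(g2**2))         # 5.0
clip_norm = 1.0
scale = clip_norm * min(1.0 / global_norm, 1.0 / clip_norm)  # 0.2
assert np.allclose([np.linalg.norm(g * scale) for g in (g1, g2)], [0.6, 0.8])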
Received: initial_accumulator_value={initial_accumulator_value}.') + if learning_rate_power > 0.0: + raise ValueError(f'`learning_rate_power` needs to be negative or zero. Received: learning_rate_power={learning_rate_power}.') + if l1_regularization_strength < 0.0: + raise ValueError(f'`l1_regularization_strength` needs to be positive or zero. Received: l1_regularization_strength={l1_regularization_strength}.') + if l2_regularization_strength < 0.0: + raise ValueError(f'`l2_regularization_strength` needs to be positive or zero. Received: l2_regularization_strength={l2_regularization_strength}.') + if l2_shrinkage_regularization_strength < 0.0: + raise ValueError(f'`l2_shrinkage_regularization_strength` needs to be positive or zero. Received: l2_shrinkage_regularization_strength={l2_shrinkage_regularization_strength}.') + self.learning_rate_power = learning_rate_power + self.initial_accumulator_value = initial_accumulator_value + self.l1_regularization_strength = l1_regularization_strength + self.l2_regularization_strength = l2_regularization_strength + self.l2_shrinkage_regularization_strength = l2_shrinkage_regularization_strength + self.beta = beta + + def build(self, var_list): + if self.built: + return + super().build(var_list) + self._accumulators = [] + self._linears = [] + for var in var_list: + self._accumulators.append(self.add_variable(shape=var.shape, dtype=var.dtype, name='accumulator', initializer=initializers.Constant(self.initial_accumulator_value))) + self._linears.append(self.add_variable_from_reference(reference_variable=var, name='linear')) + + def update_step(self, gradient, variable, learning_rate): + lr = ops.cast(learning_rate, variable.dtype) + gradient = ops.cast(gradient, variable.dtype) + accum = self._accumulators[self._get_variable_index(variable)] + linear = self._linears[self._get_variable_index(variable)] + lr_power = self.learning_rate_power + l2_reg = self.l2_regularization_strength + l2_reg = l2_reg + self.beta / (2.0 * lr) + grad_to_use = ops.add(gradient, ops.multiply(2 * self.l2_shrinkage_regularization_strength, variable)) + new_accum = ops.add(accum, ops.square(gradient)) + self.assign_add(linear, ops.subtract(grad_to_use, ops.multiply(ops.divide(ops.subtract(ops.power(new_accum, -lr_power), ops.power(accum, -lr_power)), lr), variable))) + quadratic = ops.add(ops.divide(ops.power(new_accum, -lr_power), lr), 2 * l2_reg) + linear_clipped = ops.clip(linear, -self.l1_regularization_strength, self.l1_regularization_strength) + self.assign(variable, ops.divide(ops.subtract(linear_clipped, linear), quadratic)) + self.assign(accum, new_accum) + + def get_config(self): + config = super().get_config() + config.update({'learning_rate_power': self.learning_rate_power, 'initial_accumulator_value': self.initial_accumulator_value, 'l1_regularization_strength': self.l1_regularization_strength, 'l2_regularization_strength': self.l2_regularization_strength, 'l2_shrinkage_regularization_strength': self.l2_shrinkage_regularization_strength, 'beta': self.beta}) + return config +Ftrl.__doc__ = Ftrl.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/lamb.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer + +@keras_export('keras.optimizers.Lamb') +class Lamb(optimizer.Optimizer): + + def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, weight_decay=None, clipnorm=None, clipvalue=None, 
global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='lamb', **kwargs): + super().__init__(learning_rate=learning_rate, name=name, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, **kwargs) + self.beta_1 = beta_1 + self.beta_2 = beta_2 + self.epsilon = epsilon + + def build(self, var_list): + if self.built: + return + super().build(var_list) + self._momentums = [] + self._velocities = [] + for var in var_list: + self._momentums.append(self.add_variable_from_reference(reference_variable=var, name='momentum')) + self._velocities.append(self.add_variable_from_reference(reference_variable=var, name='velocity')) + + def update_step(self, gradient, variable, learning_rate): + lr = ops.cast(learning_rate, variable.dtype) + gradient = ops.cast(gradient, variable.dtype) + local_step = ops.cast(self.iterations + 1, variable.dtype) + beta_1_power = ops.power(ops.cast(self.beta_1, variable.dtype), local_step) + beta_2_power = ops.power(ops.cast(self.beta_2, variable.dtype), local_step) + m = self._momentums[self._get_variable_index(variable)] + v = self._velocities[self._get_variable_index(variable)] + self.assign_add(m, ops.multiply(ops.subtract(gradient, m), 1 - self.beta_1)) + self.assign_add(v, ops.multiply(ops.subtract(ops.square(gradient), v), 1 - self.beta_2)) + m_t_hat = ops.divide(m, 1.0 - beta_1_power) + v_sqrt = ops.add(ops.sqrt(ops.divide(v, 1.0 - beta_2_power)), self.epsilon) + update = ops.divide(m_t_hat, v_sqrt) + w_norm = ops.sqrt(ops.sum(ops.power(variable, 2))) + g_norm = ops.sqrt(ops.sum(ops.power(update, 2))) + ratio = ops.where(ops.greater(w_norm, 0), ops.where(ops.greater(g_norm, 0), w_norm / g_norm, 1.0), 1.0) + self.assign_sub(variable, ratio * lr * update) + + def get_config(self): + config = super().get_config() + config.update({'beta_1': self.beta_1, 'beta_2': self.beta_2, 'epsilon': self.epsilon}) + return config +Lamb.__doc__ = Lamb.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/lion.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer + +@keras_export(['keras.optimizers.Lion']) +class Lion(optimizer.Optimizer): + + def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.99, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='lion', **kwargs): + super().__init__(learning_rate=learning_rate, name=name, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, **kwargs) + self.beta_1 = beta_1 + self.beta_2 = beta_2 + if beta_1 <= 0 or beta_1 > 1: + raise ValueError(f'Argument `beta_1` must be in the [0, 1] range. Otherwise, the optimizer degenerates to SignSGD. 
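One Lion step in plain NumPy, mirroring the update_step below: the applied update is only a sign, while the momentum tracks the gradient slowly:

import numpy as np

lr, beta_1, beta_2 = 0.001, 0.9, 0.99
w, m, g = np.zeros(2), np.array([0.5, -0.5]), np.array([1.0, -1.0])
w -= lr * np.sign(beta_1 * m + (1 - beta_1) * g)  # interpolated direction
m = beta_2 * m + (1 - beta_2) * g                 # slow momentum update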
Received: beta_1={beta_1}.') + + def build(self, var_list): + if self.built: + return + super().build(var_list) + self._momentums = [] + for var in var_list: + self._momentums.append(self.add_variable_from_reference(reference_variable=var, name='momentum')) + + def update_step(self, gradient, variable, learning_rate): + lr = ops.cast(learning_rate, variable.dtype) + gradient = ops.cast(gradient, variable.dtype) + beta_1 = ops.cast(self.beta_1, variable.dtype) + beta_2 = ops.cast(self.beta_2, variable.dtype) + m = self._momentums[self._get_variable_index(variable)] + self.assign_sub(variable, ops.multiply(lr, ops.sign(ops.add(ops.multiply(m, beta_1), ops.multiply(gradient, 1.0 - beta_1))))) + self.assign(m, ops.add(ops.multiply(m, beta_2), ops.multiply(gradient, 1.0 - beta_2))) + + def get_config(self): + config = super().get_config() + config.update({'beta_1': self.beta_1, 'beta_2': self.beta_2}) + return config +Lion.__doc__ = Lion.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/loss_scale_optimizer.py +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer +from keras.src.saving import serialization_lib +from keras.src.utils import tracking + +@keras_export(['keras.optimizers.LossScaleOptimizer', 'keras.mixed_precision.LossScaleOptimizer']) +class LossScaleOptimizer(optimizer.Optimizer): + + def __init__(self, inner_optimizer, initial_scale=2.0 ** 15, dynamic_growth_steps=2000, **kwargs): + if not kwargs.pop('dynamic', True): + raise ValueError('LossScaleOptimizer no longer supports `dynamic=False`. Instead, simply set `loss_scale_factor` directly on the `inner_optimizer`.') + super().__init__(learning_rate=0.0, **kwargs) + self.inner_optimizer = inner_optimizer + self.initial_scale = initial_scale + self.dynamic_growth_steps = dynamic_growth_steps + + @tracking.no_automatic_dependency_tracking + def build(self, var_list): + self.step_counter = self.add_variable(shape=(), dtype='int', initializer=initializers.Zeros(), name='step_counter') + self.dynamic_scale = self.add_variable(shape=(), dtype='float32', initializer=initializers.Constant(self.initial_scale), name='dynamic_scale') + self.inner_optimizer.build(var_list) + self.built = True + + @property + def variables(self): + return self._variables + self.inner_optimizer.variables + + def stateless_apply(self, optimizer_variables, grads, trainable_variables): + if not self.built: + raise ValueError(f'To call `stateless_apply`, {self.__class__.__name__} must be built (i.e. its variables must have been created). 
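Wrapping an inner optimizer for dynamic loss scaling, per the class above (sketch, assumes Keras 3):

import keras

inner = keras.optimizers.SGD(learning_rate=0.01)
opt = keras.optimizers.LossScaleOptimizer(inner)
print(opt.scale_loss(1.0))  # 2.0**15 until the first build updates the scale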
You can build it via `optimizer.build(trainable_variables)`.') + finite = self.check_finite(grads) + return ops.cond(finite, lambda : self._stateless_handle_finite_grads(optimizer_variables, grads, trainable_variables), lambda : self._stateless_handle_non_finite_grads(optimizer_variables, trainable_variables)) + + def _stateless_handle_finite_grads(self, optimizer_variables, grads, trainable_variables): + + def upscale(): + mapping = list(zip(self.variables, optimizer_variables)) + with backend.StatelessScope(state_mapping=mapping) as scope: + self.step_counter.assign(0) + self.dynamic_scale.assign(self.dynamic_scale * 2.0) + return [scope.get_current_value(v) for v in self._variables] + + def increment(): + mapping = list(zip(self.variables, optimizer_variables)) + with backend.StatelessScope(state_mapping=mapping) as scope: + self.step_counter.assign_add(1) + return [scope.get_current_value(v) for v in self._variables] + mapping = list(zip(self.variables, optimizer_variables)) + with backend.StatelessScope(state_mapping=mapping): + own_variables = ops.cond(ops.equal(self.step_counter, self.dynamic_growth_steps - 1), upscale, increment) + scale = self.dynamic_scale + unscaled_grads = [g if g is None else ops.divide(g, scale) for g in grads] + (new_trainable_variables, new_inner_variables) = self.inner_optimizer.stateless_apply(self.inner_optimizer.variables, unscaled_grads, trainable_variables) + new_optimizer_variables = own_variables + new_inner_variables + return (new_trainable_variables, new_optimizer_variables) + + def _stateless_handle_non_finite_grads(self, optimizer_variables, trainable_variables): + mapping = list(zip(self.variables, optimizer_variables)) + with backend.StatelessScope(state_mapping=mapping) as scope: + self.step_counter.assign(0) + self.dynamic_scale.assign(self.dynamic_scale / 2.0) + new_optimizer_variables = [] + for v in self.variables: + new_optimizer_variables.append(scope.get_current_value(v)) + return (trainable_variables, new_optimizer_variables) + + def apply(self, grads, trainable_variables=None): + if not self.built: + with backend.name_scope(self.name, caller=self): + self.build(trainable_variables) + self.built = True + if backend.backend() == 'tensorflow': + self._tf_apply(grads, trainable_variables) + else: + self._common_apply(grads, trainable_variables) + + def _stateful_handle_finite_grads(self, grads, trainable_variables): + scale = self.dynamic_scale + unscaled_grads = [g if g is None else ops.divide(g, scale) for g in grads] + self.inner_optimizer.apply(unscaled_grads, trainable_variables=trainable_variables) + + def upscale(): + self.step_counter.assign(0) + self.dynamic_scale.assign(self.dynamic_scale * 2.0) + + def increment(): + self.step_counter.assign_add(1) + ops.cond(ops.equal(self.step_counter, self.dynamic_growth_steps - 1), upscale, increment) + + def _stateful_handle_non_finite_grads(self): + self.step_counter.assign(0) + self.dynamic_scale.assign(self.dynamic_scale / 2.0) + + def _common_apply(self, grads, trainable_variables=None): + finite = self.check_finite(grads) + ops.cond(finite, lambda : self._stateful_handle_finite_grads(grads, trainable_variables), self._stateful_handle_non_finite_grads) + + def _tf_apply(self, grads, trainable_variables=None): + from keras.src.utils.module_utils import tensorflow as tf + if tf.distribute.in_cross_replica_context(): + raise ValueError('apply() must be called in a replica context.') + if tf.__internal__.distribute.strategy_supports_no_merge_call(): + self._common_apply(grads, 
trainable_variables=trainable_variables) + else: + + def _handle_cross_replica(distribution, grads, trainable_variables): + finite_per_replica = distribution.extended.call_for_each_replica(self.check_finite, args=(grads,)) + finite = distribution.experimental_local_results(finite_per_replica)[0] + + def apply_fn(): + distribution.extended.call_for_each_replica(self._stateful_handle_finite_grads, args=(grads, trainable_variables)) + ops.cond(finite, apply_fn, self._stateful_handle_non_finite_grads) + tf.distribute.get_replica_context().merge_call(_handle_cross_replica, args=(grads, trainable_variables)) + + def check_finite(self, grads): + tensor_grads = [g for g in grads if g is not None] + finite_grads = [ops.all(ops.isfinite(g)) for g in tensor_grads] + return ops.all(ops.convert_to_tensor(finite_grads)) + + @property + def learning_rate(self): + return self.inner_optimizer.learning_rate + + @learning_rate.setter + def learning_rate(self, learning_rate): + self.inner_optimizer.learning_rate = learning_rate + + def scale_loss(self, loss): + scale = self.dynamic_scale if self.built else self.initial_scale + return loss * scale + + def finalize_variable_values(self, var_list): + self.inner_optimizer.finalize_variable_values(var_list) + + def get_config(self): + config = super().get_config() + inner_optimizer_config = serialization_lib.serialize_keras_object(self.inner_optimizer) + config.update({'inner_optimizer': inner_optimizer_config, 'initial_scale': self.initial_scale, 'dynamic_growth_steps': self.dynamic_growth_steps}) + del config['learning_rate'] + return config + + @classmethod + def from_config(cls, config, custom_objects=None): + inner_optimizer = serialization_lib.deserialize_keras_object(config.pop('inner_optimizer'), custom_objects=custom_objects) + return cls(inner_optimizer, **config) +LossScaleOptimizer.__doc__ = LossScaleOptimizer.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/nadam.py +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer + +@keras_export(['keras.optimizers.Nadam']) +class Nadam(optimizer.Optimizer): + + def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='nadam', **kwargs): + super().__init__(learning_rate=learning_rate, name=name, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, **kwargs) + self.beta_1 = beta_1 + self.beta_2 = beta_2 + self.epsilon = epsilon + + def build(self, var_list): + if self.built: + return + if var_list: + dtype = var_list[0].dtype + else: + dtype = backend.floatx() + super().build(var_list) + self._momentums = [] + self._velocities = [] + self._u_product = backend.Variable(1.0, dtype=dtype) + for var in var_list: + self._momentums.append(self.add_variable_from_reference(reference_variable=var, name='momentum')) + self._velocities.append(self.add_variable_from_reference(reference_variable=var, name='velocity')) + + def _backend_update_step(self, grads, trainable_variables, learning_rate): + 
dtype = self._u_product.dtype + self.assign(self._u_product, self._u_product * self.beta_1 * (1.0 - 0.5 * ops.power(0.96, ops.cast(self.iterations + 1, dtype)))) + super()._backend_update_step(grads, trainable_variables, learning_rate) + + def update_step(self, gradient, variable, learning_rate): + var_dtype = variable.dtype + lr = ops.cast(learning_rate, var_dtype) + gradient = ops.cast(gradient, var_dtype) + local_step = ops.cast(self.iterations + 1, var_dtype) + next_step = ops.cast(self.iterations + 2, var_dtype) + decay = ops.cast(0.96, var_dtype) + beta_1 = ops.cast(self.beta_1, var_dtype) + beta_2 = ops.cast(self.beta_2, var_dtype) + u_t = beta_1 * (1.0 - 0.5 * ops.power(decay, local_step)) + u_t_1 = beta_1 * (1.0 - 0.5 * ops.power(decay, next_step)) + u_product_t = ops.cast(self._u_product, var_dtype) + u_product_t_1 = u_product_t * u_t_1 + beta_2_power = ops.power(beta_2, local_step) + m = self._momentums[self._get_variable_index(variable)] + v = self._velocities[self._get_variable_index(variable)] + self.assign_add(m, ops.multiply(ops.subtract(gradient, m), 1 - beta_1)) + self.assign_add(v, ops.multiply(ops.subtract(ops.square(gradient), v), 1 - beta_2)) + m_hat = ops.add(ops.divide(ops.multiply(u_t_1, m), 1 - u_product_t_1), ops.divide(ops.multiply(1 - u_t, gradient), 1 - u_product_t)) + v_hat = ops.divide(v, 1 - beta_2_power) + self.assign_sub(variable, ops.divide(ops.multiply(m_hat, lr), ops.add(ops.sqrt(v_hat), self.epsilon))) + + def get_config(self): + config = super().get_config() + config.update({'beta_1': self.beta_1, 'beta_2': self.beta_2, 'epsilon': self.epsilon}) + return config +Nadam.__doc__ = Nadam.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/optimizer.py +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.optimizers import base_optimizer +if backend.backend() == 'tensorflow': + from keras.src.backend.tensorflow.optimizer import TFOptimizer as BackendOptimizer +elif backend.backend() == 'torch': + from keras.src.backend.torch.optimizers import TorchOptimizer as BackendOptimizer +elif backend.backend() == 'jax': + from keras.src.backend.jax.optimizer import JaxOptimizer as BackendOptimizer +else: + + class BackendOptimizer(base_optimizer.BaseOptimizer): + pass + +@keras_export(['keras.Optimizer', 'keras.optimizers.Optimizer']) +class Optimizer(BackendOptimizer, base_optimizer.BaseOptimizer): + pass +base_optimizer_keyword_args = base_optimizer.base_optimizer_keyword_args + +# File: keras-master/keras/src/optimizers/rmsprop.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer + +@keras_export(['keras.optimizers.RMSprop']) +class RMSprop(optimizer.Optimizer): + + def __init__(self, learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-07, centered=False, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='rmsprop', **kwargs): + super().__init__(learning_rate=learning_rate, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, name=name, **kwargs) + self.rho = rho + self.momentum = momentum + 
self.epsilon = epsilon + self.centered = centered + + def build(self, var_list): + if self.built: + return + super().build(var_list) + self._velocities = [] + for var in var_list: + self._velocities.append(self.add_variable_from_reference(var, 'velocity')) + self._momentums = [] + if self.momentum > 0: + for var in var_list: + self._momentums.append(self.add_variable_from_reference(var, 'momentum')) + self._average_gradients = [] + if self.centered: + for var in var_list: + self._average_gradients.append(self.add_variable_from_reference(var, 'average_gradient')) + + def update_step(self, gradient, variable, learning_rate): + lr = ops.cast(learning_rate, variable.dtype) + gradient = ops.cast(gradient, variable.dtype) + velocity = self._velocities[self._get_variable_index(variable)] + momentum = None + if self.momentum > 0: + momentum = self._momentums[self._get_variable_index(variable)] + average_grad = None + if self.centered: + average_grad = self._average_gradients[self._get_variable_index(variable)] + rho = self.rho + self.assign(velocity, ops.add(ops.multiply(rho, velocity), ops.multiply(1 - rho, ops.square(gradient)))) + if self.centered: + self.assign(average_grad, ops.add(ops.multiply(rho, average_grad), ops.multiply(1 - rho, gradient))) + denominator = velocity - ops.square(average_grad) + self.epsilon + else: + denominator = ops.add(velocity, self.epsilon) + increment = ops.divide(ops.multiply(lr, gradient), ops.sqrt(denominator)) + if self.momentum > 0: + self.assign(momentum, ops.add(ops.multiply(self.momentum, momentum), increment)) + self.assign_sub(variable, momentum) + else: + self.assign_sub(variable, increment) + + def get_config(self): + config = super().get_config() + config.update({'rho': self.rho, 'momentum': self.momentum, 'epsilon': self.epsilon, 'centered': self.centered}) + return config +RMSprop.__doc__ = RMSprop.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/optimizers/schedules/__init__.py +from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay +from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecayRestarts +from keras.src.optimizers.schedules.learning_rate_schedule import ExponentialDecay +from keras.src.optimizers.schedules.learning_rate_schedule import InverseTimeDecay +from keras.src.optimizers.schedules.learning_rate_schedule import PiecewiseConstantDecay +from keras.src.optimizers.schedules.learning_rate_schedule import PolynomialDecay + +# File: keras-master/keras/src/optimizers/schedules/learning_rate_schedule.py +"""""" +import math +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.saving import serialization_lib + +@keras_export('keras.optimizers.schedules.LearningRateSchedule') +class LearningRateSchedule: + + def __call__(self, step): + raise NotImplementedError(f"Learning rate schedule '{self.__class__.__name__}' must override `__call__(self, step)`.") + + def get_config(self): + raise NotImplementedError(f"Learning rate schedule '{self.__class__.__name__}' must override `get_config()` in order to be serializable.") + + @classmethod + def from_config(cls, config): + return cls(**config) + +@keras_export('keras.optimizers.schedules.ExponentialDecay') +class ExponentialDecay(LearningRateSchedule): + + def __init__(self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name='ExponentialDecay'): + super().__init__() + self.initial_learning_rate = initial_learning_rate + 
self.decay_steps = decay_steps + self.decay_rate = decay_rate + self.staircase = staircase + self.name = name + if self.decay_steps <= 0: + raise ValueError(f'Argument `decay_steps` must be > 0. Received: decay_steps={self.decay_steps}') + + def __call__(self, step): + with ops.name_scope(self.name): + initial_learning_rate = ops.convert_to_tensor(self.initial_learning_rate) + dtype = initial_learning_rate.dtype + decay_steps = ops.cast(self.decay_steps, dtype) + decay_rate = ops.cast(self.decay_rate, dtype) + global_step_recomp = ops.cast(step, dtype) + p = global_step_recomp / decay_steps + if self.staircase: + p = ops.floor(p) + return ops.multiply(initial_learning_rate, ops.power(decay_rate, p)) + + def get_config(self): + return {'initial_learning_rate': self.initial_learning_rate, 'decay_steps': self.decay_steps, 'decay_rate': self.decay_rate, 'staircase': self.staircase, 'name': self.name} + +@keras_export('keras.optimizers.schedules.PiecewiseConstantDecay') +class PiecewiseConstantDecay(LearningRateSchedule): + + def __init__(self, boundaries, values, name='PiecewiseConstant'): + super().__init__() + if len(boundaries) != len(values) - 1: + raise ValueError(f'The length of boundaries should be 1 less than the length of values. Received: boundaries={boundaries} of length {len(boundaries)}, and values={values} of length {len(values)}.') + self.boundaries = boundaries + self.values = values + self.name = name + + def __call__(self, step): + with ops.name_scope(self.name): + boundaries = [ops.convert_to_tensor(x) for x in self.boundaries] + values = [ops.convert_to_tensor(x) for x in self.values] + step = ops.convert_to_tensor(step) + for (i, b) in enumerate(boundaries): + if b.dtype != step.dtype: + b = ops.cast(b, step.dtype) + boundaries[i] = b + result_dtype = values[0].dtype + result_value = ops.array(0, dtype=result_dtype) + step_less_than_first_boundary = ops.cast(step <= boundaries[0], result_dtype) + result_value += step_less_than_first_boundary * values[0] + step_greater_than_last_boundary = ops.cast(step > boundaries[-1], result_dtype) + result_value += step_greater_than_last_boundary * values[-1] + for (low, high, value) in zip(boundaries[:-1], boundaries[1:], values[1:-1]): + step_in_range = ops.cast((step > low) & (step <= high), result_dtype) + result_value += step_in_range * value + return result_value + + def get_config(self): + return {'boundaries': self.boundaries, 'values': self.values, 'name': self.name} + +@keras_export('keras.optimizers.schedules.PolynomialDecay') +class PolynomialDecay(LearningRateSchedule): + + def __init__(self, initial_learning_rate, decay_steps, end_learning_rate=0.0001, power=1.0, cycle=False, name='PolynomialDecay'): + super().__init__() + self.initial_learning_rate = initial_learning_rate + self.decay_steps = decay_steps + self.end_learning_rate = end_learning_rate + self.power = power + self.cycle = cycle + self.name = name + if self.decay_steps <= 0: + raise ValueError(f'Argument `decay_steps` must be > 0. 
Received: decay_steps={self.decay_steps}') + + def __call__(self, step): + with ops.name_scope(self.name): + initial_learning_rate = ops.convert_to_tensor(self.initial_learning_rate) + dtype = initial_learning_rate.dtype + end_learning_rate = ops.cast(self.end_learning_rate, dtype) + power = ops.cast(self.power, dtype) + global_step_recomp = ops.cast(step, dtype) + decay_steps_recomp = ops.cast(self.decay_steps, dtype) + if self.cycle: + multiplier = ops.where(ops.equal(global_step_recomp, 0), 1.0, ops.ceil(global_step_recomp / self.decay_steps)) + decay_steps_recomp = ops.multiply(decay_steps_recomp, multiplier) + else: + global_step_recomp = ops.minimum(global_step_recomp, decay_steps_recomp) + p = ops.divide(global_step_recomp, decay_steps_recomp) + return ops.add(ops.multiply(initial_learning_rate - end_learning_rate, ops.power(1 - p, power)), end_learning_rate) + + def get_config(self): + return {'initial_learning_rate': self.initial_learning_rate, 'decay_steps': self.decay_steps, 'end_learning_rate': self.end_learning_rate, 'power': self.power, 'cycle': self.cycle, 'name': self.name} + +@keras_export('keras.optimizers.schedules.InverseTimeDecay') +class InverseTimeDecay(LearningRateSchedule): + + def __init__(self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name='InverseTimeDecay'): + super().__init__() + self.initial_learning_rate = initial_learning_rate + self.decay_steps = decay_steps + self.decay_rate = decay_rate + self.staircase = staircase + self.name = name + if self.decay_steps <= 0: + raise ValueError(f'Argument `decay_steps` must be > 0. Received: decay_steps={self.decay_steps}') + + def __call__(self, step): + with ops.name_scope(self.name): + initial_learning_rate = ops.convert_to_tensor(self.initial_learning_rate) + dtype = initial_learning_rate.dtype + decay_steps = ops.cast(self.decay_steps, dtype) + decay_rate = ops.cast(self.decay_rate, dtype) + global_step_recomp = ops.cast(step, dtype) + p = global_step_recomp / decay_steps + if self.staircase: + p = ops.floor(p) + const = ops.cast(ops.array(1), dtype) + denom = ops.add(const, ops.multiply(decay_rate, p)) + return ops.divide(initial_learning_rate, denom) + + def get_config(self): + return {'initial_learning_rate': self.initial_learning_rate, 'decay_steps': self.decay_steps, 'decay_rate': self.decay_rate, 'staircase': self.staircase, 'name': self.name} + +@keras_export('keras.optimizers.schedules.CosineDecay') +class CosineDecay(LearningRateSchedule): + + def __init__(self, initial_learning_rate, decay_steps, alpha=0.0, name='CosineDecay', warmup_target=None, warmup_steps=0): + super().__init__() + self.initial_learning_rate = initial_learning_rate + self.decay_steps = decay_steps + self.alpha = alpha + self.name = name + self.warmup_steps = warmup_steps + self.warmup_target = warmup_target + if self.decay_steps <= 0: + raise ValueError(f'Argument `decay_steps` must be > 0. 
Received: decay_steps={self.decay_steps}') + + def _decay_function(self, step, decay_steps, decay_from_lr, dtype): + with ops.name_scope(self.name): + completed_fraction = step / decay_steps + pi = ops.array(math.pi, dtype=dtype) + cosine_decayed = 0.5 * (1.0 + ops.cos(pi * completed_fraction)) + decayed = (1 - self.alpha) * cosine_decayed + self.alpha + return ops.multiply(decay_from_lr, decayed) + + def _warmup_function(self, step, warmup_steps, warmup_target, initial_learning_rate): + with ops.name_scope(self.name): + completed_fraction = step / warmup_steps + total_step_delta = warmup_target - initial_learning_rate + return total_step_delta * completed_fraction + initial_learning_rate + + def __call__(self, step): + with ops.name_scope(self.name): + initial_learning_rate = ops.convert_to_tensor(self.initial_learning_rate) + dtype = initial_learning_rate.dtype + decay_steps = ops.cast(self.decay_steps, dtype) + global_step_recomp = ops.cast(step, dtype) + if self.warmup_target is None: + global_step_recomp = ops.minimum(global_step_recomp, decay_steps) + return self._decay_function(global_step_recomp, decay_steps, initial_learning_rate, dtype) + warmup_target = ops.cast(self.warmup_target, dtype) + warmup_steps = ops.cast(self.warmup_steps, dtype) + global_step_recomp = ops.minimum(global_step_recomp, decay_steps + warmup_steps) + return ops.cond(global_step_recomp < warmup_steps, lambda : self._warmup_function(global_step_recomp, warmup_steps, warmup_target, initial_learning_rate), lambda : self._decay_function(global_step_recomp - warmup_steps, decay_steps, warmup_target, dtype)) + + def get_config(self): + return {'initial_learning_rate': self.initial_learning_rate, 'decay_steps': self.decay_steps, 'alpha': self.alpha, 'name': self.name, 'warmup_target': self.warmup_target, 'warmup_steps': self.warmup_steps} + +@keras_export('keras.optimizers.schedules.CosineDecayRestarts') +class CosineDecayRestarts(LearningRateSchedule): + + def __init__(self, initial_learning_rate, first_decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0, name='SGDRDecay'): + super().__init__() + self.initial_learning_rate = initial_learning_rate + self.first_decay_steps = first_decay_steps + self._t_mul = t_mul + self._m_mul = m_mul + self.alpha = alpha + self.name = name + if self.first_decay_steps <= 0: + raise ValueError(f'Argument `first_decay_steps` must be > 0. 
Received: first_decay_steps={self.first_decay_steps}') + + def __call__(self, step): + with ops.name_scope(self.name): + initial_learning_rate = ops.convert_to_tensor(self.initial_learning_rate) + dtype = initial_learning_rate.dtype + first_decay_steps = ops.cast(self.first_decay_steps, dtype) + alpha = ops.cast(self.alpha, dtype) + t_mul = ops.cast(self._t_mul, dtype) + m_mul = ops.cast(self._m_mul, dtype) + global_step_recomp = ops.cast(step, dtype) + completed_fraction = global_step_recomp / first_decay_steps + + def compute_step(completed_fraction, geometric=False): + if geometric: + i_restart = ops.floor(ops.log(ops.cast(1.0 - completed_fraction * (1.0 - t_mul), dtype)) / ops.log(t_mul)) + sum_r = (1.0 - t_mul ** i_restart) / (1.0 - t_mul) + completed_fraction = (completed_fraction - sum_r) / t_mul ** i_restart + else: + i_restart = ops.floor(completed_fraction) + completed_fraction -= i_restart + return (i_restart, completed_fraction) + (i_restart, completed_fraction) = ops.cond(ops.equal(t_mul, 1.0), lambda : compute_step(completed_fraction, geometric=False), lambda : compute_step(completed_fraction, geometric=True)) + m_fac = m_mul ** i_restart + cosine_decayed = 0.5 * m_fac * (1.0 + ops.cos(ops.array(math.pi, dtype=dtype) * completed_fraction)) + decayed = (1 - alpha) * cosine_decayed + alpha + return ops.multiply(initial_learning_rate, decayed) + + def get_config(self): + return {'initial_learning_rate': self.initial_learning_rate, 'first_decay_steps': self.first_decay_steps, 't_mul': self._t_mul, 'm_mul': self._m_mul, 'alpha': self.alpha, 'name': self.name} + +@keras_export('keras.optimizers.schedules.serialize') +def serialize(learning_rate_schedule): + return serialization_lib.serialize_keras_object(learning_rate_schedule) + +@keras_export('keras.optimizers.schedules.deserialize') +def deserialize(config, custom_objects=None): + return serialization_lib.deserialize_keras_object(config, module_objects=globals(), custom_objects=custom_objects, printable_module_name='decay') + +# File: keras-master/keras/src/optimizers/sgd.py +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer + +@keras_export('keras.optimizers.SGD') +class SGD(optimizer.Optimizer): + + def __init__(self, learning_rate=0.01, momentum=0.0, nesterov=False, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='SGD', **kwargs): + super().__init__(learning_rate=learning_rate, name=name, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, **kwargs) + if not isinstance(momentum, float) or momentum < 0 or momentum > 1: + raise ValueError('`momentum` must be a float between [0, 1].') + self.momentum = momentum + self.nesterov = nesterov + + def build(self, variables): + if self.built: + return + super().build(variables) + self.momentums = [] + if self.momentum != 0: + for variable in variables: + self.momentums.append(self.add_variable_from_reference(reference_variable=variable, name='momentum')) + + def update_step(self, gradient, variable, learning_rate): + learning_rate = ops.cast(learning_rate, variable.dtype) + gradient = ops.cast(gradient, variable.dtype) + m = None + if 
self.momentum != 0: + m = self.momentums[self._get_variable_index(variable)] + if m is not None: + momentum = ops.cast(self.momentum, variable.dtype) + self.assign(m, ops.subtract(ops.multiply(m, momentum), ops.multiply(gradient, learning_rate))) + if self.nesterov: + self.assign_add(variable, ops.subtract(ops.multiply(m, momentum), ops.multiply(gradient, learning_rate))) + else: + self.assign_add(variable, m) + else: + self.assign_sub(variable, ops.multiply(gradient, learning_rate)) + + def get_config(self): + config = super().get_config() + config.update({'momentum': self.momentum, 'nesterov': self.nesterov}) + return config +SGD.__doc__ = SGD.__doc__.replace('{{base_optimizer_keyword_args}}', optimizer.base_optimizer_keyword_args) + +# File: keras-master/keras/src/quantizers/__init__.py +import inspect +from keras.src.api_export import keras_export +from keras.src.quantizers.quantizers import AbsMaxQuantizer +from keras.src.quantizers.quantizers import Quantizer +from keras.src.quantizers.quantizers import abs_max_quantize +from keras.src.quantizers.quantizers import compute_float8_amax_history +from keras.src.quantizers.quantizers import compute_float8_scale +from keras.src.quantizers.quantizers import quantize_and_dequantize +from keras.src.saving import serialization_lib +from keras.src.utils.naming import to_snake_case +ALL_OBJECTS = {Quantizer, AbsMaxQuantizer} +ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS} +ALL_OBJECTS_DICT.update({to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}) + +@keras_export('keras.quantizers.serialize') +def serialize(initializer): + return serialization_lib.serialize_keras_object(initializer) + +@keras_export('keras.quantizers.deserialize') +def deserialize(config, custom_objects=None): + return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects) + +@keras_export('keras.quantizers.get') +def get(identifier, **kwargs): + if identifier is None: + return None + if isinstance(identifier, dict): + obj = deserialize(identifier) + elif isinstance(identifier, str): + obj = ALL_OBJECTS_DICT.get(identifier, None) + else: + obj = identifier + if callable(obj): + if inspect.isclass(obj): + obj = obj(kwargs) + return obj + else: + raise ValueError(f'Could not interpret quantizer identifier: {identifier}') + +# File: keras-master/keras/src/quantizers/quantizers.py +import ml_dtypes +import numpy as np +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common.backend_utils import standardize_axis_for_numpy +'' + +@keras_export(['keras.Quantizer', 'keras.quantizers.Quantizer']) +class Quantizer: + + def __init__(self, output_dtype='int8'): + self.output_dtype = output_dtype + + def __call__(self, x): + return x + + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_config(self): + raise NotImplementedError(f'{self} does not implement get_config()') + +@keras_export('keras.quantizers.abs_max_quantize') +def abs_max_quantize(inputs, axis, value_range=(-127, 127), dtype='int8', epsilon=backend.epsilon(), to_numpy=False): + if to_numpy: + original_dtype = backend.standardize_dtype(inputs.dtype) + inputs = ops.convert_to_numpy(inputs) + axis = standardize_axis_for_numpy(axis) + scale = np.divide(value_range[1], np.add(np.max(np.abs(inputs), axis=axis, keepdims=True), epsilon)) + outputs = np.multiply(inputs, scale) + outputs = np.clip(np.round(outputs), value_range[0], value_range[1]) + 
outputs = outputs.astype(dtype) + return (ops.convert_to_tensor(outputs), ops.convert_to_tensor(scale, dtype=original_dtype)) + inputs = ops.convert_to_tensor(inputs) + scale = ops.divide(value_range[1], ops.add(ops.max(ops.abs(inputs), axis=axis, keepdims=True), epsilon)) + scale = ops.cast(scale, backend.standardize_dtype(inputs.dtype)) + outputs = ops.multiply(inputs, scale) + outputs = ops.clip(ops.round(outputs), value_range[0], value_range[1]) + outputs = ops.cast(outputs, dtype) + return (outputs, scale) + +@keras_export('keras.quantizers.AbsMaxQuantizer') +class AbsMaxQuantizer(Quantizer): + + def __init__(self, axis, value_range=(-127, 127), epsilon=backend.epsilon(), output_dtype='int8'): + Quantizer.__init__(self, output_dtype=output_dtype) + if isinstance(axis, int): + axis = (axis,) + self.axis = tuple(axis) + self.value_range = value_range + self.epsilon = epsilon + + def __call__(self, x): + (quantized_x, scale) = abs_max_quantize(x, self.axis, self.value_range, self.output_dtype, self.epsilon) + return (quantized_x, scale) + + def get_config(self): + return {'axis': self.axis, 'value_range': self.value_range, 'epsilon': self.epsilon, 'output_dtype': self.output_dtype} +'' + +@keras_export('keras.quantizers.compute_float8_scale') +def compute_float8_scale(amax, scale, dtype_max, margin=0): + scale = ops.reciprocal(scale) + sf = ops.divide(ops.divide(dtype_max, amax), 2 ** margin) + sf = ops.where(amax > 0.0, sf, scale) + sf = ops.where(ops.isfinite(amax), sf, scale) + return ops.reciprocal(sf) + +@keras_export('keras.quantizers.compute_float8_amax_history') +def compute_float8_amax_history(x, amax_history): + amax_update = ops.cast(ops.max(ops.abs(x)), amax_history.dtype) + new_amax_history = ops.scatter_update(ops.roll(amax_history, shift=-1), [[0]], ops.reshape(amax_update, [1])) + return new_amax_history + +@keras_export('keras.quantizers.quantize_and_dequantize') +def quantize_and_dequantize(inputs, scale, quantized_dtype, compute_dtype): + quantized_dtype_max = ops.cast(float(ml_dtypes.finfo(quantized_dtype).max), compute_dtype) + x = ops.divide(inputs, ops.cast(scale, compute_dtype)) + x = ops.clip(x, -quantized_dtype_max, quantized_dtype_max) + x = ops.cast(x, quantized_dtype) + x = ops.multiply(ops.cast(x, compute_dtype), ops.cast(scale, compute_dtype)) + return x + +# File: keras-master/keras/src/random/random.py +from keras.src import backend +from keras.src.api_export import keras_export + +@keras_export('keras.random.normal') +def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + return backend.random.normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed) + +@keras_export('keras.random.categorical') +def categorical(logits, num_samples, dtype='int32', seed=None): + logits_shape = list(backend.convert_to_tensor(logits).shape) + if len(logits_shape) != 2: + raise ValueError(f'`logits` should be a 2-D tensor with shape [batch_size, num_classes]. Received: logits={logits}') + return backend.random.categorical(logits, num_samples, dtype=dtype, seed=seed) + +@keras_export('keras.random.uniform') +def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): + if dtype and (not backend.is_float_dtype(dtype)): + raise ValueError(f'`keras.random.uniform` requires a floating point `dtype`. 
Received: dtype={dtype} ') + return backend.random.uniform(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed) + +@keras_export('keras.random.randint') +def randint(shape, minval, maxval, dtype='int32', seed=None): + if dtype and (not backend.is_int_dtype(dtype)): + raise ValueError(f'`keras.random.randint` requires an integer `dtype`. Received: dtype={dtype} ') + return backend.random.randint(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed) + +@keras_export('keras.random.truncated_normal') +def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + return backend.random.truncated_normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed) + +@keras_export('keras.random.dropout') +def dropout(inputs, rate, noise_shape=None, seed=None): + return backend.random.dropout(inputs, rate, noise_shape=noise_shape, seed=seed) + +@keras_export('keras.random.shuffle') +def shuffle(x, axis=0, seed=None): + return backend.random.shuffle(x, axis=axis, seed=seed) + +@keras_export('keras.random.gamma') +def gamma(shape, alpha, dtype=None, seed=None): + return backend.random.gamma(shape, alpha=alpha, dtype=dtype, seed=seed) + +@keras_export('keras.random.binomial') +def binomial(shape, counts, probabilities, dtype=None, seed=None): + return backend.random.binomial(shape, counts=counts, probabilities=probabilities, dtype=dtype, seed=seed) + +@keras_export('keras.random.beta') +def beta(shape, alpha, beta, dtype=None, seed=None): + return backend.random.beta(shape=shape, alpha=alpha, beta=beta, dtype=dtype, seed=seed) + +# File: keras-master/keras/src/random/seed_generator.py +import random as python_random +import numpy as np +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state +from keras.src.utils import jax_utils +from keras.src.utils.naming import auto_name + +@keras_export('keras.random.SeedGenerator') +class SeedGenerator: + + def __init__(self, seed=None, name=None, **kwargs): + if name is None: + name = auto_name(self.__class__.__name__) + self.name = name + custom_backend = kwargs.pop('backend', None) + if kwargs: + raise ValueError(f'Unrecognized keyword arguments: {kwargs}') + if custom_backend is not None: + self.backend = custom_backend + else: + self.backend = backend + self._initial_seed = seed + if seed is None: + seed = make_default_seed() + if not isinstance(seed, int): + raise ValueError(f'Argument `seed` must be an integer. Received: seed={seed}') + + def seed_initializer(*args, **kwargs): + dtype = kwargs.get('dtype', None) + return self.backend.convert_to_tensor([seed, 0], dtype=dtype) + with backend.name_scope(self.name, caller=self): + self.state = self.backend.Variable(seed_initializer, shape=(2,), dtype=self.backend.random_seed_dtype(), trainable=False, name='seed_generator_state') + + def next(self, ordered=True): + seed_state = self.state + new_seed_value = seed_state.value * 1 + if ordered: + increment = self.backend.convert_to_tensor(np.array([0, 1]), dtype=seed_state.dtype) + self.state.assign(self.backend.numpy.add(seed_state, increment)) + else: + self.state.assign((seed_state + 1) * 5387 % 933199) + return new_seed_value + + def get_config(self): + return {'seed': self._initial_seed} + + @classmethod + def from_config(cls, config): + return cls(**config) + +def global_seed_generator(): + if jax_utils.is_in_jax_tracing_scope(): + raise ValueError('[JAX RNG] When tracing a JAX function, you should only use seeded random ops, e.g. 
you should create a `SeedGenerator` instance, attach it to your layer/model, and pass the instance as the `seed` argument when calling random ops. Unseeded random ops would get incorrectly traced by JAX and would become constant after tracing. Example:\n\n```\n# Make sure to set the seed generator as a layer attribute\nself.seed_generator = keras.random.SeedGenerator(seed=1337)\n...\nout = keras.random.normal(shape=(1,), seed=self.seed_generator)\n```') + gen = global_state.get_global_attribute('global_seed_generator') + if gen is None: + gen = SeedGenerator() + global_state.set_global_attribute('global_seed_generator', gen) + return gen + +def make_default_seed(): + return python_random.randint(1, int(1000000000.0)) + +def draw_seed(seed): + from keras.src.backend import convert_to_tensor + from keras.src.backend import random_seed_dtype + if isinstance(seed, SeedGenerator): + return seed.next() + elif isinstance(seed, int): + return convert_to_tensor([seed, 0], dtype=random_seed_dtype()) + elif seed is None: + return global_seed_generator().next(ordered=False) + raise ValueError(f'Argument `seed` must be either an integer or an instance of `SeedGenerator`. Received: seed={seed} (of type {type(seed)})') + +# File: keras-master/keras/src/regularizers/__init__.py +import inspect +from keras.src.api_export import keras_export +from keras.src.regularizers.regularizers import L1 +from keras.src.regularizers.regularizers import L1L2 +from keras.src.regularizers.regularizers import L2 +from keras.src.regularizers.regularizers import OrthogonalRegularizer +from keras.src.regularizers.regularizers import Regularizer +from keras.src.saving import serialization_lib +from keras.src.utils.naming import to_snake_case +ALL_OBJECTS = {Regularizer, L1, L2, L1L2, OrthogonalRegularizer} +ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS} +ALL_OBJECTS_DICT.update({to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}) + +@keras_export('keras.regularizers.serialize') +def serialize(regularizer): + return serialization_lib.serialize_keras_object(regularizer) + +@keras_export('keras.regularizers.deserialize') +def deserialize(config, custom_objects=None): + return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects) + +@keras_export('keras.regularizers.get') +def get(identifier): + if identifier is None: + return None + if isinstance(identifier, dict): + obj = deserialize(identifier) + elif isinstance(identifier, str): + obj = ALL_OBJECTS_DICT.get(identifier, None) + else: + obj = identifier + if callable(obj): + if inspect.isclass(obj): + obj = obj() + return obj + else: + raise ValueError(f'Could not interpret regularizer identifier: {identifier}') + +# File: keras-master/keras/src/regularizers/regularizers.py +import math +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.utils.numerical_utils import normalize + +@keras_export(['keras.Regularizer', 'keras.regularizers.Regularizer']) +class Regularizer: + + def __call__(self, x): + return 0.0 + + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_config(self): + raise NotImplementedError(f'{self} does not implement get_config()') + +@keras_export(['keras.regularizers.L1L2', 'keras.regularizers.l1_l2']) +class L1L2(Regularizer): + + def __init__(self, l1=0.0, l2=0.0): + l1 = 0.0 if l1 is None else l1 + l2 = 0.0 if l2 is None else l2 + validate_float_arg(l1, name='l1') + validate_float_arg(l2, name='l2') + self.l1 = l1 
+ self.l2 = l2
+
+ def __call__(self, x):
+ regularization = ops.convert_to_tensor(0.0, dtype=x.dtype)
+ if self.l1:
+ regularization += self.l1 * ops.sum(ops.absolute(x))
+ if self.l2:
+ regularization += self.l2 * ops.sum(ops.square(x))
+ return regularization
+
+ def get_config(self):
+ return {'l1': float(self.l1), 'l2': float(self.l2)}
+
+@keras_export(['keras.regularizers.L1', 'keras.regularizers.l1'])
+class L1(Regularizer):
+
+ def __init__(self, l1=0.01):
+ l1 = 0.01 if l1 is None else l1
+ validate_float_arg(l1, name='l1')
+ self.l1 = ops.convert_to_tensor(l1)
+
+ def __call__(self, x):
+ return self.l1 * ops.sum(ops.absolute(x))
+
+ def get_config(self):
+ return {'l1': float(self.l1)}
+
+@keras_export(['keras.regularizers.L2', 'keras.regularizers.l2'])
+class L2(Regularizer):
+
+ def __init__(self, l2=0.01):
+ l2 = 0.01 if l2 is None else l2
+ validate_float_arg(l2, name='l2')
+ self.l2 = l2
+
+ def __call__(self, x):
+ return self.l2 * ops.sum(ops.square(x))
+
+ def get_config(self):
+ return {'l2': float(self.l2)}
+
+@keras_export(['keras.regularizers.OrthogonalRegularizer', 'keras.regularizers.orthogonal_regularizer'])
+class OrthogonalRegularizer(Regularizer):
+
+ def __init__(self, factor=0.01, mode='rows'):
+ validate_float_arg(factor, name='factor')
+ self.factor = ops.convert_to_tensor(factor)
+ if mode not in {'rows', 'columns'}:
+ raise ValueError(f'Invalid value for argument `mode`. Expected one of {{"rows", "columns"}}. Received: mode={mode}')
+ self.mode = mode
+
+ def __call__(self, inputs):
+ if len(inputs.shape) != 2:
+ raise ValueError(f'Inputs to OrthogonalRegularizer must have rank 2. Received: inputs.shape={inputs.shape}')
+ if self.mode == 'rows':
+ inputs = normalize(inputs, axis=1)
+ product = ops.matmul(inputs, ops.transpose(inputs))
+ size = inputs.shape[0]
+ else:
+ inputs = normalize(inputs, axis=0)
+ product = ops.matmul(ops.transpose(inputs), inputs)
+ size = inputs.shape[1]
+ product_no_diagonal = product * (1.0 - ops.eye(size, dtype=inputs.dtype))
+ num_pairs = size * (size - 1.0) / 2.0
+ return self.factor * 0.5 * ops.sum(ops.absolute(product_no_diagonal)) / num_pairs
+
+ def get_config(self):
+ return {'factor': float(self.factor), 'mode': self.mode}
+
+def validate_float_arg(value, name):
+ if not isinstance(value, (float, int)) or (math.isinf(value) or math.isnan(value)) or value < 0:
+ raise ValueError(f'Invalid value for argument {name}: expected a non-negative float. Received: {name}={value}')
+ return float(value)
+
+# File: keras-master/keras/src/saving/keras_saveable.py
+import io
+
+class KerasSaveable:
+
+ def _obj_type(self):
+ raise NotImplementedError('KerasSaveable subclasses must provide an implementation for `_obj_type()`')
+
+ @classmethod
+ def _unpickle_model(cls, bytesio):
+ import keras.src.saving.saving_lib as saving_lib
+ return saving_lib._load_model_from_fileobj(bytesio, custom_objects=None, compile=True, safe_mode=False)
+
+ def __reduce__(self):
+ import keras.src.saving.saving_lib as saving_lib
+ buf = io.BytesIO()
+ saving_lib._save_model_to_fileobj(self, buf, 'h5')
+ return (self._unpickle_model, (buf,))
+
+# File: keras-master/keras/src/saving/object_registration.py
+import inspect
+from keras.src.api_export import keras_export
+from keras.src.backend.common import global_state
+GLOBAL_CUSTOM_OBJECTS = {}
+GLOBAL_CUSTOM_NAMES = {}
+
+@keras_export(['keras.saving.CustomObjectScope', 'keras.saving.custom_object_scope', 'keras.utils.CustomObjectScope', 'keras.utils.custom_object_scope'])
+class CustomObjectScope:
+
+ def __init__(self, custom_objects):
+ self.custom_objects = custom_objects or {}
+ self.backup = None
+
+ def __enter__(self):
+ self.backup = global_state.get_global_attribute('custom_objects_scope_dict', {}).copy()
+ global_state.set_global_attribute('custom_objects_scope_dict', self.custom_objects.copy())
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ global_state.set_global_attribute('custom_objects_scope_dict', self.backup.copy())
+custom_object_scope = CustomObjectScope
+
+@keras_export(['keras.saving.get_custom_objects', 'keras.utils.get_custom_objects'])
+def get_custom_objects():
+ return GLOBAL_CUSTOM_OBJECTS
+
+@keras_export(['keras.saving.register_keras_serializable', 'keras.utils.register_keras_serializable'])
+def register_keras_serializable(package='Custom', name=None):
+
+ def decorator(arg):
+ class_name = name if name is not None else arg.__name__
+ registered_name = package + '>' + class_name
+ if inspect.isclass(arg) and (not hasattr(arg, 'get_config')):
+ raise ValueError('Cannot register a class that does not have a get_config() method.')
+ GLOBAL_CUSTOM_OBJECTS[registered_name] = arg
+ GLOBAL_CUSTOM_NAMES[arg] = registered_name
+ return arg
+ return decorator
+
+@keras_export(['keras.saving.get_registered_name', 'keras.utils.get_registered_name'])
+def get_registered_name(obj):
+ if obj in GLOBAL_CUSTOM_NAMES:
+ return GLOBAL_CUSTOM_NAMES[obj]
+ else:
+ return obj.__name__
+
+@keras_export(['keras.saving.get_registered_object', 'keras.utils.get_registered_object'])
+def get_registered_object(name, custom_objects=None, module_objects=None):
+ custom_objects_scope_dict = global_state.get_global_attribute('custom_objects_scope_dict', {})
+ if name in custom_objects_scope_dict:
+ return custom_objects_scope_dict[name]
+ elif name in GLOBAL_CUSTOM_OBJECTS:
+ return GLOBAL_CUSTOM_OBJECTS[name]
+ elif custom_objects and name in custom_objects:
+ return custom_objects[name]
+ elif module_objects and name in module_objects:
+ return module_objects[name]
+ return None
+
+# File: keras-master/keras/src/saving/saving_api.py
+import os
+import zipfile
+from absl import logging
+from keras.src.api_export import keras_export
+from keras.src.legacy.saving import legacy_h5_format
+from keras.src.saving import saving_lib
+from keras.src.utils import file_utils
+from keras.src.utils import io_utils
+try:
+ import h5py
+except ImportError:
+ h5py = None
+
+@keras_export(['keras.saving.save_model', 'keras.models.save_model'])
+def save_model(model, filepath, overwrite=True, zipped=None, **kwargs):
+ include_optimizer = kwargs.pop('include_optimizer', True)
+ save_format = kwargs.pop('save_format', False)
+ if save_format:
+ if str(filepath).endswith(('.h5', '.hdf5')) or str(filepath).endswith('.keras'):
+ logging.warning(f'The `save_format` argument is deprecated in Keras 3. We recommend removing this argument as it can be inferred from the file path. Received: save_format={save_format}')
+ else:
+ raise ValueError(f'The `save_format` argument is deprecated in Keras 3. Please remove this argument and pass a file path with either `.keras` or `.h5` extension. Received: save_format={save_format}')
+ if kwargs:
+ raise ValueError(f'The following argument(s) are not supported: {list(kwargs.keys())}')
+ if str(filepath).endswith(('.h5', '.hdf5')):
+ logging.warning("You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. 
`model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`. ") + is_hf = str(filepath).startswith('hf://') + if zipped is None: + zipped = not is_hf + try: + exists = not is_hf and os.path.exists(filepath) + except TypeError: + exists = False + if exists and (not overwrite): + proceed = io_utils.ask_to_proceed_with_overwrite(filepath) + if not proceed: + return + if zipped and str(filepath).endswith('.keras'): + return saving_lib.save_model(model, filepath) + if not zipped: + return saving_lib.save_model(model, filepath, zipped=False) + if str(filepath).endswith(('.h5', '.hdf5')): + return legacy_h5_format.save_model_to_hdf5(model, filepath, overwrite, include_optimizer) + raise ValueError(f'Invalid filepath extension for saving. Please add either a `.keras` extension for the native Keras format (recommended) or a `.h5` extension. Use `model.export(filepath)` if you want to export a SavedModel for use with TFLite/TFServing/etc. Received: filepath={filepath}.') + +@keras_export(['keras.saving.load_model', 'keras.models.load_model']) +def load_model(filepath, custom_objects=None, compile=True, safe_mode=True): + is_keras_zip = str(filepath).endswith('.keras') and zipfile.is_zipfile(filepath) + is_keras_dir = file_utils.isdir(filepath) and file_utils.exists(file_utils.join(filepath, 'config.json')) + is_hf = str(filepath).startswith('hf://') + if file_utils.is_remote_path(filepath) and (not file_utils.isdir(filepath)) and (not is_keras_zip) and (not is_hf): + local_path = file_utils.join(saving_lib.get_temp_dir(), os.path.basename(filepath)) + file_utils.copy(filepath, local_path) + if zipfile.is_zipfile(local_path): + filepath = local_path + is_keras_zip = True + if is_keras_zip or is_keras_dir or is_hf: + return saving_lib.load_model(filepath, custom_objects=custom_objects, compile=compile, safe_mode=safe_mode) + if str(filepath).endswith(('.h5', '.hdf5')): + return legacy_h5_format.load_model_from_hdf5(filepath, custom_objects=custom_objects, compile=compile) + elif str(filepath).endswith('.keras'): + raise ValueError(f'File not found: filepath={filepath}. Please ensure the file is an accessible `.keras` zip file.') + else: + raise ValueError(f"File format not supported: filepath={filepath}. Keras 3 only supports V3 `.keras` files and legacy H5 format files (`.h5` extension). Note that the legacy SavedModel format is not supported by `load_model()` in Keras 3. In order to reload a TensorFlow SavedModel as an inference-only layer in Keras 3, use `keras.layers.TFSMLayer({filepath}, call_endpoint='serving_default')` (note that your `call_endpoint` might have a different name).") + +@keras_export('keras.saving.save_weights') +def save_weights(model, filepath, overwrite=True, **kwargs): + if not str(filepath).endswith('.weights.h5'): + raise ValueError(f'The filename must end in `.weights.h5`. 
Received: filepath={filepath}') + try: + exists = os.path.exists(filepath) + except TypeError: + exists = False + if exists and (not overwrite): + proceed = io_utils.ask_to_proceed_with_overwrite(filepath) + if not proceed: + return + saving_lib.save_weights_only(model, filepath, **kwargs) + +@keras_export('keras.saving.load_weights') +def load_weights(model, filepath, skip_mismatch=False, **kwargs): + if str(filepath).endswith('.keras'): + if kwargs: + raise ValueError(f'Invalid keyword arguments: {kwargs}') + saving_lib.load_weights_only(model, filepath, skip_mismatch=skip_mismatch) + elif str(filepath).endswith('.weights.h5'): + objects_to_skip = kwargs.pop('objects_to_skip', None) + if kwargs: + raise ValueError(f'Invalid keyword arguments: {kwargs}') + saving_lib.load_weights_only(model, filepath, skip_mismatch=skip_mismatch, objects_to_skip=objects_to_skip) + elif str(filepath).endswith('.h5') or str(filepath).endswith('.hdf5'): + by_name = kwargs.pop('by_name', False) + if kwargs: + raise ValueError(f'Invalid keyword arguments: {kwargs}') + if not h5py: + raise ImportError('Loading a H5 file requires `h5py` to be installed.') + with h5py.File(filepath, 'r') as f: + if 'layer_names' not in f.attrs and 'model_weights' in f: + f = f['model_weights'] + if by_name: + legacy_h5_format.load_weights_from_hdf5_group_by_name(f, model, skip_mismatch) + else: + legacy_h5_format.load_weights_from_hdf5_group(f, model) + else: + raise ValueError(f'File format not supported: filepath={filepath}. Keras 3 only supports V3 `.keras` and `.weights.h5` files, or legacy V1/V2 `.h5` files.') + +# File: keras-master/keras/src/saving/saving_lib.py +"""""" +import datetime +import io +import json +import pathlib +import tempfile +import warnings +import zipfile +import ml_dtypes +import numpy as np +from keras.src import backend +from keras.src.backend.common import global_state +from keras.src.layers.layer import Layer +from keras.src.losses.loss import Loss +from keras.src.metrics.metric import Metric +from keras.src.optimizers.optimizer import Optimizer +from keras.src.saving.serialization_lib import ObjectSharingScope +from keras.src.saving.serialization_lib import deserialize_keras_object +from keras.src.saving.serialization_lib import serialize_keras_object +from keras.src.trainers.compile_utils import CompileMetrics +from keras.src.utils import file_utils +from keras.src.utils import io_utils +from keras.src.utils import naming +from keras.src.utils import plot_model +from keras.src.utils.model_visualization import check_pydot +from keras.src.utils.summary_utils import weight_memory_size +from keras.src.version import __version__ as keras_version +try: + import h5py +except ImportError: + h5py = None +try: + import psutil +except ImportError: + psutil = None +try: + import huggingface_hub +except ImportError: + huggingface_hub = None +_CONFIG_FILENAME = 'config.json' +_METADATA_FILENAME = 'metadata.json' +_VARS_FNAME = 'model.weights' +_VARS_FNAME_H5 = _VARS_FNAME + '.h5' +_VARS_FNAME_NPZ = _VARS_FNAME + '.npz' +_ASSETS_DIRNAME = 'assets' +_MEMORY_UPPER_BOUND = 0.5 +_MODEL_CARD_TEMPLATE = '\n---\nlibrary_name: keras\n---\n\nThis model has been uploaded using the Keras library and can be used with JAX,\nTensorFlow, and PyTorch backends.\n\nThis model card has been generated automatically and should be completed by the\nmodel author.\nSee [Model Cards documentation](https://huggingface.co/docs/hub/model-cards) for\nmore information.\n\nFor more details about the model architecture, check 
out\n[config.json](./config.json).' + +def save_model(model, filepath, weights_format='h5', zipped=True): + if weights_format == 'h5' and h5py is None: + raise ImportError('h5py must be installed in order to save a model.') + if not model.built: + warnings.warn('You are saving a model that has not yet been built. It might not contain any weights yet. Consider building the model first by calling it on some data.', stacklevel=2) + if isinstance(filepath, io.IOBase): + _save_model_to_fileobj(model, filepath, weights_format) + return + filepath = str(filepath) + is_hf = filepath.startswith('hf://') + if zipped and (not filepath.endswith('.keras')): + raise ValueError(f'Invalid `filepath` argument: expected a `.keras` extension. Received: filepath={filepath}') + if not zipped and filepath.endswith('.keras'): + raise ValueError(f'When using `zipped=False`, the `filepath` argument should not end in `.keras`. Received: filepath={filepath}') + if zipped and is_hf: + raise ValueError(f'When saving to the Hugging Face Hub, you should not save the model as zipped. Received: filepath={filepath}, zipped={zipped}') + if is_hf: + _upload_model_to_hf(model, filepath, weights_format) + elif not zipped: + _save_model_to_dir(model, filepath, weights_format) + elif file_utils.is_remote_path(filepath): + zip_filepath = io.BytesIO() + _save_model_to_fileobj(model, zip_filepath, weights_format) + with file_utils.File(filepath, 'wb') as f: + f.write(zip_filepath.getvalue()) + else: + with open(filepath, 'wb') as f: + _save_model_to_fileobj(model, f, weights_format) + +def _serialize_model_as_json(model): + with ObjectSharingScope(): + serialized_model_dict = serialize_keras_object(model) + config_json = json.dumps(serialized_model_dict) + metadata_json = json.dumps({'keras_version': keras_version, 'date_saved': datetime.datetime.now().strftime('%Y-%m-%d@%H:%M:%S')}) + return (config_json, metadata_json) + +def _save_model_to_dir(model, dirpath, weights_format): + if not file_utils.exists(dirpath): + file_utils.makedirs(dirpath) + (config_json, metadata_json) = _serialize_model_as_json(model) + with open(file_utils.join(dirpath, _METADATA_FILENAME), 'w') as f: + f.write(metadata_json) + with open(file_utils.join(dirpath, _CONFIG_FILENAME), 'w') as f: + f.write(config_json) + weights_filepath = file_utils.join(dirpath, _VARS_FNAME_H5) + assert_dirpath = file_utils.join(dirpath, _ASSETS_DIRNAME) + try: + if weights_format == 'h5': + weights_store = H5IOStore(weights_filepath, mode='w') + elif weights_format == 'npz': + weights_store = NpzIOStore(weights_filepath, mode='w') + else: + raise ValueError(f"Unknown `weights_format` argument. Expected 'h5' or 'npz'. 
Received: weights_format={weights_format}") + asset_store = DiskIOStore(assert_dirpath, mode='w') + _save_state(model, weights_store=weights_store, assets_store=asset_store, inner_path='', visited_saveables=set()) + finally: + weights_store.close() + asset_store.close() + +def _save_model_to_fileobj(model, fileobj, weights_format): + (config_json, metadata_json) = _serialize_model_as_json(model) + with zipfile.ZipFile(fileobj, 'w') as zf: + with zf.open(_METADATA_FILENAME, 'w') as f: + f.write(metadata_json.encode()) + with zf.open(_CONFIG_FILENAME, 'w') as f: + f.write(config_json.encode()) + weights_file_path = None + weights_store = None + asset_store = None + write_zf = False + try: + if weights_format == 'h5': + try: + if is_memory_sufficient(model): + weights_store = H5IOStore(_VARS_FNAME_H5, archive=zf, mode='w') + else: + working_dir = pathlib.Path(fileobj.name).parent + weights_file_path = tempfile.NamedTemporaryFile(dir=working_dir) + weights_store = H5IOStore(weights_file_path.name, mode='w') + write_zf = True + except: + weights_store = H5IOStore(_VARS_FNAME_H5, archive=zf, mode='w') + elif weights_format == 'npz': + weights_store = NpzIOStore(_VARS_FNAME_NPZ, archive=zf, mode='w') + else: + raise ValueError(f"Unknown `weights_format` argument. Expected 'h5' or 'npz'. Received: weights_format={weights_format}") + asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode='w') + _save_state(model, weights_store=weights_store, assets_store=asset_store, inner_path='', visited_saveables=set()) + except: + write_zf = False + if weights_store: + weights_store.archive = None + raise + finally: + if weights_store: + weights_store.close() + if asset_store: + asset_store.close() + if write_zf and weights_file_path: + zf.write(weights_file_path.name, _VARS_FNAME_H5) + if weights_file_path: + weights_file_path.close() + +def _upload_model_to_hf(model, hf_path, weights_format): + if huggingface_hub is None: + raise ImportError('To save models to the Hugging Face Hub, you must install the `huggingface_hub` package.') + original_hf_path = hf_path + if hf_path.startswith('hf://'): + hf_path = hf_path[5:] + if hf_path.count('/') > 1: + raise ValueError(f'Invalid `hf_path` argument: expected `namespace/model_name` format. Received: hf_path={original_hf_path}') + api = huggingface_hub.HfApi(library_name='keras', library_version=keras_version) + repo_url = api.create_repo(hf_path, exist_ok=True) + repo_id = repo_url.repo_id + with tempfile.TemporaryDirectory() as tmp_dir: + _save_model_to_dir(model, tmp_dir, weights_format) + model_card = _MODEL_CARD_TEMPLATE + if check_pydot(): + plot_path = file_utils.join(tmp_dir, 'assets', 'summary_plot.png') + plot_model(model, to_file=plot_path, show_layer_names=True, show_shapes=True, show_dtype=True) + if len(model.layers) <= 10: + model_card += '\n\n![](./assets/summary_plot.png)' + else: + model_card += 'A plot of the model can be found [here](./assets/summary_plot.png).' 
+ with open(file_utils.join(tmp_dir, 'README.md'), 'w') as f: + f.write(model_card) + api.upload_folder(repo_id=repo_id, folder_path=tmp_dir, commit_message='Save model using Keras.') + io_utils.print_msg(f"Model saved to the Hugging Face Hub: {repo_url}\nTo load back the model, use `keras.saving.load_model('hf://{repo_id}')`") + +def load_model(filepath, custom_objects=None, compile=True, safe_mode=True): + if isinstance(filepath, io.IOBase): + return _load_model_from_fileobj(filepath, custom_objects, compile, safe_mode) + elif str(filepath).startswith('hf://'): + if huggingface_hub is None: + raise ImportError('To load models from the Hugging Face Hub, you must install the `huggingface_hub` package.') + repo_id = filepath[5:] + folder_path = huggingface_hub.snapshot_download(repo_id=repo_id, library_name='keras', library_version=keras_version) + return _load_model_from_dir(folder_path, custom_objects, compile, safe_mode) + else: + filepath = str(filepath) + if not filepath.endswith('.keras'): + is_keras_dir = file_utils.isdir(filepath) and file_utils.exists(file_utils.join(filepath, 'config.json')) + if is_keras_dir: + return _load_model_from_dir(filepath, custom_objects, compile, safe_mode) + raise ValueError(f'Invalid filename: expected a `.keras` extension. Received: filepath={filepath}') + with open(filepath, 'rb') as f: + return _load_model_from_fileobj(f, custom_objects, compile, safe_mode) + +def _load_model_from_dir(dirpath, custom_objects, compile, safe_mode): + if not file_utils.exists(dirpath): + raise ValueError(f"Directory doesn't exist: {dirpath}") + if not file_utils.isdir(dirpath): + raise ValueError(f"Path isn't a directory: {dirpath}") + with open(file_utils.join(dirpath, _CONFIG_FILENAME), 'r') as f: + config_json = f.read() + model = _model_from_config(config_json, custom_objects, compile, safe_mode) + all_filenames = file_utils.listdir(dirpath) + try: + if _VARS_FNAME_H5 in all_filenames: + weights_file_path = file_utils.join(dirpath, _VARS_FNAME_H5) + weights_store = H5IOStore(weights_file_path, mode='r') + elif _VARS_FNAME_NPZ in all_filenames: + weights_file_path = file_utils.join(dirpath, _VARS_FNAME_NPZ) + weights_store = NpzIOStore(weights_file_path, mode='r') + else: + raise ValueError(f'Expected a {_VARS_FNAME_H5} or {_VARS_FNAME_NPZ} file.') + if len(all_filenames) > 3: + asset_store = DiskIOStore(file_utils.join(dirpath, _ASSETS_DIRNAME), mode='r') + else: + asset_store = None + failed_saveables = set() + error_msgs = {} + _load_state(model, weights_store=weights_store, assets_store=asset_store, inner_path='', visited_saveables=set(), failed_saveables=failed_saveables, error_msgs=error_msgs) + finally: + weights_store.close() + if asset_store: + asset_store.close() + if failed_saveables: + _raise_loading_failure(error_msgs) + return model + +def _model_from_config(config_json, custom_objects, compile, safe_mode): + config_dict = json.loads(config_json) + if not compile: + config_dict['compile_config'] = None + with ObjectSharingScope(): + model = deserialize_keras_object(config_dict, custom_objects, safe_mode=safe_mode) + return model + +def _load_model_from_fileobj(fileobj, custom_objects, compile, safe_mode): + with zipfile.ZipFile(fileobj, 'r') as zf: + with zf.open(_CONFIG_FILENAME, 'r') as f: + config_json = f.read() + model = _model_from_config(config_json, custom_objects, compile, safe_mode) + all_filenames = zf.namelist() + extract_dir = None + weights_store = None + asset_store = None + try: + if _VARS_FNAME_H5 in all_filenames: + try: + if 
is_memory_sufficient(model): + io_file = io.BytesIO(zf.open(_VARS_FNAME_H5, 'r').read()) + weights_store = H5IOStore(io_file, mode='r') + else: + extract_dir = tempfile.TemporaryDirectory(dir=pathlib.Path(fileobj.name).parent) + zf.extract(_VARS_FNAME_H5, extract_dir.name) + weights_store = H5IOStore(pathlib.Path(extract_dir.name, _VARS_FNAME_H5), mode='r') + except: + weights_store = H5IOStore(_VARS_FNAME_H5, zf, mode='r') + elif _VARS_FNAME_NPZ in all_filenames: + weights_store = NpzIOStore(_VARS_FNAME_NPZ, zf, mode='r') + else: + raise ValueError(f'Expected a {_VARS_FNAME_H5} or {_VARS_FNAME_NPZ} file.') + if len(all_filenames) > 3: + asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode='r') + failed_saveables = set() + error_msgs = {} + _load_state(model, weights_store=weights_store, assets_store=asset_store, inner_path='', visited_saveables=set(), failed_saveables=failed_saveables, error_msgs=error_msgs) + finally: + if weights_store: + weights_store.close() + if asset_store: + asset_store.close() + if extract_dir: + extract_dir.cleanup() + if failed_saveables: + _raise_loading_failure(error_msgs) + return model + +def save_weights_only(model, filepath, objects_to_skip=None): + filepath = str(filepath) + if not filepath.endswith('.weights.h5'): + raise ValueError(f'Invalid `filepath` argument: expected a `.weights.h5` extension. Received: filepath={filepath}') + weights_store = H5IOStore(filepath, mode='w') + if objects_to_skip is not None: + visited_saveables = set((id(o) for o in objects_to_skip)) + else: + visited_saveables = set() + _save_state(model, weights_store=weights_store, assets_store=None, inner_path='', visited_saveables=visited_saveables) + weights_store.close() + +def load_weights_only(model, filepath, skip_mismatch=False, objects_to_skip=None): + archive = None + filepath = str(filepath) + if filepath.endswith('.weights.h5'): + weights_store = H5IOStore(filepath, mode='r') + elif filepath.endswith('.keras'): + archive = zipfile.ZipFile(filepath, 'r') + weights_store = H5IOStore(_VARS_FNAME_H5, archive=archive, mode='r') + else: + raise ValueError(f'Invalid `filepath` argument: expected a `.weights.h5` or `.keras` extension. Received: filepath={filepath}') + failed_saveables = set() + if objects_to_skip is not None: + visited_saveables = set((id(o) for o in objects_to_skip)) + else: + visited_saveables = set() + error_msgs = {} + _load_state(model, weights_store=weights_store, assets_store=None, inner_path='', skip_mismatch=skip_mismatch, visited_saveables=visited_saveables, failed_saveables=failed_saveables, error_msgs=error_msgs) + weights_store.close() + if archive: + archive.close() + if failed_saveables: + _raise_loading_failure(error_msgs, warn_only=skip_mismatch) + +def _raise_loading_failure(error_msgs, warn_only=False): + first_key = list(error_msgs.keys())[0] + (ex_saveable, ex_error) = error_msgs[first_key] + msg = f'A total of {len(error_msgs)} objects could not be loaded. 
Example error message for object {ex_saveable}:\n\n{ex_error}\n\nList of objects that could not be loaded:\n{[x[0] for x in error_msgs.values()]}' + if warn_only: + warnings.warn(msg) + else: + raise ValueError(msg) + +def _write_to_zip_recursively(zipfile_to_save, system_path, zip_path): + if not file_utils.isdir(system_path): + zipfile_to_save.write(system_path, zip_path) + else: + for file_name in file_utils.listdir(system_path): + system_file_path = file_utils.join(system_path, file_name).replace('\\', '/') + zip_file_path = file_utils.join(zip_path, file_name).replace('\\', '/') + _write_to_zip_recursively(zipfile_to_save, system_file_path, zip_file_path) + +def _name_key(name): + if name.startswith('_'): + return '~' + name + return name + +def _walk_saveable(saveable): + from keras.src.saving.keras_saveable import KerasSaveable + if not isinstance(saveable, KerasSaveable): + raise ValueError(f'Expected object to be an instance of `KerasSaveable`, but got {saveable} of type {type(saveable)}') + obj_type = saveable._obj_type() + attr_skipset = get_attr_skipset(obj_type) + if obj_type in ('Sequential', 'Functional'): + yield ('layers', saveable.layers) + for child_attr in sorted(dir(saveable), key=lambda x: _name_key(x)): + if child_attr.startswith('__') or child_attr in attr_skipset: + continue + try: + child_obj = getattr(saveable, child_attr) + except Exception: + continue + yield (child_attr, child_obj) + +def _save_state(saveable, weights_store, assets_store, inner_path, visited_saveables): + from keras.src.saving.keras_saveable import KerasSaveable + if id(saveable) in visited_saveables: + return + if hasattr(saveable, 'save_own_variables') and weights_store: + saveable.save_own_variables(weights_store.make(inner_path)) + if hasattr(saveable, 'save_assets') and assets_store: + saveable.save_assets(assets_store.make(inner_path)) + visited_saveables.add(id(saveable)) + for (child_attr, child_obj) in _walk_saveable(saveable): + if isinstance(child_obj, KerasSaveable): + _save_state(child_obj, weights_store, assets_store, inner_path=file_utils.join(inner_path, child_attr).replace('\\', '/'), visited_saveables=visited_saveables) + elif isinstance(child_obj, (list, dict, tuple, set)): + _save_container_state(child_obj, weights_store, assets_store, inner_path=file_utils.join(inner_path, child_attr).replace('\\', '/'), visited_saveables=visited_saveables) + +def _load_state(saveable, weights_store, assets_store, inner_path, skip_mismatch=False, visited_saveables=None, failed_saveables=None, error_msgs=None): + from keras.src.saving.keras_saveable import KerasSaveable + if visited_saveables and id(saveable) in visited_saveables: + return + failure = False + if hasattr(saveable, 'load_own_variables') and weights_store: + if skip_mismatch or failed_saveables is not None: + try: + saveable.load_own_variables(weights_store.get(inner_path)) + except Exception as e: + failed_saveables.add(id(saveable)) + error_msgs[id(saveable)] = (saveable, e) + failure = True + else: + saveable.load_own_variables(weights_store.get(inner_path)) + if hasattr(saveable, 'load_assets') and assets_store: + if skip_mismatch or failed_saveables is not None: + try: + saveable.load_assets(assets_store.get(inner_path)) + except Exception as e: + failed_saveables.add(id(saveable)) + error_msgs[id(saveable)] = (saveable, e) + failure = True + else: + saveable.load_assets(assets_store.get(inner_path)) + if failed_saveables is not None: + currently_failed = len(failed_saveables) + else: + currently_failed = 0 + for 
(child_attr, child_obj) in _walk_saveable(saveable): + if isinstance(child_obj, KerasSaveable): + _load_state(child_obj, weights_store, assets_store, inner_path=file_utils.join(inner_path, child_attr).replace('\\', '/'), skip_mismatch=skip_mismatch, visited_saveables=visited_saveables, failed_saveables=failed_saveables, error_msgs=error_msgs) + elif isinstance(child_obj, (list, dict, tuple, set)): + _load_container_state(child_obj, weights_store, assets_store, inner_path=file_utils.join(inner_path, child_attr).replace('\\', '/'), skip_mismatch=skip_mismatch, visited_saveables=visited_saveables, failed_saveables=failed_saveables, error_msgs=error_msgs) + if failed_saveables is not None: + newly_failed = len(failed_saveables) - currently_failed + else: + newly_failed = 0 + if not failure: + if visited_saveables is not None and newly_failed <= 0: + visited_saveables.add(id(saveable)) + if id(saveable) in failed_saveables: + failed_saveables.remove(id(saveable)) + error_msgs.pop(id(saveable)) + +def _save_container_state(container, weights_store, assets_store, inner_path, visited_saveables): + from keras.src.saving.keras_saveable import KerasSaveable + used_names = {} + if isinstance(container, dict): + container = list(container.values()) + for saveable in container: + if isinstance(saveable, KerasSaveable): + name = naming.to_snake_case(saveable.__class__.__name__) + if name in used_names: + used_names[name] += 1 + name = f'{name}_{used_names[name]}' + else: + used_names[name] = 0 + _save_state(saveable, weights_store, assets_store, inner_path=file_utils.join(inner_path, name).replace('\\', '/'), visited_saveables=visited_saveables) + +def _load_container_state(container, weights_store, assets_store, inner_path, skip_mismatch, visited_saveables, failed_saveables, error_msgs): + from keras.src.saving.keras_saveable import KerasSaveable + used_names = {} + if isinstance(container, dict): + container = list(container.values()) + for saveable in container: + if isinstance(saveable, KerasSaveable): + name = naming.to_snake_case(saveable.__class__.__name__) + if name in used_names: + used_names[name] += 1 + name = f'{name}_{used_names[name]}' + else: + used_names[name] = 0 + _load_state(saveable, weights_store, assets_store, inner_path=file_utils.join(inner_path, name).replace('\\', '/'), skip_mismatch=skip_mismatch, visited_saveables=visited_saveables, failed_saveables=failed_saveables, error_msgs=error_msgs) + +class DiskIOStore: + + def __init__(self, root_path, archive=None, mode=None): + self.mode = mode + self.root_path = root_path + self.archive = archive + self.tmp_dir = None + if self.archive: + self.tmp_dir = get_temp_dir() + if self.mode == 'r': + self.archive.extractall(path=self.tmp_dir) + self.working_dir = file_utils.join(self.tmp_dir, self.root_path).replace('\\', '/') + if self.mode == 'w': + file_utils.makedirs(self.working_dir) + elif mode == 'r': + self.working_dir = root_path + else: + self.tmp_dir = get_temp_dir() + self.working_dir = file_utils.join(self.tmp_dir, self.root_path).replace('\\', '/') + file_utils.makedirs(self.working_dir) + + def make(self, path): + if not path: + return self.working_dir + path = file_utils.join(self.working_dir, path).replace('\\', '/') + if not file_utils.exists(path): + file_utils.makedirs(path) + return path + + def get(self, path): + if not path: + return self.working_dir + path = file_utils.join(self.working_dir, path).replace('\\', '/') + if file_utils.exists(path): + return path + return None + + def close(self): + if self.mode == 'w' 
and self.archive: + _write_to_zip_recursively(self.archive, self.working_dir, self.root_path) + if self.tmp_dir and file_utils.exists(self.tmp_dir): + file_utils.rmtree(self.tmp_dir) + +class H5IOStore: + + def __init__(self, root_path, archive=None, mode='r'): + self.root_path = root_path + self.mode = mode + self.archive = archive + self.io_file = None + if self.archive: + if self.mode == 'w': + self.io_file = io.BytesIO() + else: + self.io_file = self.archive.open(self.root_path, 'r') + self.h5_file = h5py.File(self.io_file, mode=self.mode) + else: + self.h5_file = h5py.File(root_path, mode=self.mode) + + def make(self, path): + return H5Entry(self.h5_file, path, mode='w') + + def get(self, path): + return H5Entry(self.h5_file, path, mode='r') + + def close(self): + self.h5_file.close() + if self.mode == 'w' and self.archive: + self.archive.writestr(self.root_path, self.io_file.getvalue()) + if self.io_file: + self.io_file.close() + +class H5Entry: + + def __init__(self, h5_file, path, mode): + self.h5_file = h5_file + self.path = path + self.mode = mode + if mode == 'w': + if not path: + self.group = self.h5_file.create_group('vars') + else: + self.group = self.h5_file.create_group(self.path).create_group('vars') + else: + found = False + if not path: + self.group = self.h5_file['vars'] + found = True + elif path in self.h5_file and 'vars' in self.h5_file[path]: + self.group = self.h5_file[path]['vars'] + found = True + elif '_layer_checkpoint_dependencies' in self.h5_file: + path = path.replace('layers', '_layer_checkpoint_dependencies') + self.path = path + if path in self.h5_file and 'vars' in self.h5_file[path]: + self.group = self.h5_file[path]['vars'] + found = True + if not found: + self.group = {} + + def __len__(self): + return self.group.__len__() + + def keys(self): + return self.group.keys() + + def items(self): + return self.group.items() + + def values(self): + return self.group.values() + + def __setitem__(self, key, value): + if self.mode != 'w': + raise ValueError('Setting a value is only allowed in write mode.') + value = backend.convert_to_numpy(value) + if backend.standardize_dtype(value.dtype) == 'bfloat16': + ds = self.group.create_dataset(key, data=value) + ds.attrs['dtype'] = 'bfloat16' + else: + self.group[key] = value + + def __getitem__(self, name): + value = self.group[name] + if 'dtype' in value.attrs and value.attrs['dtype'] == 'bfloat16': + value = np.array(value, dtype=ml_dtypes.bfloat16) + return value + +class NpzIOStore: + + def __init__(self, root_path, archive=None, mode='r'): + self.root_path = root_path + self.mode = mode + self.archive = archive + if mode == 'w': + self.contents = {} + else: + if self.archive: + self.f = archive.open(root_path, mode='r') + else: + self.f = open(root_path, mode='rb') + self.contents = np.load(self.f, allow_pickle=True) + + def make(self, path): + if not path: + self.contents['__root__'] = {} + return self.contents['__root__'] + self.contents[path] = {} + return self.contents[path] + + def get(self, path): + if not path: + if '__root__' in self.contents: + return dict(self.contents['__root__']) + return {} + if path in self.contents: + return self.contents[path].tolist() + return {} + + def close(self): + if self.mode == 'w': + if self.archive: + self.f = self.archive.open(self.root_path, mode='w', force_zip64=True) + else: + self.f = open(self.root_path, mode='wb') + np.savez(self.f, **self.contents) + self.f.close() + +def get_temp_dir(): + temp_dir = tempfile.mkdtemp() + testfile = 
tempfile.TemporaryFile(dir=temp_dir) + testfile.close() + return temp_dir + +def get_attr_skipset(obj_type): + skipset = global_state.get_global_attribute(f'saving_attr_skiplist_{obj_type}', None) + if skipset is not None: + return skipset + skipset = set(['_self_unconditional_dependency_names']) + if obj_type == 'Layer': + ref_obj = Layer() + skipset.update(dir(ref_obj)) + elif obj_type == 'Functional': + ref_obj = Layer() + skipset.update(dir(ref_obj) + ['operations', '_operations']) + elif obj_type == 'Sequential': + ref_obj = Layer() + skipset.update(dir(ref_obj) + ['_functional']) + elif obj_type == 'Metric': + ref_obj_a = Metric() + ref_obj_b = CompileMetrics([], []) + skipset.update(dir(ref_obj_a) + dir(ref_obj_b)) + elif obj_type == 'Optimizer': + ref_obj = Optimizer(1.0) + skipset.update(dir(ref_obj)) + skipset.remove('variables') + elif obj_type == 'Loss': + ref_obj = Loss() + skipset.update(dir(ref_obj)) + else: + raise ValueError(f"get_attr_skipset got invalid obj_type={obj_type!r}. Accepted values for `obj_type` are ['Layer', 'Functional', 'Sequential', 'Metric', 'Optimizer', 'Loss']") + global_state.set_global_attribute(f'saving_attr_skiplist_{obj_type}', skipset) + return skipset + +def is_memory_sufficient(model): + if psutil is None: + available_memory = 1024 * 1024 * 1024 + else: + available_memory = psutil.virtual_memory().available + return weight_memory_size(model.variables) < available_memory * _MEMORY_UPPER_BOUND + +# File: keras-master/keras/src/saving/serialization_lib.py +"""""" +import importlib +import inspect +import types +import warnings +import numpy as np +from keras.src import api_export +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state +from keras.src.saving import object_registration +from keras.src.utils import python_utils +from keras.src.utils.module_utils import tensorflow as tf +PLAIN_TYPES = (str, int, float, bool) +BUILTIN_MODULES = ('activations', 'constraints', 'initializers', 'losses', 'metrics', 'optimizers', 'regularizers') + +class SerializableDict: + + def __init__(self, **config): + self.config = config + + def serialize(self): + return serialize_keras_object(self.config) + +class SafeModeScope: + + def __init__(self, safe_mode=True): + self.safe_mode = safe_mode + + def __enter__(self): + self.original_value = in_safe_mode() + global_state.set_global_attribute('safe_mode_saving', self.safe_mode) + + def __exit__(self, *args, **kwargs): + global_state.set_global_attribute('safe_mode_saving', self.original_value) + +@keras_export('keras.config.enable_unsafe_deserialization') +def enable_unsafe_deserialization(): + global_state.set_global_attribute('safe_mode_saving', False) + +def in_safe_mode(): + return global_state.get_global_attribute('safe_mode_saving') + +class ObjectSharingScope: + + def __enter__(self): + global_state.set_global_attribute('shared_objects/id_to_obj_map', {}) + global_state.set_global_attribute('shared_objects/id_to_config_map', {}) + + def __exit__(self, *args, **kwargs): + global_state.set_global_attribute('shared_objects/id_to_obj_map', None) + global_state.set_global_attribute('shared_objects/id_to_config_map', None) + +def get_shared_object(obj_id): + id_to_obj_map = global_state.get_global_attribute('shared_objects/id_to_obj_map') + if id_to_obj_map is not None: + return id_to_obj_map.get(obj_id, None) + +def record_object_after_serialization(obj, config): + if config['module'] == '__main__': + config['module'] = None + 
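# --- Editorial usage sketch (not part of the Keras source; `record_object_after_serialization` resumes below).
# `SafeModeScope` and `enable_unsafe_deserialization` above toggle the `safe_mode_saving`
# global flag that `deserialize_keras_object` (defined further down) consults before it
# will reload a serialized `lambda`. A minimal same-process round trip:
from keras.src.saving.serialization_lib import (
    SafeModeScope,
    deserialize_keras_object,
    serialize_keras_object,
)

config = serialize_keras_object(lambda x: x + 1)  # warns: lambdas are unsafe to serialize
with SafeModeScope(safe_mode=False):  # scoped opt-out; the prior flag is restored on exit
    fn = deserialize_keras_object(config)
assert fn(1) == 2
# --- End editorial sketch.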
id_to_config_map = global_state.get_global_attribute('shared_objects/id_to_config_map') + if id_to_config_map is None: + return + obj_id = int(id(obj)) + if obj_id not in id_to_config_map: + id_to_config_map[obj_id] = config + else: + config['shared_object_id'] = obj_id + prev_config = id_to_config_map[obj_id] + prev_config['shared_object_id'] = obj_id + +def record_object_after_deserialization(obj, obj_id): + id_to_obj_map = global_state.get_global_attribute('shared_objects/id_to_obj_map') + if id_to_obj_map is None: + return + id_to_obj_map[obj_id] = obj + +@keras_export(['keras.saving.serialize_keras_object', 'keras.utils.serialize_keras_object']) +def serialize_keras_object(obj): + if obj is None: + return obj + if isinstance(obj, PLAIN_TYPES): + return obj + if isinstance(obj, (list, tuple)): + config_arr = [serialize_keras_object(x) for x in obj] + return tuple(config_arr) if isinstance(obj, tuple) else config_arr + if isinstance(obj, dict): + return serialize_dict(obj) + if isinstance(obj, bytes): + return {'class_name': '__bytes__', 'config': {'value': obj.decode('utf-8')}} + if isinstance(obj, slice): + return {'class_name': '__slice__', 'config': {'start': serialize_keras_object(obj.start), 'stop': serialize_keras_object(obj.stop), 'step': serialize_keras_object(obj.step)}} + if isinstance(obj, type(Ellipsis)): + return {'class_name': '__ellipsis__', 'config': {}} + if isinstance(obj, backend.KerasTensor): + history = getattr(obj, '_keras_history', None) + if history: + history = list(history) + history[0] = history[0].name + return {'class_name': '__keras_tensor__', 'config': {'shape': obj.shape, 'dtype': obj.dtype, 'keras_history': history}} + if tf.available and isinstance(obj, tf.TensorShape): + return obj.as_list() if obj._dims is not None else None + if backend.is_tensor(obj): + return {'class_name': '__tensor__', 'config': {'value': backend.convert_to_numpy(obj).tolist(), 'dtype': backend.standardize_dtype(obj.dtype)}} + if type(obj).__module__ == np.__name__: + if isinstance(obj, np.ndarray) and obj.ndim > 0: + return {'class_name': '__numpy__', 'config': {'value': obj.tolist(), 'dtype': backend.standardize_dtype(obj.dtype)}} + else: + return obj.item() + if tf.available and isinstance(obj, tf.DType): + return obj.name + if isinstance(obj, types.FunctionType) and obj.__name__ == '<lambda>': + warnings.warn(f'The object being serialized includes a `lambda`. This is unsafe. In order to reload the object, you will have to pass `safe_mode=False` to the loading function. Please avoid using `lambda` in the future, and use named Python functions instead. 
This is the `lambda` being serialized: {inspect.getsource(obj)}', stacklevel=2) + return {'class_name': '__lambda__', 'config': {'value': python_utils.func_dump(obj)}} + if tf.available and isinstance(obj, tf.TypeSpec): + ts_config = obj._serialize() + ts_config = list(map(lambda x: x.as_list() if isinstance(x, tf.TensorShape) else x.name if isinstance(x, tf.DType) else x, ts_config)) + return {'class_name': '__typespec__', 'spec_name': obj.__class__.__name__, 'module': obj.__class__.__module__, 'config': ts_config, 'registered_name': None} + inner_config = _get_class_or_fn_config(obj) + config_with_public_class = serialize_with_public_class(obj.__class__, inner_config) + if config_with_public_class is not None: + get_build_and_compile_config(obj, config_with_public_class) + record_object_after_serialization(obj, config_with_public_class) + return config_with_public_class + if isinstance(obj, types.FunctionType): + module = obj.__module__ + else: + module = obj.__class__.__module__ + class_name = obj.__class__.__name__ + if module == 'builtins': + registered_name = None + elif isinstance(obj, types.FunctionType): + registered_name = object_registration.get_registered_name(obj) + else: + registered_name = object_registration.get_registered_name(obj.__class__) + config = {'module': module, 'class_name': class_name, 'config': inner_config, 'registered_name': registered_name} + get_build_and_compile_config(obj, config) + record_object_after_serialization(obj, config) + return config + +def get_build_and_compile_config(obj, config): + if hasattr(obj, 'get_build_config'): + build_config = obj.get_build_config() + if build_config is not None: + config['build_config'] = serialize_dict(build_config) + if hasattr(obj, 'get_compile_config'): + compile_config = obj.get_compile_config() + if compile_config is not None: + config['compile_config'] = serialize_dict(compile_config) + return + +def serialize_with_public_class(cls, inner_config=None): + keras_api_name = api_export.get_name_from_symbol(cls) + if keras_api_name is None: + registered_name = object_registration.get_registered_name(cls) + if registered_name is None: + return None + return {'module': cls.__module__, 'class_name': cls.__name__, 'config': inner_config, 'registered_name': registered_name} + parts = keras_api_name.split('.') + return {'module': '.'.join(parts[:-1]), 'class_name': parts[-1], 'config': inner_config, 'registered_name': None} + +def serialize_with_public_fn(fn, config, fn_module_name=None): + if fn_module_name: + return {'module': fn_module_name, 'class_name': 'function', 'config': config, 'registered_name': config} + keras_api_name = api_export.get_name_from_symbol(fn) + if keras_api_name: + parts = keras_api_name.split('.') + return {'module': '.'.join(parts[:-1]), 'class_name': 'function', 'config': config, 'registered_name': config} + else: + registered_name = object_registration.get_registered_name(fn) + if not registered_name and (not fn.__module__ == 'builtins'): + return None + return {'module': fn.__module__, 'class_name': 'function', 'config': config, 'registered_name': registered_name} + +def _get_class_or_fn_config(obj): + if isinstance(obj, types.FunctionType): + return obj.__name__ + if hasattr(obj, 'get_config'): + config = obj.get_config() + if not isinstance(config, dict): + raise TypeError(f'The `get_config()` method of {obj} should return a dict. 
It returned: {config}') + return serialize_dict(config) + elif hasattr(obj, '__name__'): + return object_registration.get_registered_name(obj) + else: + raise TypeError(f'Cannot serialize object {obj} of type {type(obj)}. To be serializable, a class must implement the `get_config()` method.') + +def serialize_dict(obj): + return {key: serialize_keras_object(value) for (key, value) in obj.items()} + +@keras_export(['keras.saving.deserialize_keras_object', 'keras.utils.deserialize_keras_object']) +def deserialize_keras_object(config, custom_objects=None, safe_mode=True, **kwargs): + safe_scope_arg = in_safe_mode() + safe_mode = safe_scope_arg if safe_scope_arg is not None else safe_mode + module_objects = kwargs.pop('module_objects', None) + custom_objects = custom_objects or {} + tlco = global_state.get_global_attribute('custom_objects_scope_dict', {}) + gco = object_registration.GLOBAL_CUSTOM_OBJECTS + custom_objects = {**custom_objects, **tlco, **gco} + if config is None: + return None + if isinstance(config, str) and custom_objects and (custom_objects.get(config) is not None): + return custom_objects[config] + if isinstance(config, (list, tuple)): + return [deserialize_keras_object(x, custom_objects=custom_objects, safe_mode=safe_mode) for x in config] + if module_objects is not None: + (inner_config, fn_module_name, has_custom_object) = (None, None, False) + if isinstance(config, dict): + if 'config' in config: + inner_config = config['config'] + if 'class_name' not in config: + raise ValueError(f'Unknown `config` as a `dict`, config={config}') + if custom_objects and (config['class_name'] in custom_objects or config.get('registered_name') in custom_objects or (isinstance(inner_config, str) and inner_config in custom_objects)): + has_custom_object = True + elif config['class_name'] == 'function': + fn_module_name = config['module'] + if fn_module_name == 'builtins': + config = config['config'] + else: + config = config['registered_name'] + else: + if config.get('module', '_') is None: + raise TypeError(f"Cannot deserialize object of type `{config['class_name']}`. 
If `{config['class_name']}` is a custom class, please register it using the `@keras.saving.register_keras_serializable()` decorator.") + config = config['class_name'] + if not has_custom_object: + if config not in module_objects: + return config + if isinstance(module_objects[config], types.FunctionType): + return deserialize_keras_object(serialize_with_public_fn(module_objects[config], config, fn_module_name), custom_objects=custom_objects) + return deserialize_keras_object(serialize_with_public_class(module_objects[config], inner_config=inner_config), custom_objects=custom_objects) + if isinstance(config, PLAIN_TYPES): + return config + if not isinstance(config, dict): + raise TypeError(f'Could not parse config: {config}') + if 'class_name' not in config or 'config' not in config: + return {key: deserialize_keras_object(value, custom_objects=custom_objects, safe_mode=safe_mode) for (key, value) in config.items()} + class_name = config['class_name'] + inner_config = config['config'] or {} + custom_objects = custom_objects or {} + if class_name == '__keras_tensor__': + obj = backend.KerasTensor(inner_config['shape'], dtype=inner_config['dtype']) + obj._pre_serialization_keras_history = inner_config['keras_history'] + return obj + if class_name == '__tensor__': + return backend.convert_to_tensor(inner_config['value'], dtype=inner_config['dtype']) + if class_name == '__numpy__': + return np.array(inner_config['value'], dtype=inner_config['dtype']) + if config['class_name'] == '__bytes__': + return inner_config['value'].encode('utf-8') + if config['class_name'] == '__ellipsis__': + return Ellipsis + if config['class_name'] == '__slice__': + return slice(deserialize_keras_object(inner_config['start'], custom_objects=custom_objects, safe_mode=safe_mode), deserialize_keras_object(inner_config['stop'], custom_objects=custom_objects, safe_mode=safe_mode), deserialize_keras_object(inner_config['step'], custom_objects=custom_objects, safe_mode=safe_mode)) + if config['class_name'] == '__lambda__': + if safe_mode: + raise ValueError('Requested the deserialization of a `lambda` object. This carries a potential risk of arbitrary code execution and thus it is disallowed by default. 
If you trust the source of the saved model, you can pass `safe_mode=False` to the loading function in order to allow `lambda` loading, or call `keras.config.enable_unsafe_deserialization()`.') + return python_utils.func_load(inner_config['value']) + if tf is not None and config['class_name'] == '__typespec__': + obj = _retrieve_class_or_fn(config['spec_name'], config['registered_name'], config['module'], obj_type='class', full_config=config, custom_objects=custom_objects) + inner_config = map(lambda x: tf.TensorShape(x) if isinstance(x, list) else getattr(tf, x) if hasattr(tf.dtypes, str(x)) else x, inner_config) + return obj._deserialize(tuple(inner_config)) + module = config.get('module', None) + registered_name = config.get('registered_name', class_name) + if class_name == 'function': + fn_name = inner_config + return _retrieve_class_or_fn(fn_name, registered_name, module, obj_type='function', full_config=config, custom_objects=custom_objects) + if 'shared_object_id' in config: + obj = get_shared_object(config['shared_object_id']) + if obj is not None: + return obj + cls = _retrieve_class_or_fn(class_name, registered_name, module, obj_type='class', full_config=config, custom_objects=custom_objects) + if isinstance(cls, types.FunctionType): + return cls + if not hasattr(cls, 'from_config'): + raise TypeError(f"Unable to reconstruct an instance of '{class_name}' because the class is missing a `from_config()` method. Full object config: {config}") + custom_obj_scope = object_registration.CustomObjectScope(custom_objects) + safe_mode_scope = SafeModeScope(safe_mode) + with custom_obj_scope, safe_mode_scope: + try: + instance = cls.from_config(inner_config) + except TypeError as e: + raise TypeError(f"{cls} could not be deserialized properly. Please ensure that components that are Python object instances (layers, models, etc.) returned by `get_config()` are explicitly deserialized in the model's `from_config()` method.\n\nconfig={config}.\n\nException encountered: {e}") + build_config = config.get('build_config', None) + if build_config and (not instance.built): + instance.build_from_config(build_config) + instance.built = True + compile_config = config.get('compile_config', None) + if compile_config: + instance.compile_from_config(compile_config) + instance.compiled = True + if 'shared_object_id' in config: + record_object_after_deserialization(instance, config['shared_object_id']) + return instance + +def _retrieve_class_or_fn(name, registered_name, module, obj_type, full_config, custom_objects=None): + if obj_type == 'function': + custom_obj = object_registration.get_registered_object(name, custom_objects=custom_objects) + else: + custom_obj = object_registration.get_registered_object(registered_name, custom_objects=custom_objects) + if custom_obj is not None: + return custom_obj + if module: + if module == 'keras' or module.startswith('keras.'): + api_name = module + '.' + name + obj = api_export.get_symbol_from_name(api_name) + if obj is not None: + return obj + if obj_type == 'function' and module == 'builtins': + for mod in BUILTIN_MODULES: + obj = api_export.get_symbol_from_name('keras.' + mod + '.' 
+ name) + if obj is not None: + return obj + filtered_dict = {k: v for (k, v) in custom_objects.items() if k.endswith(full_config['config'])} + if filtered_dict: + return next(iter(filtered_dict.values())) + try: + mod = importlib.import_module(module) + except ModuleNotFoundError: + raise TypeError(f"Could not deserialize {obj_type} '{name}' because its parent module {module} cannot be imported. Full object config: {full_config}") + obj = vars(mod).get(name, None) + if obj is None and registered_name is not None: + obj = vars(mod).get(registered_name, None) + if obj is not None: + return obj + raise TypeError(f"Could not locate {obj_type} '{name}'. Make sure custom classes are decorated with `@keras.saving.register_keras_serializable()`. Full object config: {full_config}") + +# File: keras-master/keras/src/trainers/compile_utils.py +from keras.src import losses as losses_module +from keras.src import metrics as metrics_module +from keras.src import ops +from keras.src import tree +from keras.src.utils.naming import get_object_name +from keras.src.utils.tracking import Tracker + +class MetricsList(metrics_module.Metric): + + def __init__(self, metrics, name='metrics_list', output_name=None): + super().__init__(name=name) + self.metrics = metrics + self.output_name = output_name + + def update_state(self, y_true, y_pred, sample_weight=None): + for m in self.metrics: + m.update_state(y_true, y_pred, sample_weight=sample_weight) + + def reset_state(self): + for m in self.metrics: + m.reset_state() + + def get_result(self): + return {m.name: m.result() for m in self.metrics} + + def get_config(self): + raise NotImplementedError + + @classmethod + def from_config(cls, config): + raise NotImplementedError + +def is_function_like(value): + if value is None: + return True + if isinstance(value, str): + return True + if callable(value): + return True + return False + +def is_binary_or_sparse_categorical(y_true, y_pred): + y_t_rank = len(y_true.shape) + y_p_rank = len(y_pred.shape) + y_t_last_dim = y_true.shape[-1] + y_p_last_dim = y_pred.shape[-1] + is_binary = y_p_last_dim == 1 + is_sparse_categorical = y_t_rank < y_p_rank or (y_t_last_dim == 1 and y_p_last_dim > 1) + return (is_binary, is_sparse_categorical) + +def get_metric(identifier, y_true, y_pred): + if identifier is None: + return None + if str(identifier).lower() not in ['accuracy', 'acc']: + metric_obj = metrics_module.get(identifier) + else: + (is_binary, is_sparse_categorical) = is_binary_or_sparse_categorical(y_true, y_pred) + if is_binary: + metric_obj = metrics_module.BinaryAccuracy(name=str(identifier)) + elif is_sparse_categorical: + metric_obj = metrics_module.SparseCategoricalAccuracy(name=str(identifier)) + else: + metric_obj = metrics_module.CategoricalAccuracy(name=str(identifier)) + if isinstance(identifier, str): + metric_name = identifier + else: + metric_name = get_object_name(metric_obj) + if not isinstance(metric_obj, metrics_module.Metric): + metric_obj = metrics_module.MeanMetricWrapper(metric_obj) + metric_obj.name = metric_name + return metric_obj + +def get_loss(identifier, y_true, y_pred): + if identifier is None: + return None + if str(identifier).lower() not in ['crossentropy', 'ce']: + loss_obj = losses_module.get(identifier) + else: + (is_binary, is_sparse_categorical) = is_binary_or_sparse_categorical(y_true, y_pred) + if is_binary: + loss_obj = losses_module.binary_crossentropy + elif is_sparse_categorical: + loss_obj = losses_module.sparse_categorical_crossentropy + else: + loss_obj = 
losses_module.categorical_crossentropy + if not isinstance(loss_obj, losses_module.Loss): + if isinstance(identifier, str): + loss_name = identifier + else: + loss_name = get_object_name(loss_obj) + loss_obj = losses_module.LossFunctionWrapper(loss_obj, name=loss_name) + return loss_obj + +class CompileMetrics(metrics_module.Metric): + + def __init__(self, metrics, weighted_metrics, name='compile_metric', output_names=None): + super().__init__(name=name) + if metrics and (not isinstance(metrics, (list, tuple, dict))): + raise ValueError(f'Expected `metrics` argument to be a list, tuple, or dict. Received instead: metrics={metrics} of type {type(metrics)}') + if weighted_metrics and (not isinstance(weighted_metrics, (list, tuple, dict))): + raise ValueError(f'Expected `weighted_metrics` argument to be a list, tuple, or dict. Received instead: weighted_metrics={weighted_metrics} of type {type(weighted_metrics)}') + self._user_metrics = metrics + self._user_weighted_metrics = weighted_metrics + self.built = False + self.name = 'compile_metrics' + self.output_names = output_names + + @property + def metrics(self): + if not self.built: + return [] + metrics = [] + for m in self._flat_metrics + self._flat_weighted_metrics: + if isinstance(m, MetricsList): + metrics.extend(m.metrics) + elif m is not None: + metrics.append(m) + return metrics + + @property + def variables(self): + if not self.built: + return [] + vars = [] + for m in self.metrics: + if m is not None: + vars.extend(m.variables) + return vars + + def build(self, y_true, y_pred): + if self.output_names: + output_names = self.output_names + elif isinstance(y_pred, dict): + output_names = sorted(list(y_pred.keys())) + elif isinstance(y_pred, (list, tuple)): + num_outputs = len(y_pred) + if all((hasattr(x, '_keras_history') for x in y_pred)): + output_names = [x._keras_history.operation.name for x in y_pred] + else: + output_names = None + else: + output_names = None + num_outputs = 1 + if output_names: + num_outputs = len(output_names) + y_pred = self._flatten_y(y_pred) + y_true = self._flatten_y(y_true) + metrics = self._user_metrics + weighted_metrics = self._user_weighted_metrics + self._flat_metrics = self._build_metrics_set(metrics, num_outputs, output_names, y_true, y_pred, argument_name='metrics') + self._flat_weighted_metrics = self._build_metrics_set(weighted_metrics, num_outputs, output_names, y_true, y_pred, argument_name='weighted_metrics') + self.built = True + + def _build_metrics_set(self, metrics, num_outputs, output_names, y_true, y_pred, argument_name): + flat_metrics = [] + if isinstance(metrics, dict): + for name in metrics.keys(): + if name not in output_names: + raise ValueError(f"In the dict argument `{argument_name}`, key '{name}' does not correspond to any model output. Received:\n{argument_name}={metrics}") + if num_outputs == 1: + if not metrics: + flat_metrics.append(None) + else: + if isinstance(metrics, dict): + metrics = tree.flatten(metrics) + if not isinstance(metrics, list): + metrics = [metrics] + if not all((is_function_like(m) for m in metrics)): + raise ValueError(f'Expected all entries in the `{argument_name}` list to be metric objects. 
Received instead:\n{argument_name}={metrics}') + flat_metrics.append(MetricsList([get_metric(m, y_true[0], y_pred[0]) for m in metrics if m is not None])) + elif isinstance(metrics, (list, tuple)): + if len(metrics) != len(y_pred): + raise ValueError(f'For a model with multiple outputs, when providing the `{argument_name}` argument as a list, it should have as many entries as the model has outputs. Received:\n{argument_name}={metrics}\nof length {len(metrics)} whereas the model has {len(y_pred)} outputs.') + for (idx, (mls, yt, yp)) in enumerate(zip(metrics, y_true, y_pred)): + if not isinstance(mls, list): + mls = [mls] + name = output_names[idx] if output_names else None + if not all((is_function_like(e) for e in mls)): + raise ValueError(f'All entries in the sublists of the `{argument_name}` list should be metric objects. Found the following sublist with unknown types: {mls}') + flat_metrics.append(MetricsList([get_metric(m, yt, yp) for m in mls if m is not None], output_name=name)) + elif isinstance(metrics, dict): + if output_names is None: + raise ValueError(f'Argument `{argument_name}` can only be provided as a dict when the model also returns a dict of outputs. Received {argument_name}={metrics}') + for name in metrics.keys(): + if not isinstance(metrics[name], list): + metrics[name] = [metrics[name]] + if not all((is_function_like(e) for e in metrics[name])): + raise ValueError(f"All entries in the sublists of the `{argument_name}` dict should be metric objects. At key '{name}', found the following sublist with unknown types: {metrics[name]}") + for (name, yt, yp) in zip(output_names, y_true, y_pred): + if name in metrics: + flat_metrics.append(MetricsList([get_metric(m, yt, yp) for m in metrics[name] if m is not None], output_name=name)) + else: + flat_metrics.append(None) + return flat_metrics + + def _flatten_y(self, y): + if isinstance(y, dict) and self.output_names: + result = [] + for name in self.output_names: + if name in y: + result.append(y[name]) + return result + return tree.flatten(y) + + def update_state(self, y_true, y_pred, sample_weight=None): + if not self.built: + self.build(y_true, y_pred) + y_true = self._flatten_y(y_true) + y_pred = self._flatten_y(y_pred) + for (m, y_t, y_p) in zip(self._flat_metrics, y_true, y_pred): + if m: + m.update_state(y_t, y_p) + if sample_weight is not None: + sample_weight = self._flatten_y(sample_weight) + if len(sample_weight) < len(y_true): + sample_weight = [sample_weight[0] for _ in range(len(y_true))] + else: + sample_weight = [None for _ in range(len(y_true))] + for (m, y_t, y_p, s_w) in zip(self._flat_weighted_metrics, y_true, y_pred, sample_weight): + if m: + m.update_state(y_t, y_p, s_w) + + def reset_state(self): + if not self.built: + return + for m in self._flat_metrics: + if m: + m.reset_state() + for m in self._flat_weighted_metrics: + if m: + m.reset_state() + + def result(self): + if not self.built: + raise ValueError('Cannot get result() since the metric has not yet been built.') + results = {} + unique_name_counters = {} + for mls in self._flat_metrics: + if not mls: + continue + for m in mls.metrics: + name = m.name + if mls.output_name: + name = f'{mls.output_name}_{name}' + if name not in unique_name_counters: + results[name] = m.result() + unique_name_counters[name] = 1 + else: + index = unique_name_counters[name] + unique_name_counters[name] += 1 + name = f'{name}_{index}' + results[name] = m.result() + for mls in self._flat_weighted_metrics: + if not mls: + continue + for m in mls.metrics: + name = m.name + 
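# --- Editorial usage sketch (not part of the Keras source; the weighted-metrics loop resumes below).
# `result()` flattens all per-output metrics into a single dict: names are prefixed with the
# output name, and a weighted metric colliding with its unweighted twin is renamed with a
# `weighted_` prefix by the branch below. A minimal single-output sketch using this class:
import numpy as np
from keras.src.trainers.compile_utils import CompileMetrics

cm = CompileMetrics(metrics=["mae"], weighted_metrics=["mae"])
y_true = np.array([[0.0], [1.0]])
y_pred = np.array([[0.1], [0.9]])
cm.update_state(y_true, y_pred, sample_weight=np.array([1.0, 0.5]))
print(cm.result())  # e.g. {'mae': ..., 'weighted_mae': ...}
# --- End editorial sketch.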
if mls.output_name: + name = f'{mls.output_name}_{name}' + if name not in unique_name_counters: + results[name] = m.result() + unique_name_counters[name] = 1 + else: + name = f'weighted_{m.name}' + if mls.output_name: + name = f'{mls.output_name}_{name}' + if name not in unique_name_counters: + unique_name_counters[name] = 1 + else: + index = unique_name_counters[name] + unique_name_counters[name] += 1 + name = f'{name}_{index}' + results[name] = m.result() + return results + + def get_config(self): + raise NotImplementedError + + @classmethod + def from_config(cls, config): + raise NotImplementedError + +class CompileLoss(losses_module.Loss): + + def __init__(self, loss, loss_weights=None, reduction='sum_over_batch_size', output_names=None): + if loss_weights and (not isinstance(loss_weights, (list, tuple, dict, float))): + raise ValueError(f'Expected `loss_weights` argument to be a float (single output case) or a list, tuple, or dict (multiple output case). Received instead: loss_weights={loss_weights} of type {type(loss_weights)}') + self._user_loss = loss + self._user_loss_weights = loss_weights + self.built = False + self.output_names = output_names + super().__init__(name='compile_loss', reduction=reduction) + self.inferred_output_names = None + self._metrics = [] + self._tracker = Tracker({'metrics': (lambda x: isinstance(x, metrics_module.Metric), self._metrics)}) + + @property + def metrics(self): + return self._metrics + + @property + def variables(self): + vars = [] + for m in self.metrics: + vars.extend(m.variables) + return vars + + def build(self, y_true, y_pred): + loss = self._user_loss + loss_weights = self._user_loss_weights + output_names = self._get_y_pred_output_names(y_pred) + inferred_output_names = output_names or self.output_names + if is_function_like(loss) and tree.is_nested(y_pred): + loss = tree.map_structure(lambda x: loss, y_pred) + if isinstance(loss, dict): + if inferred_output_names is None: + raise ValueError(f'Argument `loss` can only be provided as a dict when the model also returns a dict of outputs. 
Received loss={loss}') + filtered_y_pred_keys = [] + filtered_y_true_keys = [] + if isinstance(loss, dict): + loss_keys = set(loss.keys()) + if inferred_output_names is not None: + y_pred_keys = set(inferred_output_names) + if len(loss_keys - y_pred_keys) > 0: + raise KeyError(f"There are keys: {list(loss_keys - y_pred_keys)} in the `loss` argument, but they can't be found in the model's output (`y_pred`).") + filtered_y_pred_keys.extend(list(y_pred_keys - loss_keys)) + if isinstance(y_true, dict): + y_true_keys = set(y_true.keys()) + if len(loss_keys - y_true_keys) > 0: + raise KeyError(f"There are keys: {list(loss_keys - y_true_keys)} in the `loss` argument, but they can't be found in `y` (`y_true`).") + filtered_y_true_keys.extend(list(y_true_keys - loss_keys)) + filtered_y_pred_keys = set(filtered_y_pred_keys) + filtered_y_true_keys = set(filtered_y_true_keys) + (y_true, y_pred) = self._filter_unused_inputs(y_true, y_pred, filtered_y_true_keys, filtered_y_pred_keys, self.inferred_output_names) + flat_losses = tree.flatten(loss) + if loss_weights is None: + flat_loss_weights = [None] * len(flat_losses) + else: + flat_loss_weights = tree.flatten(loss_weights) + for loss_weight in flat_loss_weights: + if not isinstance(loss_weight, (int, float, type(None))): + raise TypeError(f'When providing the `loss_weights` argument, each element should be a Python int, float (the weighting coefficient corresponding to the loss for that output) or `None`.Received: loss_weights={loss_weights}') + if len(flat_loss_weights) != len(flat_losses): + raise ValueError(f'When providing the `loss_weights` argument, it should have equal length of `loss` argument. Received: loss_weights length={len(flat_loss_weights)}, loss length={len(flat_losses)}') + y_true = tree.flatten(y_true) + y_pred = tree.flatten(y_pred) + if len(y_pred) != len(flat_losses): + raise ValueError(f'For a model with multiple outputs, when providing the `loss` argument as a list, it should have as many entries as the model has outputs. 
Received:\nloss={loss}\nof length {len(flat_losses)} whereas the model has {len(y_pred)} outputs.') + flat_losses = [get_loss(identifier, _y_true, _y_pred) for (identifier, _y_true, _y_pred) in zip(flat_losses, y_true, y_pred)] + if len(flat_losses) > 1: + for (i, _loss) in enumerate(flat_losses): + if _loss is not None: + if inferred_output_names is not None and len(inferred_output_names) == len(flat_losses): + name = inferred_output_names[i] + else: + name = _loss.name + name += '_loss' + self._tracker.add_to_store('metrics', metrics_module.Mean(name=name)) + self.flat_losses = flat_losses + self.flat_loss_weights = flat_loss_weights + self.filtered_y_true_keys = filtered_y_true_keys + self.filtered_y_pred_keys = filtered_y_pred_keys + self.inferred_output_names = inferred_output_names + self.built = True + + def _get_y_pred_output_names(self, y_pred): + if isinstance(y_pred, dict): + output_names = sorted(y_pred.keys()) + else: + y_pred = tree.flatten(y_pred) + if all((hasattr(x, '_keras_history') for x in y_pred)): + output_names = [x._keras_history.operation.name for x in y_pred] + else: + output_names = None + return output_names + + def _filter_unused_inputs(self, y_true, y_pred, filtered_y_true_keys, filtered_y_pred_keys, output_names): + if len(filtered_y_true_keys) > 0: + if isinstance(y_true, dict): + for k in filtered_y_true_keys: + y_true.pop(k) + if len(filtered_y_pred_keys) > 0: + if isinstance(y_pred, dict): + for k in filtered_y_pred_keys: + y_pred.pop(k) + elif output_names is not None: + y_pred = [] + for (x, output_name) in zip(tree.flatten(y_pred), output_names): + if output_name not in filtered_y_pred_keys: + y_pred.append(x) + return (y_true, y_pred) + + def __call__(self, y_true, y_pred, sample_weight=None): + with ops.name_scope(self.name): + return self.call(y_true, y_pred, sample_weight) + + def call(self, y_true, y_pred, sample_weight=None): + if not self.built: + self.build(y_true, y_pred) + else: + (y_true, y_pred) = self._filter_unused_inputs(y_true, y_pred, self.filtered_y_true_keys, self.filtered_y_pred_keys, self.inferred_output_names) + y_true = tree.flatten(y_true) + y_pred = tree.flatten(y_pred) + if sample_weight is not None: + sample_weight = tree.flatten(sample_weight) + if len(sample_weight) < len(y_true): + sample_weight = [sample_weight[0] for _ in range(len(y_true))] + else: + sample_weight = [None for _ in y_true] + metrics = [None] if len(self.metrics) == 0 else self.metrics + loss_values = [] + for (loss_fn, y_t, y_p, loss_weight, sample_weight, metric) in zip(self.flat_losses, y_true, y_pred, self.flat_loss_weights, sample_weight, metrics): + if loss_fn: + value = ops.cast(loss_fn(y_t, y_p, sample_weight), dtype=self.dtype) + if loss_weight is not None: + value = ops.multiply(value, loss_weight) + loss_values.append(value) + if metric: + metric.update_state(value, sample_weight=tree.flatten(y_p)[0].shape[0]) + if loss_values: + total_loss = sum(loss_values) + return total_loss + return None + + def get_config(self): + raise NotImplementedError + + @classmethod + def from_config(cls, config): + raise NotImplementedError + +# File: keras-master/keras/src/trainers/data_adapters/__init__.py +import types +from keras.src.distribution import distribution_lib +from keras.src.trainers.data_adapters import array_data_adapter +from keras.src.trainers.data_adapters import py_dataset_adapter +from keras.src.trainers.data_adapters.array_data_adapter import ArrayDataAdapter +from keras.src.trainers.data_adapters.generator_data_adapter import 
GeneratorDataAdapter +from keras.src.trainers.data_adapters.py_dataset_adapter import PyDatasetAdapter +from keras.src.trainers.data_adapters.tf_dataset_adapter import TFDatasetAdapter +from keras.src.trainers.data_adapters.torch_data_loader_adapter import TorchDataLoaderAdapter + +def get_data_adapter(x, y=None, sample_weight=None, batch_size=None, steps_per_epoch=None, shuffle=False, class_weight=None): + distribution = distribution_lib.distribution() + if getattr(distribution, '_is_multi_process', False) and (not is_tf_dataset(x)): + raise ValueError(f'When using multi-worker distribution, the data must be provided as a `tf.data.Dataset` instance. Received: type(x)={type(x)}.') + if array_data_adapter.can_convert_arrays((x, y, sample_weight)): + return ArrayDataAdapter(x, y, sample_weight=sample_weight, class_weight=class_weight, shuffle=shuffle, batch_size=batch_size, steps=steps_per_epoch) + elif is_tf_dataset(x): + if y is not None: + raise_unsupported_arg('y', 'the targets', 'tf.data.Dataset') + if sample_weight is not None: + raise_unsupported_arg('sample_weights', 'the sample weights', 'tf.data.Dataset') + return TFDatasetAdapter(x, class_weight=class_weight, distribution=distribution) + elif isinstance(x, py_dataset_adapter.PyDataset): + if y is not None: + raise_unsupported_arg('y', 'the targets', 'PyDataset') + if sample_weight is not None: + raise_unsupported_arg('sample_weights', 'the sample weights', 'PyDataset') + return PyDatasetAdapter(x, class_weight=class_weight, shuffle=shuffle) + elif is_torch_dataloader(x): + if y is not None: + raise_unsupported_arg('y', 'the targets', 'torch DataLoader') + if sample_weight is not None: + raise_unsupported_arg('sample_weights', 'the sample weights', 'torch DataLoader') + if class_weight is not None: + raise ValueError(f'Argument `class_weight` is not supported for torch DataLoader inputs. Received: class_weight={class_weight}') + return TorchDataLoaderAdapter(x) + elif isinstance(x, types.GeneratorType): + if y is not None: + raise_unsupported_arg('y', 'the targets', 'PyDataset') + if sample_weight is not None: + raise_unsupported_arg('sample_weights', 'the sample weights', 'PyDataset') + if class_weight is not None: + raise ValueError(f'Argument `class_weight` is not supported for Python generator inputs. Received: class_weight={class_weight}') + return GeneratorDataAdapter(x) + else: + raise ValueError(f'Unrecognized data type: x={x} (of type {type(x)})') + +def raise_unsupported_arg(arg_name, arg_description, input_type): + raise ValueError(f'When providing `x` as a {input_type}, `{arg_name}` should not be passed. Instead, {arg_description} should be included as part of the {input_type}.') + +def is_tf_dataset(x): + if hasattr(x, '__class__'): + for parent in x.__class__.__mro__: + if parent.__name__ in ('DatasetV2', 'DistributedDataset') and 'tensorflow.python.' 
in str(parent.__module__): + return True + return False + +def is_torch_dataloader(x): + if hasattr(x, '__class__'): + for parent in x.__class__.__mro__: + if parent.__name__ == 'DataLoader' and 'torch.utils.data' in str(parent.__module__): + return True + return False + +# File: keras-master/keras/src/trainers/data_adapters/array_data_adapter.py +import functools +import math +import numpy as np +from keras.src import tree +from keras.src.trainers.data_adapters import array_slicing +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.data_adapters.data_adapter import DataAdapter + +class ArrayDataAdapter(DataAdapter): + + def __init__(self, x, y=None, sample_weight=None, batch_size=None, steps=None, shuffle=False, class_weight=None): + if not can_convert_arrays((x, y, sample_weight)): + raise ValueError(f'Expected all elements of `x` to be array-like. Received invalid types: x={x}') + if sample_weight is not None: + if class_weight is not None: + raise ValueError('You cannot `class_weight` and `sample_weight` at the same time.') + if tree.is_nested(y): + if isinstance(sample_weight, (list, tuple, dict)): + try: + tree.assert_same_structure(y, sample_weight) + except ValueError: + raise ValueError(f'You should provide one `sample_weight` array per output in `y`. The two structures did not match:\n- y: {y}\n- sample_weight: {sample_weight}\n') + else: + is_samplewise = len(sample_weight.shape) == 1 or (len(sample_weight.shape) == 2 and sample_weight.shape[1] == 1) + if not is_samplewise: + raise ValueError('For a model with multiple outputs, when providing a single `sample_weight` array, it should only have one scalar score per sample (i.e. shape `(num_samples,)`). If you want to use non-scalar sample weights, pass a `sample_weight` argument with one array per model output.') + sample_weight = tree.map_structure(lambda _: sample_weight, y) + if class_weight is not None: + if tree.is_nested(y): + raise ValueError('`class_weight` is only supported for Models with a single output.') + sample_weight = data_adapter_utils.class_weight_to_sample_weights(y, class_weight) + inputs = data_adapter_utils.pack_x_y_sample_weight(x, y, sample_weight) + data_adapter_utils.check_data_cardinality(inputs) + num_samples = set((i.shape[0] for i in tree.flatten(inputs))).pop() + self._num_samples = num_samples + self._inputs = inputs + if not batch_size: + batch_size = int(math.ceil(num_samples / steps)) if steps else 32 + self._size = int(math.ceil(num_samples / batch_size)) + self._batch_size = batch_size + self._partial_batch_size = num_samples % batch_size + self._shuffle = shuffle + + def get_numpy_iterator(self): + inputs = array_slicing.convert_to_sliceable(self._inputs, target_backend='numpy') + + def slice_and_convert_to_numpy(sliceable, indices=None): + x = sliceable[indices] + x = sliceable.convert_to_numpy(x) + return x + return self._get_iterator(slice_and_convert_to_numpy, inputs) + + def get_tf_dataset(self): + from keras.src.utils.module_utils import tensorflow as tf + shuffle = self._shuffle + batch_size = self._batch_size + num_samples = self._num_samples + num_full_batches = int(self._num_samples // batch_size) + indices_dataset = tf.data.Dataset.range(1) + + def permutation(_): + indices = tf.range(num_samples, dtype=tf.int64) + if shuffle and shuffle != 'batch': + indices = tf.random.shuffle(indices) + return indices + indices_dataset = indices_dataset.map(permutation).prefetch(1) + + def slice_batch_indices(indices): + num_in_full_batch = 
num_full_batches * batch_size + first_k_indices = tf.slice(indices, [0], [num_in_full_batch]) + first_k_indices = tf.reshape(first_k_indices, [num_full_batches, batch_size]) + flat_dataset = tf.data.Dataset.from_tensor_slices(first_k_indices) + if self._partial_batch_size: + index_remainder = tf.data.Dataset.from_tensors(tf.slice(indices, [num_in_full_batch], [self._partial_batch_size])) + flat_dataset = flat_dataset.concatenate(index_remainder) + return flat_dataset + + def slice_inputs(indices_dataset, inputs): + inputs = array_slicing.convert_to_sliceable(self._inputs, target_backend='tensorflow') + inputs = tree.lists_to_tuples(inputs) + dataset = tf.data.Dataset.zip((indices_dataset, tf.data.Dataset.from_tensors(inputs).repeat())) + + def grab_batch(i, data): + + def grab_one(x): + if isinstance(x, array_slicing.TensorflowSparseWrapper): + return array_slicing.slice_tensorflow_sparse_wrapper(x, i) + if isinstance(x, (list, tuple, dict)): + return None + if tf.is_tensor(x): + return tf.gather(x, i, axis=0) + return x + return tree.traverse(grab_one, data) + dataset = dataset.map(grab_batch, num_parallel_calls=tf.data.AUTOTUNE) + options = tf.data.Options() + options.experimental_optimization.apply_default_optimizations = False + if self._shuffle: + options.experimental_external_state_policy = tf.data.experimental.ExternalStatePolicy.IGNORE + dataset = dataset.with_options(options) + return dataset + indices_dataset = indices_dataset.flat_map(slice_batch_indices) + if shuffle == 'batch': + indices_dataset = indices_dataset.map(tf.random.shuffle) + dataset = slice_inputs(indices_dataset, self._inputs) + options = tf.data.Options() + options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA + dataset = dataset.with_options(options) + return dataset.prefetch(tf.data.AUTOTUNE) + + def get_jax_iterator(self): + inputs = array_slicing.convert_to_sliceable(self._inputs, target_backend='jax') + + def slice_and_convert_to_jax(sliceable, indices=None): + x = sliceable[indices] + x = sliceable.convert_to_jax_compatible(x) + return x + return self._get_iterator(slice_and_convert_to_jax, inputs) + + def get_torch_dataloader(self): + import torch + from keras.src.backend.torch.core import convert_to_tensor + + class ArrayDataset(torch.utils.data.Dataset): + + def __init__(self, array): + self.array = array + + def __getitems__(self, indices): + + def slice_and_convert(sliceable): + x = sliceable[indices] + x = sliceable.convert_to_torch_compatible(x) + x = convert_to_tensor(x) + return x + return tree.map_structure(slice_and_convert, self.array) + + def __len__(self): + return len(self.array[0]) + + class RandomBatchSampler(torch.utils.data.Sampler): + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + for batch in self.sampler: + yield [batch[i] for i in torch.randperm(len(batch))] + + def __len__(self): + return len(self.sampler) + if self._shuffle == 'batch': + batch_sampler = RandomBatchSampler(torch.utils.data.BatchSampler(range(self._num_samples), batch_size=self._batch_size, drop_last=False)) + elif self._shuffle: + batch_sampler = torch.utils.data.BatchSampler(torch.utils.data.RandomSampler(range(self._num_samples)), batch_size=self._batch_size, drop_last=False) + else: + batch_sampler = torch.utils.data.BatchSampler(torch.utils.data.SequentialSampler(range(self._num_samples)), batch_size=self._batch_size, drop_last=False) + + def no_op_collate(batch): + return batch + inputs = array_slicing.convert_to_sliceable(self._inputs, 
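# ----------------------------------------------------------------------
# [Editor's aside] The three sampler set-ups in `get_torch_dataloader`
# above differ only in ordering. A sketch of the `shuffle=False` case
# with plain torch (assumes torch is installed):
#
#   import torch
#   sampler = torch.utils.data.BatchSampler(
#       torch.utils.data.SequentialSampler(range(10)),
#       batch_size=4,
#       drop_last=False,
#   )
#   print(list(sampler))  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
#
# `shuffle=True` swaps in a `RandomSampler` (a global shuffle), while
# `shuffle='batch'` keeps batches contiguous but permutes the indices
# inside each batch via the `RandomBatchSampler` wrapper above.
# ----------------------------------------------------------------------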
target_backend='torch') + dataset = ArrayDataset(inputs) + return torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=no_op_collate) + + def _get_iterator(self, slice_and_convert_fn, inputs): + global_permutation = None + if self._shuffle and self._shuffle != 'batch': + global_permutation = np.random.permutation(self._num_samples) + for i in range(self._size): + start = i * self._batch_size + stop = min((i + 1) * self._batch_size, self._num_samples) + if self._shuffle == 'batch': + indices = np.random.permutation(stop - start) + start + elif self._shuffle: + indices = global_permutation[start:stop] + else: + indices = slice(start, stop) + slice_indices_and_convert_fn = functools.partial(slice_and_convert_fn, indices=indices) + yield tree.map_structure(slice_indices_and_convert_fn, inputs) + + @property + def num_batches(self): + return self._size + + @property + def batch_size(self): + return self._batch_size + + @property + def has_partial_batch(self): + return self._partial_batch_size > 0 + + @property + def partial_batch_size(self): + return self._partial_batch_size or None + +def can_convert_arrays(arrays): + return all(tree.flatten(tree.map_structure(array_slicing.can_slice_array, arrays))) + +# File: keras-master/keras/src/trainers/data_adapters/array_slicing.py +import collections +import math +import numpy as np +from keras.src import backend +from keras.src import tree +from keras.src.trainers.data_adapters import data_adapter_utils +try: + import pandas +except ImportError: + pandas = None +ARRAY_TYPES = (np.ndarray,) +if pandas: + ARRAY_TYPES = ARRAY_TYPES + (pandas.Series, pandas.DataFrame) + +class Sliceable: + + def __init__(self, array): + self.array = array + + def __getitem__(self, indices): + return self.array[indices] + + @classmethod + def cast(cls, x, dtype): + return x.astype(dtype) + + @classmethod + def convert_to_numpy(cls, x): + return x + + @classmethod + def convert_to_tf_dataset_compatible(cls, x): + return x + + @classmethod + def convert_to_jax_compatible(cls, x): + return x + + @classmethod + def convert_to_torch_compatible(cls, x): + return x + +class NumpySliceable(Sliceable): + pass + +class TensorflowSliceable(Sliceable): + + def __getitem__(self, indices): + from keras.src.utils.module_utils import tensorflow as tf + if isinstance(indices, slice): + return self.array[indices] + else: + return tf.gather(self.array, indices, axis=0) + + @classmethod + def cast(cls, x, dtype): + from keras.src.backend.tensorflow.core import cast + return cast(x, dtype) + + @classmethod + def convert_to_numpy(cls, x): + from keras.src.backend.tensorflow.core import convert_to_numpy + return convert_to_numpy(x) + +class TensorflowRaggedSliceable(TensorflowSliceable): + + @classmethod + def convert_to_jax_compatible(cls, x): + return cls.convert_to_numpy(x) + + @classmethod + def convert_to_torch_compatible(cls, x): + return x.to_tensor() + +class TensorflowSparseSliceable(TensorflowSliceable): + + def __init__(self, array): + super().__init__(to_tensorflow_sparse_wrapper(array)) + + @property + def shape(self): + return self.array.sparse.shape + + def __getitem__(self, indices): + return slice_tensorflow_sparse_wrapper(self.array, indices) + + @classmethod + def convert_to_tf_dataset_compatible(cls, x): + return to_tensorflow_sparse_wrapper(x) + + @classmethod + def convert_to_jax_compatible(cls, x): + return data_adapter_utils.tf_sparse_to_jax_sparse(x) + + @classmethod + def convert_to_torch_compatible(cls, x): + from keras.src.backend.tensorflow 
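# ----------------------------------------------------------------------
# [Editor's aside] The `Sliceable` wrappers above give every supported
# array library one row-selection interface. A sketch of the NumPy case
# (assumes the classes as defined here):
#
#   import numpy as np
#   s = NumpySliceable(np.arange(12).reshape(6, 2))
#   print(s[slice(0, 2)])  # contiguous slice -> rows 0..1
#   print(s[[0, 3, 5]])    # fancy indexing -> rows 0, 3, 5
#   # For NumPy-backed data, convert_to_numpy is the identity:
#   print(NumpySliceable.convert_to_numpy(s[[0, 3]]))
# ----------------------------------------------------------------------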
import sparse as tf_sparse + return tf_sparse.sparse_to_dense(x) + +class JaxSparseSliceable(Sliceable): + + def __getitem__(self, indices): + return self.array[indices, ...] + + @classmethod + def convert_to_numpy(cls, x): + from keras.src.backend.jax.core import convert_to_numpy + return convert_to_numpy(x) + + @classmethod + def convert_to_tf_dataset_compatible(cls, array): + return to_tensorflow_sparse_wrapper(data_adapter_utils.jax_sparse_to_tf_sparse(array)) + + @classmethod + def convert_to_torch_compatible(cls, x): + return x.todense() + +class TorchSliceable(Sliceable): + + @classmethod + def cast(cls, x, dtype): + from keras.src.backend.torch.core import cast + return cast(x, dtype) + + @classmethod + def convert_to_numpy(cls, x): + from keras.src.backend.torch.core import convert_to_numpy + return convert_to_numpy(x) + +class PandasSliceable(Sliceable): + + def __getitem__(self, indices): + return self.array.iloc[indices] + + @classmethod + def convert_to_numpy(cls, x): + return x.to_numpy() + + @classmethod + def convert_to_tf_dataset_compatible(cls, x): + return cls.convert_to_numpy(x) + + @classmethod + def convert_to_jax_compatible(cls, x): + return cls.convert_to_numpy(x) + + @classmethod + def convert_to_torch_compatible(cls, x): + return cls.convert_to_numpy(x) + +class PandasDataFrameSliceable(PandasSliceable): + pass + +class PandasSeriesSliceable(PandasSliceable): + + @classmethod + def convert_to_numpy(cls, x): + return np.expand_dims(x.to_numpy(), axis=-1) + +class ScipySparseSliceable(Sliceable): + + def __init__(self, array): + super().__init__(array.tocsr()) + + @classmethod + def convert_to_numpy(cls, x): + return x.todense() + + @classmethod + def convert_to_tf_dataset_compatible(cls, x): + return to_tensorflow_sparse_wrapper(data_adapter_utils.scipy_sparse_to_tf_sparse(x)) + + @classmethod + def convert_to_jax_compatible(cls, x): + return data_adapter_utils.scipy_sparse_to_jax_sparse(x) + + @classmethod + def convert_to_torch_compatible(cls, x): + return x.todense() +TensorflowSparseWrapper = collections.namedtuple('TensorflowSparseWrapper', ['sparse', 'ragged_indices', 'ragged_values']) + +def to_tensorflow_sparse_wrapper(sparse): + from keras.src.utils.module_utils import tensorflow as tf + row_ids = sparse.indices[:, 0] + row_splits = tf.experimental.RowPartition.from_value_rowids(row_ids).row_splits() + ragged_indices = tf.cast(tf.RaggedTensor.from_row_splits(sparse.indices, row_splits), tf.int64) + ragged_values = tf.RaggedTensor.from_row_splits(sparse.values, row_splits) + return TensorflowSparseWrapper(sparse, ragged_indices, ragged_values) + +def slice_tensorflow_sparse_wrapper(sparse_wrapper, indices): + from keras.src.utils.module_utils import tensorflow as tf + if isinstance(indices, slice): + sparse_indices = sparse_wrapper.ragged_indices[indices] + sparse_values = sparse_wrapper.ragged_values[indices] + batch_dim = indices.stop - indices.start + else: + sparse_indices = tf.gather(sparse_wrapper.ragged_indices, indices) + sparse_values = tf.gather(sparse_wrapper.ragged_values, indices) + if isinstance(indices, list): + batch_dim = len(indices) + else: + batch_dim = indices.shape[0] + if batch_dim is None: + batch_dim = tf.shape(indices)[0] + row_ids = sparse_indices.value_rowids() + sparse_indices = sparse_indices.flat_values[:, 1:] + sparse_indices = tf.concat([tf.expand_dims(row_ids, -1), sparse_indices], axis=1) + sparse_values = sparse_values.flat_values + sparse_shape = (batch_dim,) + tuple(sparse_wrapper.sparse.shape.as_list()[1:]) + return 
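# ----------------------------------------------------------------------
# [Editor's aside] `PandasSeriesSliceable` above slices through `.iloc`
# and promotes a Series to a column vector on conversion; a sketch
# (assumes pandas is installed):
#
#   import pandas as pd
#   col = PandasSeriesSliceable(pd.Series([1.0, 2.0, 3.0]))
#   batch = col[slice(0, 2)]  # .iloc[0:2] -> Series of length 2
#   print(PandasSeriesSliceable.convert_to_numpy(batch).shape)  # (2, 1)
# ----------------------------------------------------------------------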
tf.SparseTensor(sparse_indices, sparse_values, sparse_shape) + +def can_slice_array(x): + return x is None or isinstance(x, ARRAY_TYPES) or data_adapter_utils.is_tensorflow_tensor(x) or data_adapter_utils.is_jax_array(x) or data_adapter_utils.is_torch_tensor(x) or data_adapter_utils.is_scipy_sparse(x) or hasattr(x, '__array__') + +def convert_to_sliceable(arrays, target_backend=None): + + def convert_single_array(x): + if x is None: + return x + if isinstance(x, np.ndarray): + sliceable_class = NumpySliceable + elif data_adapter_utils.is_tensorflow_tensor(x): + if data_adapter_utils.is_tensorflow_ragged(x): + sliceable_class = TensorflowRaggedSliceable + elif data_adapter_utils.is_tensorflow_sparse(x): + sliceable_class = TensorflowSparseSliceable + else: + sliceable_class = TensorflowSliceable + elif data_adapter_utils.is_jax_array(x): + if data_adapter_utils.is_jax_sparse(x): + sliceable_class = JaxSparseSliceable + else: + x = np.asarray(x) + sliceable_class = NumpySliceable + elif data_adapter_utils.is_torch_tensor(x): + sliceable_class = TorchSliceable + elif pandas is not None and isinstance(x, pandas.DataFrame): + sliceable_class = PandasDataFrameSliceable + elif pandas is not None and isinstance(x, pandas.Series): + sliceable_class = PandasSeriesSliceable + elif data_adapter_utils.is_scipy_sparse(x): + sliceable_class = ScipySparseSliceable + elif hasattr(x, '__array__'): + x = np.asarray(x) + sliceable_class = NumpySliceable + else: + raise ValueError(f'Expected a NumPy array, tf.Tensor, tf.RaggedTensor, tf.SparseTensor, jax.numpy.ndarray, jax.experimental.sparse.JAXSparse, torch.Tensor, Pandas DataFrame, or Pandas Series. Received invalid input: {x} (of type {type(x)})') + + def is_non_floatx_float(dtype): + return not dtype == object and backend.is_float_dtype(dtype) and (not backend.standardize_dtype(dtype) == backend.floatx()) + cast_dtype = None + if pandas is not None and isinstance(x, pandas.DataFrame): + if any((is_non_floatx_float(d) for d in x.dtypes.values)): + cast_dtype = backend.floatx() + elif is_non_floatx_float(x.dtype): + cast_dtype = backend.floatx() + if cast_dtype is not None: + x = sliceable_class.cast(x, cast_dtype) + if target_backend is None: + return sliceable_class(x) + if target_backend == 'tensorflow': + return sliceable_class.convert_to_tf_dataset_compatible(x) + if target_backend == 'jax' and sliceable_class in (TensorflowSliceable, TorchSliceable): + x = np.asarray(x) + sliceable_class = NumpySliceable + return sliceable_class(x) + return tree.map_structure(convert_single_array, arrays) + +def train_validation_split(arrays, validation_split): + flat_arrays = tree.flatten(arrays) + unsplitable = [type(t) for t in flat_arrays if not can_slice_array(t)] + if unsplitable: + raise ValueError(f'Argument `validation_split` is only supported for tensors or NumPy arrays. Found incompatible type in the input: {unsplitable}') + if all((t is None for t in flat_arrays)): + return (arrays, arrays) + first_non_none = None + for t in flat_arrays: + if t is not None: + first_non_none = t + break + batch_dim = int(first_non_none.shape[0]) + split_at = int(math.floor(batch_dim * (1.0 - validation_split))) + if split_at == 0 or split_at == batch_dim: + raise ValueError(f'Training data contains {batch_dim} samples, which is not sufficient to split it into a validation and training set as specified by `validation_split={validation_split}`.
Either provide more data, or a different value for the `validation_split` argument.') + + def _split(t, start, end): + if t is None: + return t + return t[start:end] + sliceables = convert_to_sliceable(arrays) + train_arrays = tree.map_structure(lambda x: _split(x, start=0, end=split_at), sliceables) + val_arrays = tree.map_structure(lambda x: _split(x, start=split_at, end=batch_dim), sliceables) + return (train_arrays, val_arrays) + +# File: keras-master/keras/src/trainers/data_adapters/data_adapter.py +class DataAdapter: + + def get_numpy_iterator(self): + raise NotImplementedError + + def get_tf_dataset(self): + raise NotImplementedError + + def get_jax_iterator(self): + raise NotImplementedError + + def get_torch_dataloader(self): + raise NotImplementedError + + @property + def num_batches(self): + raise NotImplementedError + + @property + def batch_size(self): + raise NotImplementedError + + @property + def has_partial_batch(self): + raise NotImplementedError + + @property + def partial_batch_size(self): + raise NotImplementedError + + def on_epoch_begin(self): + pass + + def on_epoch_end(self): + pass + +# File: keras-master/keras/src/trainers/data_adapters/data_adapter_utils.py +import numpy as np +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export +NUM_BATCHES_FOR_TENSOR_SPEC = 2 + +@keras_export('keras.utils.unpack_x_y_sample_weight') +def unpack_x_y_sample_weight(data): + if isinstance(data, list): + data = tuple(data) + if not isinstance(data, tuple): + return (data, None, None) + elif len(data) == 1: + return (data[0], None, None) + elif len(data) == 2: + return (data[0], data[1], None) + elif len(data) == 3: + return (data[0], data[1], data[2]) + error_msg = f'Data is expected to be in format `x`, `(x,)`, `(x, y)`, or `(x, y, sample_weight)`, found: {data}' + raise ValueError(error_msg) + +@keras_export('keras.utils.pack_x_y_sample_weight') +def pack_x_y_sample_weight(x, y=None, sample_weight=None): + if y is None: + if not isinstance(x, (tuple, list)): + return x + else: + return (x,) + elif sample_weight is None: + return (x, y) + else: + return (x, y, sample_weight) + +def list_to_tuple(maybe_list): + if isinstance(maybe_list, list): + return tuple(maybe_list) + return maybe_list + +def check_data_cardinality(data): + num_samples = set((int(i.shape[0]) for i in tree.flatten(data))) + if len(num_samples) > 1: + msg = 'Data cardinality is ambiguous. Make sure all arrays contain the same number of samples.' + for (label, single_data) in zip(['x', 'y', 'sample_weight'], data): + sizes = ', '.join((str(i.shape[0]) for i in tree.flatten(single_data))) + msg += f"'{label}' sizes: {sizes}\n" + raise ValueError(msg) + +def class_weight_to_sample_weights(y, class_weight): + sample_weight = np.ones(shape=(y.shape[0],), dtype=backend.floatx()) + if len(y.shape) > 1: + if y.shape[-1] != 1: + y = np.argmax(y, axis=-1) + else: + y = np.squeeze(y, axis=-1) + y = np.round(y).astype('int32') + for i in range(y.shape[0]): + sample_weight[i] = class_weight.get(int(y[i]), 1.0) + return sample_weight + +def get_tensor_spec(batches): + from keras.src.utils.module_utils import tensorflow as tf + + def get_single_tensor_spec(*tensors): + x = tensors[0] + rank = len(x.shape) + if rank < 1: + raise ValueError(f'When passing a dataset to a Keras model, the arrays must be at least rank 1. 
Received: {x} of rank {len(x.shape)}.') + for t in tensors: + if len(t.shape) != rank: + raise ValueError(f'When passing a dataset to a Keras model, the corresponding arrays in each batch must have the same rank. Received: {x} and {t}') + shape = [] + for dims in zip(*[list(x.shape) for x in tensors]): + dims_set = set(dims) + shape.append(dims_set.pop() if len(dims_set) == 1 else None) + shape[0] = None + dtype = backend.standardize_dtype(x.dtype) + if isinstance(x, tf.RaggedTensor): + return tf.RaggedTensorSpec(shape=shape, dtype=dtype) + if isinstance(x, tf.SparseTensor) or is_scipy_sparse(x) or is_jax_sparse(x): + return tf.SparseTensorSpec(shape=shape, dtype=dtype) + else: + return tf.TensorSpec(shape=shape, dtype=dtype) + return tree.map_structure(get_single_tensor_spec, *batches) + +def get_jax_iterator(iterable): + import jax + import jax.experimental.sparse as jax_sparse + + def convert_to_jax_compatible(x): + if isinstance(x, (jax.Array, jax_sparse.JAXSparse, np.ndarray)): + return x + elif is_scipy_sparse(x): + return scipy_sparse_to_jax_sparse(x) + elif is_tensorflow_sparse(x): + return tf_sparse_to_jax_sparse(x) + else: + return np.asarray(x) + for batch in iterable: + yield tree.map_structure(convert_to_jax_compatible, batch) + +def get_numpy_iterator(iterable): + + def convert_to_numpy(x): + if not isinstance(x, np.ndarray): + if hasattr(x, '__array__'): + if is_torch_tensor(x): + x = x.cpu() + x = np.asarray(x) + return x + for batch in iterable: + yield tree.map_structure(convert_to_numpy, batch) + +def get_torch_dataloader(iterable): + import torch.utils.data as torch_data + from keras.src.backend.torch.core import convert_to_tensor + + class ConverterIterableDataset(torch_data.IterableDataset): + + def __init__(self, iterable): + self.iterable = iterable + + def __iter__(self): + for batch in self.iterable: + yield tree.map_structure(convert_to_tensor, batch) + dataset = ConverterIterableDataset(iterable) + return torch_data.DataLoader(dataset, batch_size=None) + +def is_tensorflow_tensor(value): + if hasattr(value, '__class__'): + if value.__class__.__name__ in ('RaggedTensor', 'SparseTensor'): + return 'tensorflow.python.' in str(value.__class__.__module__) + for parent in value.__class__.__mro__: + if parent.__name__ == 'Tensor' and 'tensorflow.python.' in str(parent.__module__): + return True + return False + +def is_tensorflow_ragged(value): + if hasattr(value, '__class__'): + return value.__class__.__name__ == 'RaggedTensor' and 'tensorflow.python.' in str(value.__class__.__module__) + return False + +def is_tensorflow_sparse(value): + if hasattr(value, '__class__'): + return value.__class__.__name__ == 'SparseTensor' and 'tensorflow.python.'
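# ----------------------------------------------------------------------
# [Editor's aside] The `is_*` helpers above are duck-typed on class and
# module names, so no framework import is required. A sketch with a
# hypothetical stand-in class (not a real TensorFlow type) that
# satisfies the `is_tensorflow_ragged` check:
#
#   class RaggedTensor:
#       pass
#   RaggedTensor.__module__ = 'tensorflow.python.ops.ragged.ragged_tensor'
#
#   print(is_tensorflow_ragged(RaggedTensor()))  # True: name and module match
#   print(is_tensorflow_ragged([1, 2, 3]))       # False
# ----------------------------------------------------------------------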
in str(value.__class__.__module__) + return False + +def is_jax_array(value): + if hasattr(value, '__class__'): + for parent in value.__class__.__mro__: + if parent.__name__ == 'Array' and str(parent.__module__) == 'jax': + return True + return is_jax_sparse(value) + +def is_jax_sparse(value): + if hasattr(value, '__class__'): + return str(value.__class__.__module__).startswith('jax.experimental.sparse') + return False + +def is_torch_tensor(value): + if hasattr(value, '__class__'): + for parent in value.__class__.__mro__: + if parent.__name__ == 'Tensor' and str(parent.__module__).endswith('torch'): + return True + return False + +def is_scipy_sparse(x): + return str(x.__class__.__module__).startswith('scipy.sparse') and hasattr(x, 'tocoo') + +def scipy_sparse_to_tf_sparse(x): + from keras.src.utils.module_utils import tensorflow as tf + coo = x.tocoo() + indices = np.concatenate((np.expand_dims(coo.row, 1), np.expand_dims(coo.col, 1)), axis=1) + return tf.SparseTensor(indices, coo.data, coo.shape) + +def scipy_sparse_to_jax_sparse(x): + import jax + import jax.experimental.sparse as jax_sparse + with jax.default_device(jax.local_devices(backend='cpu')[0]): + return jax_sparse.BCOO.from_scipy_sparse(x) + +def tf_sparse_to_jax_sparse(x): + import jax + import jax.experimental.sparse as jax_sparse + values = np.asarray(x.values) + indices = np.asarray(x.indices) + with jax.default_device(jax.local_devices(backend='cpu')[0]): + return jax_sparse.BCOO((values, indices), shape=x.shape) + +def jax_sparse_to_tf_sparse(x): + from keras.src.utils.module_utils import tensorflow as tf + return tf.SparseTensor(x.indices, x.data, x.shape) + +# File: keras-master/keras/src/trainers/data_adapters/generator_data_adapter.py +import itertools +from keras.src import tree +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.data_adapters.data_adapter import DataAdapter + +class GeneratorDataAdapter(DataAdapter): + + def __init__(self, generator): + (first_batches, generator) = peek_and_restore(generator) + self.generator = generator + self._first_batches = first_batches + self._output_signature = None + if not isinstance(first_batches[0], tuple): + raise ValueError(f'When passing a Python generator to a Keras model, the generator must return a tuple, either (input,) or (inputs, targets) or (inputs, targets, sample_weights). Received: {first_batches[0]}') + + def get_numpy_iterator(self): + return data_adapter_utils.get_numpy_iterator(self.generator) + + def get_jax_iterator(self): + return data_adapter_utils.get_jax_iterator(self.generator) + + def get_tf_dataset(self): + from keras.src.utils.module_utils import tensorflow as tf + + def convert_to_tf(x, spec): + if data_adapter_utils.is_scipy_sparse(x): + x = data_adapter_utils.scipy_sparse_to_tf_sparse(x) + elif data_adapter_utils.is_jax_sparse(x): + x = data_adapter_utils.jax_sparse_to_tf_sparse(x) + if not spec.shape.is_compatible_with(x.shape): + raise TypeError(f"Generator yielded an element of shape {x.shape} where an element of shape {spec.shape} was expected. Your generator provides tensors with variable input dimensions other than the batch size. 
Make sure that the generator's first two batches do not have the same dimension value wherever there is a variable input dimension.") + return x + + def get_tf_iterator(): + for batch in self.generator: + batch = tree.map_structure(convert_to_tf, batch, self._output_signature) + yield batch + if self._output_signature is None: + self._output_signature = data_adapter_utils.get_tensor_spec(self._first_batches) + ds = tf.data.Dataset.from_generator(get_tf_iterator, output_signature=self._output_signature) + ds = ds.prefetch(tf.data.AUTOTUNE) + return ds + + def get_torch_dataloader(self): + return data_adapter_utils.get_torch_dataloader(self.generator) + + @property + def num_batches(self): + return None + + @property + def batch_size(self): + return None + +def peek_and_restore(generator): + batches = list(itertools.islice(generator, data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC)) + return (batches, itertools.chain(batches, generator)) + +# File: keras-master/keras/src/trainers/data_adapters/py_dataset_adapter.py +import itertools +import multiprocessing.dummy +import queue +import random +import threading +import warnings +import weakref +from contextlib import closing +import numpy as np +from keras.src.api_export import keras_export +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.data_adapters.data_adapter import DataAdapter + +@keras_export(['keras.utils.PyDataset', 'keras.utils.Sequence']) +class PyDataset: + + def __init__(self, workers=1, use_multiprocessing=False, max_queue_size=10): + self._workers = workers + self._use_multiprocessing = use_multiprocessing + self._max_queue_size = max_queue_size + + def _warn_if_super_not_called(self): + warn = False + if not hasattr(self, '_workers'): + self._workers = 1 + warn = True + if not hasattr(self, '_use_multiprocessing'): + self._use_multiprocessing = False + warn = True + if not hasattr(self, '_max_queue_size'): + self._max_queue_size = 10 + warn = True + if warn: + warnings.warn('Your `PyDataset` class should call `super().__init__(**kwargs)` in its constructor. `**kwargs` can include `workers`, `use_multiprocessing`, `max_queue_size`. 
Do not pass these arguments to `fit()`, as they will be ignored.', stacklevel=2) + + @property + def workers(self): + self._warn_if_super_not_called() + return self._workers + + @workers.setter + def workers(self, value): + self._workers = value + + @property + def use_multiprocessing(self): + self._warn_if_super_not_called() + return self._use_multiprocessing + + @use_multiprocessing.setter + def use_multiprocessing(self, value): + self._use_multiprocessing = value + + @property + def max_queue_size(self): + self._warn_if_super_not_called() + return self._max_queue_size + + @max_queue_size.setter + def max_queue_size(self, value): + self._max_queue_size = value + + def __getitem__(self, index): + raise NotImplementedError + + @property + def num_batches(self): + if hasattr(self, '__len__'): + return len(self) + raise NotImplementedError('You need to implement the `num_batches` property:\n\n@property\ndef num_batches(self):\n return ...') + + def on_epoch_begin(self): + pass + + def on_epoch_end(self): + pass + +class PyDatasetAdapter(DataAdapter): + + def __init__(self, x, class_weight=None, shuffle=False): + self.py_dataset = x + self.class_weight = class_weight + self.enqueuer = None + self.shuffle = shuffle + self._output_signature = None + workers = self.py_dataset.workers + use_multiprocessing = self.py_dataset.use_multiprocessing + if workers > 1 or (workers > 0 and use_multiprocessing): + self.enqueuer = OrderedEnqueuer(self.py_dataset, workers=workers, use_multiprocessing=use_multiprocessing, max_queue_size=self.py_dataset.max_queue_size, shuffle=self.shuffle) + + def _standardize_batch(self, batch): + if isinstance(batch, dict): + return batch + if isinstance(batch, np.ndarray): + batch = (batch,) + if isinstance(batch, list): + batch = tuple(batch) + if not isinstance(batch, tuple) or len(batch) not in {1, 2, 3}: + raise ValueError(f'PyDataset.__getitem__() must return a tuple or a dict. If a tuple, it must be ordered either (input,) or (inputs, targets) or (inputs, targets, sample_weights). Received: {str(batch)[:100]}... 
of type {type(batch)}') + if self.class_weight is not None: + if len(batch) == 3: + raise ValueError('You cannot specify `class_weight` and `sample_weight` at the same time.') + if len(batch) == 2: + sw = data_adapter_utils.class_weight_to_sample_weights(batch[1], self.class_weight) + batch = batch + (sw,) + return batch + + def _infinite_generator(self): + for i in itertools.count(): + yield self.py_dataset[i] + + def _finite_generator(self): + indices = range(self.py_dataset.num_batches) + if self.shuffle: + indices = list(indices) + random.shuffle(indices) + for i in indices: + yield self.py_dataset[i] + + def _infinite_enqueuer_generator(self): + self.enqueuer.start() + for batch in self.enqueuer.get(): + yield batch + + def _finite_enqueuer_generator(self): + self.enqueuer.start() + num_batches = self.py_dataset.num_batches + for (i, batch) in enumerate(self.enqueuer.get()): + yield batch + if i >= num_batches - 1: + self.enqueuer.stop() + return + + def _get_iterator(self): + if self.enqueuer is None: + if self.py_dataset.num_batches is None: + return self._infinite_generator() + else: + return self._finite_generator() + elif self.py_dataset.num_batches is None: + return self._infinite_enqueuer_generator() + else: + return self._finite_enqueuer_generator() + + def get_numpy_iterator(self): + return data_adapter_utils.get_numpy_iterator(self._get_iterator()) + + def get_jax_iterator(self): + return data_adapter_utils.get_jax_iterator(self._get_iterator()) + + def get_tf_dataset(self): + from keras.src.utils.module_utils import tensorflow as tf + num_batches = self.py_dataset.num_batches + if self._output_signature is None: + num_samples = data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC + if num_batches is not None: + num_samples = min(num_samples, num_batches) + batches = [self._standardize_batch(self.py_dataset[i]) for i in range(num_samples)] + self._output_signature = data_adapter_utils.get_tensor_spec(batches) + ds = tf.data.Dataset.from_generator(self._get_iterator, output_signature=self._output_signature) + if self.enqueuer is not None: + options = tf.data.Options() + options.autotune.enabled = False + options.threading.private_threadpool_size = 1 + ds = ds.with_options(options) + else: + ds = ds.prefetch(tf.data.AUTOTUNE) + return ds + + def get_torch_dataloader(self): + return data_adapter_utils.get_torch_dataloader(self._get_iterator()) + + def on_epoch_begin(self): + if self.enqueuer: + self.enqueuer.start() + self.py_dataset.on_epoch_begin() + + def on_epoch_end(self): + if self.enqueuer: + self.enqueuer.stop() + self.py_dataset.on_epoch_end() + + @property + def num_batches(self): + return self.py_dataset.num_batches + + @property + def batch_size(self): + return None +_SHARED_SEQUENCES = {} +_SEQUENCE_COUNTER = None +_DATA_POOLS = weakref.WeakSet() +_WORKER_ID_QUEUE = None +_FORCE_THREADPOOL = False + +def get_pool_class(use_multiprocessing): + global _FORCE_THREADPOOL + if not use_multiprocessing or _FORCE_THREADPOOL: + return multiprocessing.dummy.Pool + return multiprocessing.Pool + +def get_worker_id_queue(): + global _WORKER_ID_QUEUE + if _WORKER_ID_QUEUE is None: + _WORKER_ID_QUEUE = multiprocessing.Queue() + return _WORKER_ID_QUEUE + +def get_index(uid, i): + return _SHARED_SEQUENCES[uid][i] + +class PyDatasetEnqueuer: + + def __init__(self, py_dataset, workers=1, use_multiprocessing=False, max_queue_size=10): + self.py_dataset = py_dataset + global _SEQUENCE_COUNTER + if _SEQUENCE_COUNTER is None: + try: + _SEQUENCE_COUNTER = multiprocessing.Value('i', 0) + except 
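# ----------------------------------------------------------------------
# [Editor's aside] A minimal `PyDataset` subclass of the kind consumed
# by `PyDatasetAdapter` above (a sketch; assumes `keras` is installed).
# Defining `__len__` is what makes the default `num_batches` property
# work:
#
#   import math
#   import keras
#
#   class ArrayPyDataset(keras.utils.PyDataset):
#       def __init__(self, x, y, batch_size=32, **kwargs):
#           super().__init__(**kwargs)  # workers / use_multiprocessing / max_queue_size
#           self.x, self.y, self.batch_size = x, y, batch_size
#
#       def __len__(self):  # number of batches per epoch
#           return math.ceil(len(self.x) / self.batch_size)
#
#       def __getitem__(self, i):
#           sl = slice(i * self.batch_size, (i + 1) * self.batch_size)
#           return self.x[sl], self.y[sl]
# ----------------------------------------------------------------------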
OSError: + _SEQUENCE_COUNTER = 0 + if isinstance(_SEQUENCE_COUNTER, int): + self.uid = _SEQUENCE_COUNTER + _SEQUENCE_COUNTER += 1 + else: + with _SEQUENCE_COUNTER.get_lock(): + self.uid = _SEQUENCE_COUNTER.value + _SEQUENCE_COUNTER.value += 1 + self.ready_queue = queue.Queue() + self.future_queue = queue.Queue(max_queue_size) + self.running = False + self.start_stop_lock = threading.Lock() + self.run_thread = None + if use_multiprocessing: + self.executor_fn = self._get_executor_init(workers) + else: + self.executor_fn = lambda _: get_pool_class(False)(workers) + + def is_running(self): + return self.running + + def start(self): + with self.start_stop_lock: + if self.running: + return + self.running = True + self.run_thread = threading.Thread(target=self._run) + self.run_thread.name = f'Worker_{self.uid}' + self.run_thread.daemon = True + self.run_thread.start() + + def stop(self, drain_queue_and_join=True): + with self.start_stop_lock: + if not self.running: + return + self.running = False + if drain_queue_and_join: + while True: + try: + value = self.future_queue.get(block=True, timeout=0.1) + if isinstance(value, Exception): + raise value + inputs = value.get() + self.future_queue.task_done() + if inputs is not None: + self.ready_queue.put(inputs) + except queue.Empty: + break + self.run_thread.join() + self.run_thread = None + _SHARED_SEQUENCES[self.uid] = None + + def _send_py_dataset(self): + _SHARED_SEQUENCES[self.uid] = self.py_dataset + + def __del__(self): + self.stop(drain_queue_and_join=False) + + def _run(self): + raise NotImplementedError + + def _get_executor_init(self, workers): + raise NotImplementedError + + def get(self): + raise NotImplementedError + +class OrderedEnqueuer(PyDatasetEnqueuer): + + def __init__(self, py_dataset, workers=1, use_multiprocessing=False, max_queue_size=10, shuffle=False): + super().__init__(py_dataset, workers, use_multiprocessing, max_queue_size) + self.shuffle = shuffle + if self.py_dataset.num_batches is None: + self.indices = itertools.count() + + def _get_executor_init(self, workers): + + def pool_fn(seqs): + pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, None, get_worker_id_queue())) + _DATA_POOLS.add(pool) + return pool + return pool_fn + + def _run(self): + try: + if self.py_dataset.num_batches is not None: + indices = range(self.py_dataset.num_batches) + if self.shuffle: + indices = list(indices) + random.shuffle(indices) + self.indices = iter(indices) + self._send_py_dataset() + with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor: + while self.is_running(): + try: + i = next(self.indices) + self.future_queue.put(executor.apply_async(get_index, (self.uid, i)), block=True) + except StopIteration: + break + except Exception as e: + self.future_queue.put(e) + + def get(self): + while self.is_running(): + try: + inputs = self.ready_queue.get(block=False) + yield inputs + continue + except queue.Empty: + pass + try: + value = self.future_queue.get(block=True, timeout=5) + self.future_queue.task_done() + if isinstance(value, Exception): + raise value + inputs = value.get() + if inputs is not None: + yield inputs + except queue.Empty: + pass + except Exception as e: + self.stop(drain_queue_and_join=True) + raise e + raise ValueError('Iterator called after `on_epoch_end` and before `on_epoch_begin`.') + +def init_pool_generator(gens, random_seed=None, id_queue=None): + global _SHARED_SEQUENCES + _SHARED_SEQUENCES = gens + worker_proc = multiprocessing.current_process() + worker_proc.name = 
f'Keras_worker_{worker_proc.name}' + if random_seed is not None: + np.random.seed(random_seed + worker_proc.ident) + if id_queue is not None: + id_queue.put(worker_proc.ident, block=True, timeout=0.1) + +# File: keras-master/keras/src/trainers/data_adapters/tf_dataset_adapter.py +from keras.src import tree +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.data_adapters.data_adapter import DataAdapter + +class TFDatasetAdapter(DataAdapter): + + def __init__(self, dataset, class_weight=None, distribution=None): + from keras.src.utils.module_utils import tensorflow as tf + if not isinstance(dataset, (tf.data.Dataset, tf.distribute.DistributedDataset)): + raise ValueError(f'Expected argument `dataset` to be a tf.data.Dataset. Received: {dataset}') + if class_weight is not None: + dataset = dataset.map(make_class_weight_map_fn(class_weight)).prefetch(tf.data.AUTOTUNE) + if distribution is not None: + dataset = distribution.distribute_dataset(dataset) + self._dataset = dataset + + def get_numpy_iterator(self): + from keras.src.backend.tensorflow.core import convert_to_numpy + for batch in self._dataset: + yield tree.map_structure(convert_to_numpy, batch) + + def get_jax_iterator(self): + from keras.src.backend.tensorflow.core import convert_to_numpy + from keras.src.utils.module_utils import tensorflow as tf + + def convert_to_jax(x): + if isinstance(x, tf.SparseTensor): + return data_adapter_utils.tf_sparse_to_jax_sparse(x) + else: + return convert_to_numpy(x) + for batch in self._dataset: + yield tree.map_structure(convert_to_jax, batch) + + def get_tf_dataset(self): + return self._dataset + + def get_torch_dataloader(self): + return data_adapter_utils.get_torch_dataloader(self._dataset) + + @property + def num_batches(self): + cardinality = self._dataset.cardinality + if callable(cardinality): + cardinality = int(self._dataset.cardinality()) + else: + cardinality = int(cardinality) + if cardinality < 0: + return None + return cardinality + + @property + def batch_size(self): + first_element_spec = tree.flatten(self._dataset.element_spec)[0] + return first_element_spec.shape[0] + + @property + def has_partial_batch(self): + return None + + @property + def partial_batch_size(self): + return None + +def make_class_weight_map_fn(class_weight): + from keras.src.utils.module_utils import tensorflow as tf + class_weight_tensor = tf.convert_to_tensor([class_weight.get(int(c), 1.0) for c in range(max(class_weight.keys()) + 1)]) + + def class_weights_map_fn(*data): + (x, y, sw) = data_adapter_utils.unpack_x_y_sample_weight(data) + if sw is not None: + raise ValueError('You cannot specify `class_weight` and `sample_weight` at the same time.') + if tree.is_nested(y): + raise ValueError('`class_weight` is only supported for Models with a single output.') + if y.shape.rank >= 2: + y_classes = tf.__internal__.smart_cond.smart_cond(tf.shape(y)[-1] > 1, lambda : tf.argmax(y, axis=-1), lambda : tf.cast(tf.round(tf.squeeze(y, axis=-1)), tf.int32)) + else: + y_classes = tf.cast(tf.round(y), tf.int32) + cw = tf.gather(class_weight_tensor, y_classes) + return (x, y, cw) + return class_weights_map_fn
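# ----------------------------------------------------------------------
# [Editor's aside] Effect of `make_class_weight_map_fn` above, sketched
# with eager TensorFlow (assumes tf is installed). Labels of shape
# (batch, 1) are squeezed, rounded, and used to gather one weight per
# sample:
#
#   import tensorflow as tf
#   map_fn = make_class_weight_map_fn({0: 1.0, 1: 5.0})
#   x = tf.zeros((3, 2))
#   y = tf.constant([[0.0], [1.0], [1.0]])
#   x, y, sw = map_fn(x, y)
#   print(sw.numpy())  # [1. 5. 5.]
# ----------------------------------------------------------------------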
+ +# File: keras-master/keras/src/trainers/data_adapters/torch_data_loader_adapter.py +import itertools +import numpy as np +from keras.src import tree +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.data_adapters.data_adapter import DataAdapter + +class TorchDataLoaderAdapter(DataAdapter): + + def __init__(self, dataloader): + import torch + if not isinstance(dataloader, torch.utils.data.DataLoader): + raise ValueError(f'Expected argument `dataloader` to be an instance of `torch.utils.data.DataLoader`. Received: {dataloader}') + self._dataloader = dataloader + self._output_signature = None + self._batch_size = dataloader.batch_size + self._num_batches = None + self._partial_batch_size = None + if hasattr(dataloader.dataset, '__len__'): + self._num_batches = len(dataloader) + if self._batch_size is not None: + self._partial_batch_size = len(dataloader.dataset) % self._batch_size + + def get_numpy_iterator(self): + for batch in self._dataloader: + yield tuple(tree.map_structure(lambda x: np.asarray(x.cpu()), batch)) + + def get_jax_iterator(self): + return self.get_numpy_iterator() + + def get_tf_dataset(self): + from keras.src.utils.module_utils import tensorflow as tf + if self._output_signature is None: + batches = list(itertools.islice(self._dataloader, data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC)) + self._output_signature = tuple(data_adapter_utils.get_tensor_spec(batches)) + return tf.data.Dataset.from_generator(self.get_numpy_iterator, output_signature=self._output_signature) + + def get_torch_dataloader(self): + return self._dataloader + + @property + def num_batches(self): + return self._num_batches + + @property + def batch_size(self): + return self._batch_size + + @property + def has_partial_batch(self): + if self._partial_batch_size: + return self._partial_batch_size > 0 + else: + return None + + @property + def partial_batch_size(self): + return self._partial_batch_size + +# File: keras-master/keras/src/trainers/epoch_iterator.py +"""""" +import warnings +from keras.src.trainers import data_adapters + +class EpochIterator: + + def __init__(self, x, y=None, sample_weight=None, batch_size=None, steps_per_epoch=None, shuffle=False, class_weight=None, steps_per_execution=1): + self.steps_per_epoch = steps_per_epoch + self.steps_per_execution = steps_per_execution + if steps_per_epoch: + self._current_iterator = None + self._insufficient_data = False + self.data_adapter = data_adapters.get_data_adapter(x=x, y=y, sample_weight=sample_weight, batch_size=batch_size, steps_per_epoch=steps_per_epoch, shuffle=shuffle, class_weight=class_weight) + self._num_batches = self.data_adapter.num_batches + + def _get_iterator(self): + return self.data_adapter.get_numpy_iterator() + + def enumerate_epoch(self): + buffer = [] + self.data_adapter.on_epoch_begin() + if self.steps_per_epoch: + if self._current_iterator is None: + self._current_iterator = iter(self._get_iterator()) + self._insufficient_data = False + for step in range(self.steps_per_epoch): + if self._insufficient_data: + break + try: + data = next(self._current_iterator) + buffer.append(data) + if len(buffer) == self.steps_per_execution: + yield (step - len(buffer) + 1, buffer) + buffer = [] + except (StopIteration,): + warnings.warn('Your input ran out of data; interrupting epoch. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches.
You may need to use the `.repeat()` function when building your dataset.', stacklevel=2) + self._current_iterator = None + self._insufficient_data = True + if buffer: + yield (step - len(buffer) + 1, buffer) + else: + for (step, data) in enumerate(self._get_iterator()): + buffer.append(data) + if len(buffer) == self.steps_per_execution: + yield (step - len(buffer) + 1, buffer) + buffer = [] + if buffer: + yield (step - len(buffer) + 1, buffer) + if not self._num_batches: + self._num_batches = step + 1 + self.data_adapter.on_epoch_end() + + @property + def num_batches(self): + if self.steps_per_epoch: + return self.steps_per_epoch + return self._num_batches + +# File: keras-master/keras/src/trainers/trainer.py +import concurrent.futures +import inspect +import platform +import warnings +from keras.src import backend +from keras.src import metrics as metrics_module +from keras.src import ops +from keras.src import optimizers +from keras.src import tree +from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer +from keras.src.saving import serialization_lib +from keras.src.trainers.compile_utils import CompileLoss +from keras.src.trainers.compile_utils import CompileMetrics +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.utils import traceback_utils +from keras.src.utils import tracking + +class Trainer: + + def __init__(self): + self._lock = False + self._run_eagerly = False + self._jit_compile = None + self.compiled = False + self.loss = None + self.steps_per_execution = 1 + self._initial_epoch = None + self._compute_loss_has_training_arg = 'training' in inspect.signature(self.compute_loss).parameters + self._compile_loss = None + self._compile_metrics = None + self._loss_tracker = None + + @traceback_utils.filter_traceback + @tracking.no_automatic_dependency_tracking + def compile(self, optimizer='rmsprop', loss=None, loss_weights=None, metrics=None, weighted_metrics=None, run_eagerly=False, steps_per_execution=1, jit_compile='auto', auto_scale_loss=True): + self._clear_previous_trainer_metrics() + optimizer = optimizers.get(optimizer) + self.optimizer = optimizer + if auto_scale_loss and self.dtype_policy.name == 'mixed_float16' and self.optimizer and (not isinstance(self.optimizer, LossScaleOptimizer)): + self.optimizer = LossScaleOptimizer(self.optimizer, name='loss_scale_optimizer') + if hasattr(self, 'output_names'): + output_names = self.output_names + else: + output_names = None + if loss is not None: + self._compile_loss = CompileLoss(loss, loss_weights, output_names=output_names) + self.loss = loss + if metrics is not None or weighted_metrics is not None: + self._compile_metrics = CompileMetrics(metrics, weighted_metrics, output_names=output_names) + if jit_compile == 'auto': + if run_eagerly: + jit_compile = False + else: + jit_compile = self._resolve_auto_jit_compile() + if jit_compile and run_eagerly: + jit_compile = False + warnings.warn('If `run_eagerly` is True, then `jit_compile` cannot also be True. 
Disabling `jit_compile`.', stacklevel=2) + self.jit_compile = jit_compile + self.run_eagerly = run_eagerly + self.stop_training = False + self.compiled = True + self._loss_tracker = metrics_module.Mean(name='loss') + self.steps_per_execution = steps_per_execution + self.train_function = None + self.test_function = None + self.predict_function = None + self._compile_config = serialization_lib.SerializableDict(optimizer=optimizer, loss=loss, loss_weights=loss_weights, metrics=metrics, weighted_metrics=weighted_metrics, run_eagerly=run_eagerly, steps_per_execution=steps_per_execution, jit_compile=jit_compile) + + @property + def jit_compile(self): + if self._jit_compile is None: + self._jit_compile = self._resolve_auto_jit_compile() + return self._jit_compile + + @jit_compile.setter + def jit_compile(self, value): + if value and (not model_supports_jit(self)): + warnings.warn("Model doesn't support `jit_compile=True`. Proceeding with `jit_compile=False`.") + self._jit_compile = False + else: + self._jit_compile = value + + def _resolve_auto_jit_compile(self): + if backend.backend() == 'torch': + return False + if backend.backend() == 'tensorflow': + import tensorflow as tf + devices = tf.config.list_physical_devices() + if not list(filter(lambda x: x.device_type != 'CPU', devices)): + return False + if self._distribute_strategy: + return False + if model_supports_jit(self): + return True + return False + + @property + def run_eagerly(self): + return self._run_eagerly + + @run_eagerly.setter + def run_eagerly(self, value): + self._run_eagerly = value + + @property + def metrics(self): + metrics = [] + if self.compiled: + if self._loss_tracker is not None: + metrics.append(self._loss_tracker) + if self._compile_metrics is not None: + metrics.append(self._compile_metrics) + if self._compile_loss is not None: + metrics.extend(self._compile_loss.metrics) + metrics.extend(self._metrics) + for layer in self._flatten_layers(include_self=False): + if isinstance(layer, Trainer): + continue + metrics.extend(layer.metrics) + return metrics + + @property + def metrics_names(self): + return [m.name for m in self.metrics] + + def reset_metrics(self): + for m in self.metrics: + m.reset_state() + + def _get_own_metrics(self): + metrics = [] + if self._loss_tracker is not None: + metrics.append(self._loss_tracker) + if self._compile_metrics is not None: + metrics.append(self._compile_metrics) + if self._compile_loss is not None: + metrics.extend(self._compile_loss.metrics) + metrics.extend(self._metrics) + return metrics + + def _clear_previous_trainer_metrics(self): + for layer in self._flatten_layers(include_self=False): + if not isinstance(layer, Trainer): + continue + for m in self._get_own_metrics(): + layer._tracker.untrack(m) + layer._loss_tracker = None + layer._compile_metrics = None + if layer._compile_loss is not None: + layer._compile_loss._metrics.clear() + layer._metrics.clear() + + def compute_loss(self, x=None, y=None, y_pred=None, sample_weight=None, training=True): + del x + del training + losses = [] + if self._compile_loss is not None: + loss = self._compile_loss(y, y_pred, sample_weight) + if loss is not None: + losses.append(loss) + for loss in self.losses: + losses.append(ops.sum(ops.cast(loss, dtype=backend.floatx()))) + if backend.backend() != 'jax' and len(losses) == 0: + raise ValueError('No loss to compute. 
Provide a `loss` argument in `compile()`.') + if len(losses) == 1: + total_loss = losses[0] + elif len(losses) == 0: + total_loss = ops.zeros(()) + else: + total_loss = ops.sum(losses) + return total_loss + + def _compute_loss(self, x=None, y=None, y_pred=None, sample_weight=None, training=True): + if self._compute_loss_has_training_arg: + return self.compute_loss(x, y, y_pred, sample_weight, training=training) + else: + return self.compute_loss(x, y, y_pred, sample_weight) + + def stateless_compute_loss(self, trainable_variables, non_trainable_variables, metrics_variables, x=None, y=None, y_pred=None, sample_weight=None, training=True): + var_mapping = list(zip(self.trainable_variables, trainable_variables)) + var_mapping.extend(zip(self.non_trainable_variables, non_trainable_variables)) + var_mapping.extend(zip(self.metrics_variables, metrics_variables)) + with backend.StatelessScope(state_mapping=var_mapping) as scope: + loss = self._compute_loss(x, y, y_pred, sample_weight=sample_weight, training=training) + non_trainable_variables = [] + for v in self.non_trainable_variables: + new_v = scope.get_current_value(v) + non_trainable_variables.append(new_v) + metrics_variables = [] + for v in self.metrics_variables: + new_v = scope.get_current_value(v) + metrics_variables.append(new_v) + return (loss, (trainable_variables, non_trainable_variables, metrics_variables)) + + def compute_metrics(self, x, y, y_pred, sample_weight=None): + del x + if self._compile_metrics is not None: + self._compile_metrics.update_state(y, y_pred, sample_weight) + return self.get_metrics_result() + + def get_metrics_result(self): + return_metrics = {} + for metric in self.metrics: + result = metric.result() + if isinstance(result, dict): + return_metrics.update(result) + else: + return_metrics[metric.name] = result + return self._pythonify_logs(return_metrics) + + def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose='auto', callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_batch_size=None, validation_freq=1): + raise NotImplementedError + + def evaluate(self, x=None, y=None, batch_size=None, verbose='auto', sample_weight=None, steps=None, callbacks=None, return_dict=False, **kwargs): + raise NotImplementedError + + def predict(self, x, batch_size=None, verbose='auto', steps=None, callbacks=None): + raise NotImplementedError + + def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None, return_dict=False): + raise NotImplementedError + + def test_on_batch(self, x, y=None, sample_weight=None, return_dict=False): + raise NotImplementedError + + def predict_on_batch(self, x): + raise NotImplementedError + + def get_compile_config(self): + if self.compiled and hasattr(self, '_compile_config'): + return self._compile_config.serialize() + + def compile_from_config(self, config): + has_overridden_compile = self.__class__.compile != Trainer.compile + if has_overridden_compile: + warnings.warn("`compile()` was not called as part of model loading because the model's `compile()` method is custom. All subclassed Models that have `compile()` overridden should also override `get_compile_config()` and `compile_from_config(config)`. 
Alternatively, you can call `compile()` manually after loading.", stacklevel=2) + return + config = serialization_lib.deserialize_keras_object(config) + self.compile(**config) + if hasattr(self, 'optimizer') and self.built: + self.optimizer.build(self.trainable_variables) + + def _should_eval(self, epoch, validation_freq): + epoch = epoch + 1 + if isinstance(validation_freq, int): + return epoch % validation_freq == 0 + elif isinstance(validation_freq, list): + return epoch in validation_freq + else: + raise ValueError(f'Expected `validation_freq` to be a list or int. Received: validation_freq={validation_freq} of the type {type(validation_freq)}.') + + def _pythonify_logs(self, logs): + with concurrent.futures.ThreadPoolExecutor() as executor: + result = self._pythonify_logs_inner(logs, executor) + for (key, future_value) in result.items(): + result[key] = future_value.result() + return result + + def _pythonify_logs_inner(self, logs, executor): + result = {} + for (key, value) in sorted(logs.items()): + if isinstance(value, dict): + result.update(self._pythonify_logs_inner(value, executor=executor)) + else: + result[key] = executor.submit(_async_float_cast, value) + return result + + def _get_metrics_result_or_logs(self, logs): + metric_logs = self.get_metrics_result() + if isinstance(logs, dict) and set(logs.keys()) == set(metric_logs.keys()): + return metric_logs + return logs + + def _flatten_metrics_in_order(self, logs): + metric_names = [] + for metric in self.metrics: + if isinstance(metric, CompileMetrics): + metric_names += [sub_metric.name for sub_metric in metric.metrics] + else: + metric_names.append(metric.name) + results = [] + for name in metric_names: + if name in logs: + results.append(logs[name]) + for key in sorted(logs.keys()): + if key not in metric_names: + results.append(logs[key]) + if len(results) == 1: + return results[0] + return results + + def _assert_compile_called(self, method_name=None): + if not self.compiled: + msg = 'You must call `compile()` before ' + if method_name is None: + msg += 'using the model.' + else: + msg += f'calling `{method_name}()`.' + raise ValueError(msg) + + def _symbolic_build(self, iterator=None, data_batch=None): + model_unbuilt = not all((layer.built for layer in self._flatten_layers())) + compile_metrics_unbuilt = self._compile_metrics is not None and (not self._compile_metrics.built) + compile_loss_unbuilt = self._compile_loss is not None and (not self._compile_loss.built) + optimizer_unbuilt = self.optimizer is not None and (not self.optimizer.built) + if model_unbuilt or compile_metrics_unbuilt or compile_loss_unbuilt: + + def to_symbolic_input(v): + if v is None: + return None + return backend.KerasTensor(v.shape, backend.standardize_dtype(v.dtype)) + if data_batch is None: + for (_, data) in iterator.enumerate_epoch(): + data_batch = data[0] + break + data_batch = tree.map_structure(to_symbolic_input, data_batch) + (x, y, sample_weight) = data_adapter_utils.unpack_x_y_sample_weight(data_batch) + try: + y_pred = backend.compute_output_spec(self, x, training=False) + except Exception as e: + raise RuntimeError(f"Unable to automatically build the model. Please build it yourself before calling fit/evaluate/predict. A model is 'built' when its variables have been created and its `self.built` attribute is True.
Usually, calling the model on a batch of data is the right way to build it.\nException encountered:\n'{e}'") + if compile_metrics_unbuilt: + backend.compute_output_spec(self.compute_metrics, x, y, y_pred, sample_weight=sample_weight) + if compile_loss_unbuilt: + backend.compute_output_spec(self._compute_loss, x, y, y_pred, sample_weight=sample_weight, training=False) + if optimizer_unbuilt: + self.optimizer.build(self.trainable_variables) + self._post_build() + +def model_supports_jit(model): + if platform.system() == 'Darwin' and 'arm' in platform.processor().lower(): + if backend.backend() == 'tensorflow': + from keras.src.utils.module_utils import tensorflow as tf + if tf.config.list_physical_devices('GPU'): + return False + if all((x.supports_jit for x in model._flatten_layers())): + return True + return False + +def _async_float_cast(value): + try: + value = float(value) + except: + pass + return value + +# File: keras-master/keras/src/tree/__init__.py +from keras.src.tree.tree_api import assert_same_structure +from keras.src.tree.tree_api import flatten +from keras.src.tree.tree_api import is_nested +from keras.src.tree.tree_api import lists_to_tuples +from keras.src.tree.tree_api import map_shape_structure +from keras.src.tree.tree_api import map_structure +from keras.src.tree.tree_api import map_structure_up_to +from keras.src.tree.tree_api import pack_sequence_as +from keras.src.tree.tree_api import register_tree_node_class +from keras.src.tree.tree_api import traverse + +# File: keras-master/keras/src/tree/dmtree_impl.py +from keras.src.utils.module_utils import dmtree + +def register_tree_node_class(cls): + return cls + +def is_nested(structure): + return dmtree.is_nested(structure) + +def traverse(func, structure, top_down=True): + return dmtree.traverse(func, structure, top_down=top_down) + +def flatten(structure): + return dmtree.flatten(structure) + +def map_structure(func, *structures): + return dmtree.map_structure(func, *structures) + +def map_structure_up_to(shallow_structure, func, *structures): + return dmtree.map_structure_up_to(shallow_structure, func, *structures) + +def assert_same_structure(a, b, check_types=True): + return dmtree.assert_same_structure(a, b, check_types=check_types) + +def pack_sequence_as(structure, flat_sequence, sequence_fn=None): + is_nested_fn = dmtree.is_nested + sequence_fn = sequence_fn or dmtree._sequence_like + + def truncate(value, length): + value_str = str(value) + return value_str[:length] + (value_str[length:] and '...') + if not is_nested_fn(flat_sequence): + raise TypeError('Attempted to pack value:\n {}\ninto a structure, but found incompatible type `{}` instead.'.format(truncate(flat_sequence, 100), type(flat_sequence))) + if not is_nested_fn(structure): + if len(flat_sequence) != 1: + raise ValueError('The target structure is of type `{}`\n {}\nHowever the input is a sequence ({}) of length {}.\n {}\nnest cannot guarantee that it is safe to map one to the other.'.format(type(structure), truncate(structure, 100), type(flat_sequence), len(flat_sequence), truncate(flat_sequence, 100))) + return flat_sequence[0] + try: + (final_index, packed) = packed_nest_with_indices(structure, flat_sequence, 0, is_nested_fn, sequence_fn) + if final_index < len(flat_sequence): + raise IndexError + except IndexError: + flat_structure = dmtree.flatten(structure) + if len(flat_structure) != len(flat_sequence): + raise ValueError(f'Could not pack sequence. Structure had {len(flat_structure)} atoms, but flat_sequence had {len(flat_sequence)} items. 
Structure: {structure}, flat_sequence: {flat_sequence}.') + return sequence_fn(structure, packed) + +def packed_nest_with_indices(structure, flat, index, is_nested_fn, sequence_fn=None): + packed = [] + sequence_fn = sequence_fn or dmtree._sequence_like + for s in yield_value(structure): + if is_nested_fn(s): + (new_index, child) = packed_nest_with_indices(s, flat, index, is_nested_fn, sequence_fn) + packed.append(sequence_fn(s, child)) + index = new_index + else: + packed.append(flat[index]) + index += 1 + return (index, packed) + +def yield_value(iterable): + for (_, v) in dmtree._yield_sorted_items(iterable): + yield v + +def lists_to_tuples(structure): + + def sequence_fn(instance, args): + if isinstance(instance, list): + return tuple(args) + return dmtree._sequence_like(instance, args) + return pack_sequence_as(structure, dmtree.flatten(structure), sequence_fn=sequence_fn) + +def is_shape_tuple(x): + if isinstance(x, (list, tuple)): + if all((isinstance(e, (int, type(None))) for e in x)): + return True + return False + +def map_shape_structure(func, structure): + if is_shape_tuple(structure): + return func(tuple(structure)) + if isinstance(structure, list): + return [map_shape_structure(func, e) for e in structure] + if isinstance(structure, tuple): + return tuple((map_shape_structure(func, e) for e in structure)) + if isinstance(structure, dict): + return {k: map_shape_structure(func, v) for (k, v) in structure.items()} + else: + raise ValueError(f'Cannot map function to unknown object {structure}') + +# File: keras-master/keras/src/tree/optree_impl.py +import collections +import collections.abc +import types +import optree +import optree.utils +from keras.src.backend.config import backend + +def register_tree_node_class(cls): + return optree.register_pytree_node_class(cls, namespace='keras') +if backend() == 'tensorflow': + from tensorflow.python.trackable.data_structures import ListWrapper + optree.register_pytree_node(ListWrapper, lambda x: (x, None), lambda metadata, children: ListWrapper(list(children)), namespace='keras') + +def is_nested(structure): + return not optree.tree_is_leaf(structure, none_is_leaf=True, namespace='keras') + +def traverse(func, structure, top_down=True): + + def traverse_children(): + (children, treedef) = optree.tree_flatten(structure, is_leaf=lambda x: x is not structure, none_is_leaf=True, namespace='keras') + if treedef.num_nodes == 1 and treedef.num_leaves == 1: + return structure + else: + return optree.tree_unflatten(treedef, [traverse(func, c, top_down=top_down) for c in children]) + if top_down: + ret = func(structure) + if ret is None: + return traverse_children() + else: + traversed_structure = traverse_children() + ret = func(traversed_structure) + if ret is None: + return traversed_structure + return None if ret is _MAP_TO_NONE else ret + +def flatten(structure): + (leaves, _) = optree.tree_flatten(structure, none_is_leaf=True, namespace='keras') + return leaves + +def map_structure(func, *structures): + if not callable(func): + raise TypeError(f'`func` must be callable. 
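
# --- Editor's illustration (not part of the Keras sources in this dump):
# what the `map_shape_structure` and `lists_to_tuples` helpers defined above
# do. A "shape" here is any list/tuple whose entries are all ints or None;
# it is handed to `func` as one leaf rather than being mapped over entry by
# entry. Sketch assumes a working Keras 3 install.
import keras

shapes = {"encoder": [None, 128], "decoder": ([None, 64], [None, 10])}

# Replace the unknown batch dimension with a concrete batch size of 32.
with_batch = keras.tree.map_shape_structure(
    lambda shape: (32,) + tuple(shape[1:]), shapes
)
# -> {"encoder": (32, 128), "decoder": ((32, 64), (32, 10))}

# `lists_to_tuples` rewrites every list node as a tuple, leaving dicts alone.
assert keras.tree.lists_to_tuples([1, [2, 3]]) == (1, (2, 3))
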
Received: func={func}') + if not structures: + raise ValueError('Must provide at least one structure') + for other in structures[1:]: + assert_same_structure(structures[0], other, check_types=False) + return optree.tree_map(func, *structures, none_is_leaf=True, namespace='keras') + +def map_structure_up_to(shallow_structure, func, *structures): + return _map_structure_with_path_up_to(shallow_structure, lambda _, *args: func(*args), *structures) + +def assert_same_structure(a, b, check_types=True): + a_structure = optree.tree_structure(a, none_is_leaf=True, namespace='keras') + b_structure = optree.tree_structure(b, none_is_leaf=True, namespace='keras') + if a_structure != b_structure: + raise ValueError(f"`a` and `b` don't have the same structure. Received: structure of a={a_structure}, structure of b={b_structure}") + if check_types: + type_structure = optree.tree_map(lambda x, y: type(x) is type(y), a, b, none_is_leaf=True, namespace='keras') + if not optree.tree_all(type_structure, none_is_leaf=True, namespace='keras'): + raise TypeError("The type of the leaves of `a` and `b` doesn't match.") + +def pack_sequence_as(structure, flat_sequence, sequence_fn=None): + sequence_fn = sequence_fn or _sequence_like + + def truncate(value, length): + value_str = str(value) + return value_str[:length] + (value_str[length:] and '...') + if not is_nested(flat_sequence): + raise TypeError('Attempted to pack value:\n {}\ninto a structure, but found incompatible type `{}` instead.'.format(truncate(flat_sequence, 100), type(flat_sequence))) + if not is_nested(structure): + if len(flat_sequence) != 1: + raise ValueError('The target structure is of type `{}`\n {}\nHowever the input is a sequence ({}) of length {}.\n {}\nnest cannot guarantee that it is safe to map one to the other.'.format(type(structure), truncate(structure, 100), type(flat_sequence), len(flat_sequence), truncate(flat_sequence, 100))) + return flat_sequence[0] + try: + (final_index, packed) = _packed_nest_with_indices(structure, flat_sequence, 0, sequence_fn) + if final_index < len(flat_sequence): + raise IndexError + except IndexError: + flat_structure = flatten(structure) + if len(flat_structure) != len(flat_sequence): + raise ValueError(f'Could not pack sequence. Structure had {len(flat_structure)} atoms, but flat_sequence had {len(flat_sequence)} items. Structure: {structure}, flat_sequence: {flat_sequence}.') + return sequence_fn(structure, packed) + +def lists_to_tuples(structure): + + def sequence_fn(instance, args): + if isinstance(instance, list): + return tuple(args) + return _sequence_like(instance, args) + return pack_sequence_as(structure, flatten(structure), sequence_fn=sequence_fn) + +def map_shape_structure(func, structure): + + def is_shape_tuple(x): + return isinstance(x, (list, tuple)) and all((isinstance(e, (int, type(None))) for e in x)) + if not callable(func): + raise TypeError(f'`func` must be callable. 
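
# --- Editor's illustration (not part of the Keras sources in this dump):
# the behavior of `assert_same_structure` and `map_structure_up_to` as
# implemented above. Sketch assumes a working Keras 3 install.
import keras

a = {"x": 1, "y": (2, 3)}
b = {"x": 10, "y": (20, 30)}

# Identical nesting: passes silently. A structural mismatch raises
# ValueError; with check_types=True, differing node types raise TypeError.
keras.tree.assert_same_structure(a, b)

# `map_structure_up_to` recurses only as deep as the shallow structure, so
# the "y" tuples below are passed to the function whole, not leaf by leaf.
shallow = {"x": None, "y": None}
paired = keras.tree.map_structure_up_to(shallow, lambda u, v: (u, v), a, b)
# -> {"x": (1, 10), "y": ((2, 3), (20, 30))}
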
Received: func={func}')
+ return optree.tree_map(func, structure, is_leaf=is_shape_tuple, none_is_leaf=True, namespace='keras')
+
+class _MapToNone:
+
+ def __repr__(self):
+ return 'keras.utils.tree._MAP_TO_NONE'
+_MAP_TO_NONE = _MapToNone()
+
+def _yield_flat_up_to(shallow_tree, input_tree, path=()):
+ if isinstance(shallow_tree, (str, bytes)) or not (isinstance(shallow_tree, (collections.abc.Mapping, collections.abc.Sequence)) or optree.is_namedtuple(shallow_tree)):
+ yield (path, input_tree)
+ else:
+ input_tree = dict(_yield_sorted_items(input_tree))
+ for (shallow_key, shallow_subtree) in _yield_sorted_items(shallow_tree):
+ subpath = path + (shallow_key,)
+ input_subtree = input_tree[shallow_key]
+ for (leaf_path, leaf_value) in _yield_flat_up_to(shallow_subtree, input_subtree, path=subpath):
+ yield (leaf_path, leaf_value)
+
+def _multiyield_flat_up_to(shallow_tree, *input_trees):
+ zipped_iterators = zip(*[_yield_flat_up_to(shallow_tree, input_tree) for input_tree in input_trees])
+ try:
+ for paths_and_values in zipped_iterators:
+ (paths, values) = zip(*paths_and_values)
+ yield (paths[:1] + values)
+ except KeyError as e:
+ paths = locals().get('paths', ((),))
+ raise ValueError(f"Could not find key '{e.args[0]}' in some `input_trees`. Please ensure the structure of all `input_trees` are compatible with `shallow_tree`. The last valid path yielded was {paths[0]}.") from e
+
+def _map_structure_with_path_up_to(shallow_structure, func, *structures):
+ results = []
+ for path_and_values in _multiyield_flat_up_to(shallow_structure, *structures):
+ results.append(func(*path_and_values))
+ shallow_structure_spec = optree.tree_structure(shallow_structure, none_is_leaf=True, namespace='keras')
+ return shallow_structure_spec.unflatten(results)
+
+def _sequence_like(instance, args):
+ if isinstance(instance, (dict, collections.abc.Mapping)):
+ result = dict(zip(sorted(instance), args))
+ keys_and_values = ((key, result[key]) for key in instance)
+ if isinstance(instance, collections.defaultdict):
+ return type(instance)(instance.default_factory, keys_and_values)
+ elif isinstance(instance, types.MappingProxyType):
+ return type(instance)(dict(keys_and_values))
+ else:
+ return type(instance)(keys_and_values)
+ elif isinstance(instance, collections.abc.MappingView):
+ return list(args)
+ elif optree.is_namedtuple(instance):
+ instance_type = type(instance)
+ try:
+ return instance_type(*args)
+ except Exception as e:
+ raise TypeError(f"Couldn't traverse {instance!r} with arguments {args}") from e
+ else:
+ return type(instance)(args)
+
+def _yield_sorted_items(iterable):
+ if isinstance(iterable, collections.abc.Mapping):
+ for key in sorted(iterable):
+ yield (key, iterable[key])
+ elif optree.is_namedtuple(iterable):
+ for field in iterable._fields:
+ yield (field, getattr(iterable, field))
+ else:
+ for item in enumerate(iterable):
+ yield item
+
+def _yield_value(iterable):
+ for (_, v) in _yield_sorted_items(iterable):
+ yield v
+
+def _packed_nest_with_indices(structure, flat, index, sequence_fn=None):
+ packed = []
+ sequence_fn = sequence_fn or _sequence_like
+ for s in _yield_value(structure):
+ if is_nested(s):
+ (new_index, child) = _packed_nest_with_indices(s, flat, index, sequence_fn)
+ packed.append(sequence_fn(s, child))
+ index = new_index
+ else:
+ packed.append(flat[index])
+ index += 1
+ return (index, packed)
+
+# File: keras-master/keras/src/tree/tree_api.py
+from keras.src.api_export import keras_export
+from keras.src.utils.module_utils import dmtree
+from keras.src.utils.module_utils import optree
+if optree.available:
+ from keras.src.tree import optree_impl as tree_impl
+elif dmtree.available:
+ from keras.src.tree import dmtree_impl as tree_impl
+else:
+ raise ImportError('To use Keras, you need to have `optree` installed. Install it via `pip install optree`')
+
+def register_tree_node_class(cls):
+ return tree_impl.register_tree_node_class(cls)
+
+@keras_export('keras.tree.is_nested')
+def is_nested(structure):
+ return tree_impl.is_nested(structure)
+
+@keras_export('keras.tree.traverse')
+def traverse(func, structure, top_down=True):
+ return tree_impl.traverse(func, structure, top_down=top_down)
+
+@keras_export('keras.tree.flatten')
+def flatten(structure):
+ return tree_impl.flatten(structure)
+
+@keras_export('keras.tree.map_structure')
+def map_structure(func, *structures):
+ return tree_impl.map_structure(func, *structures)
+
+@keras_export('keras.tree.map_structure_up_to')
+def map_structure_up_to(shallow_structure, func, *structures):
+ return tree_impl.map_structure_up_to(shallow_structure, func, *structures)
+
+@keras_export('keras.tree.assert_same_structure')
+def assert_same_structure(a, b, check_types=True):
+ return tree_impl.assert_same_structure(a, b, check_types=check_types)
+
+@keras_export('keras.tree.pack_sequence_as')
+def pack_sequence_as(structure, flat_sequence, sequence_fn=None):
+ return tree_impl.pack_sequence_as(structure, flat_sequence, sequence_fn=sequence_fn)
+
+@keras_export('keras.tree.lists_to_tuples')
+def lists_to_tuples(structure):
+ return tree_impl.lists_to_tuples(structure)
+
+@keras_export('keras.tree.map_shape_structure')
+def map_shape_structure(func, structure):
+ return tree_impl.map_shape_structure(func, structure)
+
+# File: keras-master/pip_build.py
+""""""
+import argparse
+import datetime
+import glob
+import os
+import pathlib
+import re
+import shutil
+package = 'keras'
+build_directory = 'tmp_build_dir'
+dist_directory = 'dist'
+to_copy = ['setup.py', 'README.md']
+
+def export_version_string(version, is_nightly=False, rc_index=None):
+ if is_nightly:
+ date = datetime.datetime.now()
+ version += f".dev{date.strftime('%Y%m%d%H')}"
+ with open('setup.py') as f:
+ setup_contents = f.read()
+ with open('setup.py', 'w') as f:
+ setup_contents = setup_contents.replace('name="keras"', 'name="keras-nightly"')
+ f.write(setup_contents)
+ elif rc_index is not None:
+ version += 'rc' + str(rc_index)
+ with open(os.path.join(package, 'src', 'version.py')) as f:
+ init_contents = f.read()
+ with open(os.path.join(package, 'src', 'version.py'), 'w') as f:
+ init_contents = re.sub('\n__version__ = .*\n', f'\n__version__ = "{version}"\n', init_contents)
+ f.write(init_contents)
+
+def ignore_files(_, filenames):
+ return [f for f in filenames if f.endswith('_test.py')]
+
+def copy_source_to_build_directory(root_path):
+ os.chdir(root_path)
+ os.mkdir(build_directory)
+ shutil.copytree(package, os.path.join(build_directory, package), ignore=ignore_files)
+ for fname in to_copy:
+ shutil.copy(fname, os.path.join(f'{build_directory}', fname))
+ os.chdir(build_directory)
+
+def build(root_path, is_nightly=False, rc_index=None):
+ if os.path.exists(build_directory):
+ raise ValueError(f'Directory already exists: {build_directory}')
+ try:
+ copy_source_to_build_directory(root_path)
+ move_tf_keras_directory()
+ from keras.src.version import __version__
+ export_version_string(__version__, is_nightly, rc_index)
+ return build_and_save_output(root_path, __version__)
+ finally:
shutil.rmtree(build_directory) + +def move_tf_keras_directory(): + shutil.move(os.path.join(package, 'api', '_tf_keras'), 'keras') + with open(os.path.join(package, 'api', '__init__.py')) as f: + contents = f.read() + contents = contents.replace('from keras.api import _tf_keras', '') + with open(os.path.join(package, 'api', '__init__.py'), 'w') as f: + f.write(contents) + for (root, _, fnames) in os.walk(os.path.join(package, '_tf_keras')): + for fname in fnames: + if fname.endswith('.py'): + tf_keras_fpath = os.path.join(root, fname) + with open(tf_keras_fpath) as f: + contents = f.read() + contents = contents.replace('keras.api._tf_keras', 'keras._tf_keras') + with open(tf_keras_fpath, 'w') as f: + f.write(contents) + +def build_and_save_output(root_path, __version__): + os.system('python3 -m build') + os.chdir(root_path) + if not os.path.exists(dist_directory): + os.mkdir(dist_directory) + for fpath in glob.glob(os.path.join(build_directory, dist_directory, '*.*')): + shutil.copy(fpath, dist_directory) + whl_path = None + for fname in os.listdir(dist_directory): + if __version__ in fname and fname.endswith('.whl'): + whl_path = os.path.abspath(os.path.join(dist_directory, fname)) + if whl_path: + print(f'Build successful. Wheel file available at {whl_path}') + else: + print('Build failed.') + return whl_path + +def install_whl(whl_fpath): + print(f'Installing wheel file: {whl_fpath}') + os.system(f'pip3 install {whl_fpath} --force-reinstall --no-dependencies') +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--install', action='store_true', help='Whether to install the generated wheel file.') + parser.add_argument('--nightly', action='store_true', help='Whether to generate nightly wheel file.') + parser.add_argument('--rc', type=int, help='Specify `[0-9] when generating RC wheels.') + args = parser.parse_args() + root_path = pathlib.Path(__file__).parent.resolve() + whl_path = build(root_path, args.nightly, args.rc) + if whl_path and args.install: + install_whl(whl_path) +
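
# --- Editor's illustration (not part of the Keras sources in this dump):
# the version-string scheme implemented by `export_version_string` in
# pip_build.py above. `compute_version` is a hypothetical standalone
# re-statement of that logic, shown only for clarity; nightly builds also
# rename the package to keras-nightly in setup.py, which is omitted here.
import datetime

def compute_version(base, is_nightly=False, rc_index=None):
    # Nightly wheels get a `.dev<YYYYMMDDHH>` suffix; release-candidate
    # wheels get an `rc<N>` suffix; plain releases keep the base version.
    if is_nightly:
        return base + f".dev{datetime.datetime.now().strftime('%Y%m%d%H')}"
    if rc_index is not None:
        return base + "rc" + str(rc_index)
    return base

print(compute_version("3.3.0"))                   # 3.3.0
print(compute_version("3.3.0", rc_index=0))       # 3.3.0rc0
print(compute_version("3.3.0", is_nightly=True))  # e.g. 3.3.0.dev2024052117

# Typical invocations of the build script itself:
#   python3 pip_build.py                 (release wheel into dist/)
#   python3 pip_build.py --nightly       (keras-nightly wheel)
#   python3 pip_build.py --rc 0          (RC wheel)
#   python3 pip_build.py --install       (build, then pip-install the wheel)
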