Commit: Update imports
laurentm committed Aug 11, 2022
1 parent c8cec25 commit 59a97b1
Showing 32 changed files with 78 additions and 78 deletions.
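
Every hunk below follows the same pattern: `tensorflow.keras.*` imports are rewritten as `keras.*` imports. A minimal sketch of why the swap is behaviour-preserving, assuming TensorFlow >= 2.6 where `tf.keras` is backed by the standalone `keras` package (the version assumption is mine, not stated in the commit):

```python
# Minimal sketch (assumption: TF >= 2.6, where tf.keras re-exports the
# standalone keras package). Both import paths resolve to the same class,
# so swapping `tensorflow.keras` for `keras` does not change behaviour.
import tensorflow as tf
from keras.models import Sequential

assert Sequential is tf.keras.models.Sequential
```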
2 changes: 1 addition & 1 deletion README.md
@@ -95,7 +95,7 @@ The [notebooks](notebooks) section provides some examples. For instance, just ru
 ```python
 import tensorflow as tf
 import tensorflow_datasets as tfds
-from tensorflow.keras.models import Sequential
+from keras.models import Sequential

 from keras_fsl.models.encoders import BasicCNN
 from keras_fsl.layers import GramMatrix
2 changes: 1 addition & 1 deletion keras_fsl/callbacks/binary_statistics.py
@@ -2,7 +2,7 @@
 import numpy as np
 import tensorflow as tf
 from matplotlib.backends.backend_agg import FigureCanvasAgg
-from tensorflow.keras.callbacks import Callback
+from keras.callbacks import Callback


 class BinaryStatistics(Callback):
2 changes: 1 addition & 1 deletion keras_fsl/layers/centroids_matrix.py
@@ -1,5 +1,5 @@
 import tensorflow as tf
-from tensorflow.keras import activations
+from keras import activations

 from keras_fsl.layers.support_layer import SupportLayer

2 changes: 1 addition & 1 deletion keras_fsl/layers/classification.py
@@ -1,5 +1,5 @@
 import tensorflow as tf
-from tensorflow.keras.layers import Layer
+from keras.layers import Layer

 from keras_fsl.losses import class_consistency_loss

4 changes: 2 additions & 2 deletions keras_fsl/layers/slicing.py
@@ -1,5 +1,5 @@
-from tensorflow.keras import Sequential
-from tensorflow.keras.layers import Lambda, Flatten
+from keras import Sequential
+from keras.layers import Lambda, Flatten


 def CenterSlicing2D():
2 changes: 1 addition & 1 deletion keras_fsl/layers/support_layer.py
@@ -1,5 +1,5 @@
 import tensorflow as tf
-from tensorflow.keras.layers import Layer
+from keras.layers import Layer

 from keras_fsl.models import head_models

2 changes: 1 addition & 1 deletion keras_fsl/losses/gram_matrix_losses.py
@@ -13,7 +13,7 @@
 import tensorflow as tf
 import tensorflow.keras.backend as K
 import tensorflow_probability as tfp
-from tensorflow.keras.losses import Loss
+from keras.losses import Loss


 class MeanScoreClassificationLoss(Loss):
2 changes: 1 addition & 1 deletion keras_fsl/metrics/gram_matrix_metrics.py
@@ -3,7 +3,7 @@
 instance). y_true should be one-hot encoded
 """
 import tensorflow as tf
-from tensorflow.keras import backend as K
+from keras import backend as K


 def classification_accuracy(ascending=False):
4 changes: 2 additions & 2 deletions keras_fsl/models/encoders/basic_cnn.py
@@ -1,5 +1,5 @@
-from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
-from tensorflow.keras.models import Sequential
+from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
+from keras.models import Sequential


 def BasicCNN(input_shape, classes=None):
6 changes: 3 additions & 3 deletions keras_fsl/models/encoders/darknet.py
@@ -1,8 +1,8 @@
 from functools import wraps

-from tensorflow.keras import Sequential, Input, Model
-from tensorflow.keras.layers import Conv2D, LeakyReLU, ZeroPadding2D, Add, MaxPooling2D, BatchNormalization
-from tensorflow.keras.regularizers import l2
+from keras import Sequential, Input, Model
+from keras.layers import Conv2D, LeakyReLU, ZeroPadding2D, Add, MaxPooling2D, BatchNormalization
+from keras.regularizers import l2


 @wraps(Conv2D)
8 changes: 4 additions & 4 deletions keras_fsl/models/encoders/koch_net.py
@@ -1,7 +1,7 @@
-from tensorflow.keras import Sequential, Input
-from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
-from tensorflow.keras.initializers import RandomNormal
-from tensorflow.keras.regularizers import l2
+from keras import Sequential, Input
+from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
+from keras.initializers import RandomNormal
+from keras.regularizers import l2


 def conv_2d(*args, **kwargs):
4 changes: 2 additions & 2 deletions keras_fsl/models/encoders/single_conv_2d.py
@@ -1,5 +1,5 @@
-from tensorflow.keras import Sequential
-from tensorflow.keras.layers import Conv2D, GlobalAveragePooling2D
+from keras import Sequential
+from keras.layers import Conv2D, GlobalAveragePooling2D


 def SingleConv2D(input_shape):
4 changes: 2 additions & 2 deletions keras_fsl/models/encoders/vinyals_net.py
@@ -1,5 +1,5 @@
-from tensorflow.keras import Sequential
-from tensorflow.keras.layers import Conv2D, Input, BatchNormalization, Activation, MaxPooling2D, Flatten
+from keras import Sequential
+from keras.layers import Conv2D, Input, BatchNormalization, Activation, MaxPooling2D, Flatten


 def conv_block(*args, **kwargs):
4 changes: 2 additions & 2 deletions keras_fsl/models/head_models/dense_sigmoid.py
@@ -1,6 +1,6 @@
 import tensorflow as tf
-from tensorflow.keras.layers import Dense, Input, Lambda
-from tensorflow.keras.models import Model
+from keras.layers import Dense, Input, Lambda
+from keras.models import Model


 def DenseSigmoid(input_shape, use_bias=True):
8 changes: 4 additions & 4 deletions keras_fsl/models/head_models/learnt_norms.py
@@ -1,16 +1,16 @@
 import numpy as np
 import tensorflow as tf
-from tensorflow.keras import activations
-from tensorflow.keras.layers import (
+from keras import activations
+from keras.layers import (
     Concatenate,
     Conv2D,
     Dense,
     Flatten,
     Input,
     Reshape,
 )
-from tensorflow.keras.mixed_precision.experimental import global_policy
-from tensorflow.keras.models import Model
+from keras.mixed_precision import global_policy
+from keras.models import Model
 from tensorflow.python.keras.layers import Activation


6 changes: 3 additions & 3 deletions keras_fsl/models/head_models/mixed_norms.py
@@ -1,6 +1,6 @@
 import tensorflow as tf
-from tensorflow.keras import activations
-from tensorflow.keras.layers import (
+from keras import activations
+from keras.layers import (
     Concatenate,
     Conv2D,
     Dense,
@@ -10,7 +10,7 @@
     Lambda,
     Reshape,
 )
-from tensorflow.keras.models import Model
+from keras.models import Model


 def MixedNorms(input_shape, norms=None, use_bias=True, activation="sigmoid"):
6 changes: 3 additions & 3 deletions keras_fsl/models/head_models/tests/learnt_norms_test.py
@@ -1,7 +1,7 @@
 import numpy as np
 import tensorflow as tf
 from absl.testing import parameterized
-from tensorflow.keras.optimizers import RMSprop
+from keras.optimizers import RMSprop
 from tensorflow.python.keras.keras_parameterized import TestCase, run_all_keras_modes, run_with_all_model_types

 from keras_fsl.models.head_models import LearntNorms
@@ -34,8 +34,8 @@ def test_should_fit(self, input_shape):
         ("float64", "float64", "float64"),
     )
     def test_last_activation_fp32_in_mixed_precision(self, mixed_precision_policy, expected_last_layer_dtype_policy):
-        policy = tf.keras.mixed_precision.experimental.Policy(mixed_precision_policy)
-        tf.keras.mixed_precision.experimental.set_policy(policy)
+        policy = tf.keras.mixed_precision.Policy(mixed_precision_policy)
+        tf.keras.mixed_precision.set_policy(policy)
         learnt_norms = LearntNorms(input_shape=(10,))

         # Check dtype policy of internal non-input layers
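
The test above moves the mixed-precision setup off the `experimental` namespace. For reference, a minimal sketch of the stable mixed-precision API available since TF 2.4, where the setter is exposed as `set_global_policy`:

```python
import tensorflow as tf

# Stable (non-experimental) mixed-precision API, TF >= 2.4.
policy = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(policy)

assert tf.keras.mixed_precision.global_policy().name == "mixed_float16"
```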
4 changes: 2 additions & 2 deletions keras_fsl/models/siamese_nets.py
@@ -1,5 +1,5 @@
-from tensorflow.keras import Model
-from tensorflow.keras.layers import Input
+from keras import Model
+from keras.layers import Input

 from keras_fsl.models import encoders, head_models

4 changes: 2 additions & 2 deletions keras_fsl/sequences/abstract_sequence.py
@@ -3,8 +3,8 @@
 import imgaug.augmenters as iaa
 import numpy as np
 from abc import ABCMeta
-from tensorflow.keras.preprocessing.image import img_to_array, load_img
-from tensorflow.keras.utils import Sequence
+from keras.preprocessing.image import img_to_array, load_img
+from keras.utils import Sequence


 class AbstractSequence(Sequence, metaclass=ABCMeta):
2 changes: 1 addition & 1 deletion keras_fsl/sequences/prediction/pairs/product_sequence.py
@@ -1,7 +1,7 @@
 import math

 import pandas as pd
-from tensorflow.keras.utils import Sequence
+from keras.utils import Sequence


 class ProductSequence(Sequence):
@@ -1,7 +1,7 @@
 import math

 import pandas as pd
-from tensorflow.keras.utils import Sequence
+from keras.utils import Sequence


 class RandomProductSequence(Sequence):
12 changes: 6 additions & 6 deletions notebooks/batch_gram_matrix_training.py
@@ -4,14 +4,14 @@
 import click
 import pandas as pd
 import tensorflow as tf
-from tensorflow.keras import applications as keras_applications
-from tensorflow.keras.callbacks import (
+from keras import applications as keras_applications
+from keras.callbacks import (
     ModelCheckpoint,
     ReduceLROnPlateau,
     TensorBoard,
 )
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.optimizers import Adam
+from keras.models import Sequential
+from keras.optimizers import Adam

 from keras_fsl.dataframe.operators import ToKShotDataset
 from keras_fsl.layers import Classification, GramMatrix
@@ -22,8 +22,8 @@
 #%% Toggle some config if required
 # tf.config.experimental_run_functions_eagerly(True)
 # tf.config.optimizer.set_jit(True)
-# policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16")
-# tf.keras.mixed_precision.experimental.set_policy(policy)
+# policy = tf.keras.mixed_precision.Policy("mixed_float16")
+# tf.keras.mixed_precision.set_policy(policy)


 #%% CLI args
4 changes: 2 additions & 2 deletions notebooks/benchmark_caching_performance.ipynb
@@ -1897,8 +1897,8 @@
 "import tensorflow_datasets as tfds\n",
 "from functools import partial\n",
 "from gpumonitor.callbacks.tf import TFGpuMonitorCallback\n",
-"from tensorflow.keras.layers import Conv2D, Dropout, Flatten, GlobalMaxPooling2D, Input, MaxPooling2D\n",
-"from tensorflow.keras.models import Sequential\n",
+"from keras.layers import Conv2D, Dropout, Flatten, GlobalMaxPooling2D, Input, MaxPooling2D\n",
+"from keras.models import Sequential\n",
 "\n",
 "from keras_fsl.utils.datasets import assign, cache, cache_with_tf_record, read_decode_and_crop_jpeg, transform"
 ],
2 changes: 1 addition & 1 deletion notebooks/build_siamese_model_for_serving.py
@@ -1,6 +1,6 @@
 # flake8: noqa: E265
 import tensorflow as tf
-from tensorflow.keras.models import load_model
+from keras.models import load_model

 #%% Load siamese nets
 classifier = load_model("siamese_nets_classifier/1")
14 changes: 7 additions & 7 deletions notebooks/centroids_similarity_training.py
@@ -3,15 +3,15 @@
 import click
 import pandas as pd
 import tensorflow as tf
-from tensorflow.keras import applications as keras_applications
-from tensorflow.keras.callbacks import (
+from keras import applications as keras_applications
+from keras.callbacks import (
     ModelCheckpoint,
     ReduceLROnPlateau,
     TensorBoard,
 )
-from tensorflow.keras.layers import Input
-from tensorflow.keras.models import Model
-from tensorflow.keras.optimizers import Adam
+from keras.layers import Input
+from keras.models import Model
+from keras.optimizers import Adam

 from keras_fsl.dataframe.operators import ToKShotDataset
 from keras_fsl.layers import CentroidsMatrix
@@ -22,8 +22,8 @@
 #%% Toggle some config if required
 # tf.config.experimental_run_functions_eagerly(True)
 # tf.config.optimizer.set_jit(True)
-# policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16")
-# tf.keras.mixed_precision.experimental.set_policy(policy)
+# policy = tf.keras.mixed_precision.Policy("mixed_float16")
+# tf.keras.mixed_precision.set_policy(policy)


 #%% CLI args
4 changes: 2 additions & 2 deletions notebooks/evaluate.py
@@ -3,8 +3,8 @@
 import numpy as np
 import pandas as pd
 import tensorflow as tf
-from tensorflow.keras.models import load_model
-from tensorflow.keras import metrics
+from keras.models import load_model
+from keras import metrics

 #%% Init
 k_shot = 4
6 changes: 3 additions & 3 deletions notebooks/omniglot/basic_siamese_nets.py
@@ -4,9 +4,9 @@

 import imgaug.augmenters as iaa
 import pandas as pd
-from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, EarlyStopping
-from tensorflow.keras.optimizer_v2.adam import Adam
-from tensorflow.keras.saving import load_model
+from keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, EarlyStopping
+from keras.optimizer_v2.adam import Adam
+from keras.saving import load_model

 from keras_fsl.datasets import omniglot
 from keras_fsl.models import SiameseNets
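
The optimizer and model-loading imports above go through Keras internals (`keras.optimizer_v2`, `keras.saving`). A minimal sketch of the public import paths used by the other notebooks in this commit; treating them as equivalent is my assumption for the Keras 2.x version in use:

```python
# Public import paths for the same objects (assumption: Keras 2.x, where
# keras.optimizers.Adam is an alias of keras.optimizer_v2.adam.Adam and
# keras.models.load_model is the supported loading entry point).
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard
from keras.models import load_model
from keras.optimizers import Adam

optimizer = Adam(learning_rate=1e-3)
```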
8 changes: 4 additions & 4 deletions notebooks/siamese_nets_training.py
@@ -8,10 +8,10 @@
 import pandas as pd
 import tensorflow as tf
 import yaml
-from tensorflow.keras.models import load_model
-from tensorflow.keras import applications as keras_applications
-from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard
-from tensorflow.keras.optimizers import Adam
+from keras.models import load_model
+from keras import applications as keras_applications
+from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard
+from keras.optimizers import Adam

 from keras_fsl.models import SiameseNets
 from keras_fsl.sequences import training
8 changes: 4 additions & 4 deletions notebooks/supervised_gram_matrix.py
@@ -7,10 +7,10 @@
 import pandas as pd
 import tensorflow as tf
 from tensorboard.plugins import projector
-from tensorflow.keras.datasets import cifar10
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.layers import Lambda
-from tensorflow.keras.utils import to_categorical
+from keras.datasets import cifar10
+from keras.models import Sequential
+from keras.layers import Lambda
+from keras.utils import to_categorical

 from keras_fsl.models.encoders import BasicCNN
 from keras_fsl.losses import BinaryCrossentropy, class_consistency_loss
6 changes: 3 additions & 3 deletions notebooks/triplet_loss_cifar10.py
@@ -12,9 +12,9 @@
 import pandas as pd
 import tensorflow as tf
 import tensorflow_datasets as tfds
-from tensorflow.keras.callbacks import TensorBoard
-from tensorflow.keras.layers import Conv2D, Dense, Dropout, GlobalMaxPooling2D, Input, Flatten, MaxPooling2D, Lambda
-from tensorflow.keras.models import Sequential
+from keras.callbacks import TensorBoard
+from keras.layers import Conv2D, Dense, Dropout, GlobalMaxPooling2D, Input, Flatten, MaxPooling2D, Lambda
+from keras.models import Sequential

 from keras_fsl.layers import GramMatrix
 from keras_fsl.losses.gram_matrix_losses import BinaryCrossentropy, class_consistency_loss, TripletLoss
6 changes: 3 additions & 3 deletions notebooks/triplet_loss_mnist.py
@@ -8,9 +8,9 @@
 import tensorflow as tf
 import tensorflow_addons as tfa
 import tensorflow_datasets as tfds
-from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Lambda, MaxPooling2D
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.callbacks import TensorBoard
+from keras.layers import Conv2D, Dense, Dropout, Flatten, Lambda, MaxPooling2D
+from keras.models import Sequential
+from keras.callbacks import TensorBoard

 from keras_fsl.losses.gram_matrix_losses import triplet_loss
 from keras_fsl.layers import GramMatrix
6 changes: 3 additions & 3 deletions notebooks/unsupervised_cifar10.py
@@ -5,9 +5,9 @@
 import tensorflow as tf
 import tensorflow_datasets as tfds
 import tensorflow_addons as tfa
-from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
-from tensorflow.keras.layers import Conv2D, Dense, Dropout, GlobalMaxPooling2D, Input, Flatten, MaxPooling2D, Lambda
-from tensorflow.keras.models import Sequential
+from keras.callbacks import EarlyStopping, TensorBoard
+from keras.layers import Conv2D, Dense, Dropout, GlobalMaxPooling2D, Input, Flatten, MaxPooling2D, Lambda
+from keras.models import Sequential

 from keras_fsl.layers import GramMatrix
 from keras_fsl.losses.gram_matrix_losses import ClippedBinaryCrossentropy, ClassConsistencyLoss
