diff --git a/docs/source/kerasga.rst b/docs/source/kerasga.rst index f39ffec6..537e63df 100644 --- a/docs/source/kerasga.rst +++ b/docs/source/kerasga.rst @@ -70,13 +70,13 @@ Here is an example of a model created using the Sequential Model. .. code:: python - import tensorflow.keras + import keras - input_layer = tensorflow.keras.layers.Input(3) - dense_layer1 = tensorflow.keras.layers.Dense(5, activation="relu") - output_layer = tensorflow.keras.layers.Dense(1, activation="linear") + input_layer = keras.layers.Input(3) + dense_layer1 = keras.layers.Dense(5, activation="relu") + output_layer = keras.layers.Dense(1, activation="linear") - model = tensorflow.keras.Sequential() + model = keras.Sequential() model.add(input_layer) model.add(dense_layer1) model.add(output_layer) @@ -85,11 +85,11 @@ This is the same model created using the Functional API. .. code:: python - input_layer = tensorflow.keras.layers.Input(3) - dense_layer1 = tensorflow.keras.layers.Dense(5, activation="relu")(input_layer) - output_layer = tensorflow.keras.layers.Dense(1, activation="linear")(dense_layer1) + input_layer = keras.layers.Input(3) + dense_layer1 = keras.layers.Dense(5, activation="relu")(input_layer) + output_layer = keras.layers.Dense(1, activation="linear")(dense_layer1) - model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) + model = keras.Model(inputs=input_layer, outputs=output_layer) Feel free to add the layers of your choice. @@ -235,7 +235,7 @@ subsections discuss each part in the code. .. code:: python - import tensorflow.keras + import keras import pygad.kerasga import numpy import pygad @@ -247,7 +247,7 @@ subsections discuss each part in the code. solution=solution, data=data_inputs) - mae = tensorflow.keras.losses.MeanAbsoluteError() + mae = keras.losses.MeanAbsoluteError() abs_error = mae(data_outputs, predictions).numpy() + 0.00000001 solution_fitness = 1.0/abs_error @@ -257,11 +257,11 @@ subsections discuss each part in the code. 
print(f"Generation = {ga_instance.generations_completed}") print(f"Fitness = {ga_instance.best_solution()[1]}") - input_layer = tensorflow.keras.layers.Input(3) - dense_layer1 = tensorflow.keras.layers.Dense(5, activation="relu")(input_layer) - output_layer = tensorflow.keras.layers.Dense(1, activation="linear")(dense_layer1) + input_layer = keras.layers.Input(3) + dense_layer1 = keras.layers.Dense(5, activation="relu")(input_layer) + output_layer = keras.layers.Dense(1, activation="linear")(dense_layer1) - model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) + model = keras.Model(inputs=input_layer, outputs=output_layer) keras_ga = pygad.kerasga.KerasGA(model=model, num_solutions=10) @@ -305,7 +305,7 @@ subsections discuss each part in the code. data=data_inputs) print(f"Predictions : \n{predictions}") - mae = tensorflow.keras.losses.MeanAbsoluteError() + mae = keras.losses.MeanAbsoluteError() abs_error = mae(data_outputs, predictions).numpy() print(f"Absolute Error : {abs_error}") @@ -318,23 +318,23 @@ Functional API. .. code:: python - import tensorflow.keras + import keras - input_layer = tensorflow.keras.layers.Input(3) - dense_layer1 = tensorflow.keras.layers.Dense(5, activation="relu")(input_layer) - output_layer = tensorflow.keras.layers.Dense(1, activation="linear")(dense_layer1) + input_layer = keras.layers.Input(3) + dense_layer1 = keras.layers.Dense(5, activation="relu")(input_layer) + output_layer = keras.layers.Dense(1, activation="linear")(dense_layer1) - model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) + model = keras.Model(inputs=input_layer, outputs=output_layer) The model can also be build using the Keras Sequential Model API. .. 
code:: python - input_layer = tensorflow.keras.layers.Input(3) - dense_layer1 = tensorflow.keras.layers.Dense(5, activation="relu") - output_layer = tensorflow.keras.layers.Dense(1, activation="linear") + input_layer = keras.layers.Input(3) + dense_layer1 = keras.layers.Dense(5, activation="relu") + output_layer = keras.layers.Dense(1, activation="linear") - model = tensorflow.keras.Sequential() + model = keras.Sequential() model.add(input_layer) model.add(dense_layer1) model.add(output_layer) @@ -400,7 +400,7 @@ Feel free to use any other loss function to calculate the fitness value. solution=solution, data=data_inputs) - mae = tensorflow.keras.losses.MeanAbsoluteError() + mae = keras.losses.MeanAbsoluteError() abs_error = mae(data_outputs, predictions).numpy() + 0.00000001 solution_fitness = 1.0/abs_error @@ -492,7 +492,7 @@ The next code measures the trained model error. .. code:: python - mae = tensorflow.keras.losses.MeanAbsoluteError() + mae = keras.losses.MeanAbsoluteError() abs_error = mae(data_outputs, predictions).numpy() print(f"Absolute Error : {abs_error}") @@ -509,7 +509,7 @@ previous example. .. code:: python - import tensorflow.keras + import keras import pygad.kerasga import numpy import pygad @@ -521,7 +521,7 @@ previous example. solution=solution, data=data_inputs) - bce = tensorflow.keras.losses.BinaryCrossentropy() + bce = keras.losses.BinaryCrossentropy() solution_fitness = 1.0 / (bce(data_outputs, predictions).numpy() + 0.00000001) return solution_fitness @@ -531,11 +531,11 @@ previous example. print(f"Fitness = {ga_instance.best_solution()[1]}") # Build the keras model using the functional API. 
- input_layer = tensorflow.keras.layers.Input(2) - dense_layer = tensorflow.keras.layers.Dense(4, activation="relu")(input_layer) - output_layer = tensorflow.keras.layers.Dense(2, activation="softmax")(dense_layer) + input_layer = keras.layers.Input(2) + dense_layer = keras.layers.Dense(4, activation="relu")(input_layer) + output_layer = keras.layers.Dense(2, activation="softmax")(dense_layer) - model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) + model = keras.Model(inputs=input_layer, outputs=output_layer) # Create an instance of the pygad.kerasga.KerasGA class to build the initial population. keras_ga = pygad.kerasga.KerasGA(model=model, @@ -583,11 +583,11 @@ previous example. print(f"Predictions : \n{predictions}") # Calculate the binary crossentropy for the trained model. - bce = tensorflow.keras.losses.BinaryCrossentropy() + bce = keras.losses.BinaryCrossentropy() print("Binary Crossentropy : ", bce(data_outputs, predictions).numpy()) # Calculate the classification accuracy for the trained model. - ba = tensorflow.keras.metrics.BinaryAccuracy() + ba = keras.metrics.BinaryAccuracy() ba.update_state(data_outputs, predictions) accuracy = ba.result().numpy() print(f"Accuracy : {accuracy}") @@ -601,11 +601,11 @@ Compared to the previous regression example, here are the changes: .. code:: python # Build the keras model using the functional API. - input_layer = tensorflow.keras.layers.Input(2) - dense_layer = tensorflow.keras.layers.Dense(4, activation="relu")(input_layer) - output_layer = tensorflow.keras.layers.Dense(2, activation="softmax")(dense_layer) + input_layer = keras.layers.Input(2) + dense_layer = keras.layers.Dense(4, activation="relu")(input_layer) + output_layer = keras.layers.Dense(2, activation="softmax")(dense_layer) - model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) + model = keras.Model(inputs=input_layer, outputs=output_layer) - The train data is changed. 
Note that the output of each sample is a 1D vector of 2 values, 1 for each class. @@ -628,7 +628,7 @@ Compared to the previous regression example, here are the changes: .. code:: python - bce = tensorflow.keras.losses.BinaryCrossentropy() + bce = keras.losses.BinaryCrossentropy() solution_fitness = 1.0 / (bce(data_outputs, predictions).numpy() + 0.00000001) After the previous code completes, the next figure shows how the fitness @@ -662,7 +662,7 @@ Here is the code. .. code:: python - import tensorflow.keras + import keras import pygad.kerasga import numpy import pygad @@ -674,7 +674,7 @@ Here is the code. solution=solution, data=data_inputs) - cce = tensorflow.keras.losses.CategoricalCrossentropy() + cce = keras.losses.CategoricalCrossentropy() solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001) return solution_fitness @@ -684,11 +684,11 @@ Here is the code. print(f"Fitness = {ga_instance.best_solution()[1]}") # Build the keras model using the functional API. - input_layer = tensorflow.keras.layers.Input(360) - dense_layer = tensorflow.keras.layers.Dense(50, activation="relu")(input_layer) - output_layer = tensorflow.keras.layers.Dense(4, activation="softmax")(dense_layer) + input_layer = keras.layers.Input(360) + dense_layer = keras.layers.Dense(50, activation="relu")(input_layer) + output_layer = keras.layers.Dense(4, activation="softmax")(dense_layer) - model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) + model = keras.Model(inputs=input_layer, outputs=output_layer) # Create an instance of the pygad.kerasga.KerasGA class to build the initial population. keras_ga = pygad.kerasga.KerasGA(model=model, @@ -699,7 +699,7 @@ Here is the code. # Data outputs data_outputs = numpy.load("../data/outputs.npy") - data_outputs = tensorflow.keras.utils.to_categorical(data_outputs) + data_outputs = keras.utils.to_categorical(data_outputs) # Prepare the PyGAD parameters. 
Check the documentation for more information: https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class num_generations = 100 # Number of generations. @@ -731,11 +731,11 @@ Here is the code. # print(f"Predictions : \n{predictions}") # Calculate the categorical crossentropy for the trained model. - cce = tensorflow.keras.losses.CategoricalCrossentropy() + cce = keras.losses.CategoricalCrossentropy() print(f"Categorical Crossentropy : {cce(data_outputs, predictions).numpy()}") # Calculate the classification accuracy for the trained model. - ca = tensorflow.keras.metrics.CategoricalAccuracy() + ca = keras.metrics.CategoricalAccuracy() ca.update_state(data_outputs, predictions) accuracy = ca.result().numpy() print(f"Accuracy : {accuracy}") @@ -746,7 +746,7 @@ cross entropy. .. code:: python - cce = tensorflow.keras.losses.CategoricalCrossentropy() + cce = keras.losses.CategoricalCrossentropy() solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001) .. _prepare-the-training-data-2: @@ -774,7 +774,7 @@ vector extracted from each image has a length 360. Simply download these 2 files and read them according to the next code. Note that the class labels are one-hot encoded using the -``tensorflow.keras.utils.to_categorical()`` function. +``keras.utils.to_categorical()`` function. .. code:: python @@ -783,7 +783,7 @@ Note that the class labels are one-hot encoded using the data_inputs = numpy.load("../data/dataset_features.npy") data_outputs = numpy.load("../data/outputs.npy") - data_outputs = tensorflow.keras.utils.to_categorical(data_outputs) + data_outputs = keras.utils.to_categorical(data_outputs) The next figure shows how the fitness value changes. @@ -809,7 +809,7 @@ Here is the complete code. .. code:: python - import tensorflow.keras + import keras import pygad.kerasga import numpy import pygad @@ -821,7 +821,7 @@ Here is the complete code. 
solution=solution, data=data_inputs) - cce = tensorflow.keras.losses.CategoricalCrossentropy() + cce = keras.losses.CategoricalCrossentropy() solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001) return solution_fitness @@ -831,20 +831,20 @@ Here is the complete code. print(f"Fitness = {ga_instance.best_solution()[1]}") # Build the keras model using the functional API. - input_layer = tensorflow.keras.layers.Input(shape=(100, 100, 3)) - conv_layer1 = tensorflow.keras.layers.Conv2D(filters=5, + input_layer = keras.layers.Input(shape=(100, 100, 3)) + conv_layer1 = keras.layers.Conv2D(filters=5, kernel_size=7, activation="relu")(input_layer) - max_pool1 = tensorflow.keras.layers.MaxPooling2D(pool_size=(5,5), + max_pool1 = keras.layers.MaxPooling2D(pool_size=(5,5), strides=5)(conv_layer1) - conv_layer2 = tensorflow.keras.layers.Conv2D(filters=3, + conv_layer2 = keras.layers.Conv2D(filters=3, kernel_size=3, activation="relu")(max_pool1) - flatten_layer = tensorflow.keras.layers.Flatten()(conv_layer2) - dense_layer = tensorflow.keras.layers.Dense(15, activation="relu")(flatten_layer) - output_layer = tensorflow.keras.layers.Dense(4, activation="softmax")(dense_layer) + flatten_layer = keras.layers.Flatten()(conv_layer2) + dense_layer = keras.layers.Dense(15, activation="relu")(flatten_layer) + output_layer = keras.layers.Dense(4, activation="softmax")(dense_layer) - model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) + model = keras.Model(inputs=input_layer, outputs=output_layer) # Create an instance of the pygad.kerasga.KerasGA class to build the initial population. keras_ga = pygad.kerasga.KerasGA(model=model, @@ -855,7 +855,7 @@ Here is the complete code. # Data outputs data_outputs = numpy.load("../data/dataset_outputs.npy") - data_outputs = tensorflow.keras.utils.to_categorical(data_outputs) + data_outputs = keras.utils.to_categorical(data_outputs) # Prepare the PyGAD parameters. 
Check the documentation for more information: https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class num_generations = 200 # Number of generations. @@ -887,11 +887,11 @@ Here is the complete code. # print(f"Predictions : \n{predictions}") # Calculate the categorical crossentropy for the trained model. - cce = tensorflow.keras.losses.CategoricalCrossentropy() + cce = keras.losses.CategoricalCrossentropy() print(f"Categorical Crossentropy : {cce(data_outputs, predictions).numpy()}") # Calculate the classification accuracy for the trained model. - ca = tensorflow.keras.metrics.CategoricalAccuracy() + ca = keras.metrics.CategoricalAccuracy() ca.update_state(data_outputs, predictions) accuracy = ca.result().numpy() print(f"Accuracy : {accuracy}") @@ -903,20 +903,20 @@ each input sample is 100x100x3. .. code:: python # Build the keras model using the functional API. - input_layer = tensorflow.keras.layers.Input(shape=(100, 100, 3)) - conv_layer1 = tensorflow.keras.layers.Conv2D(filters=5, + input_layer = keras.layers.Input(shape=(100, 100, 3)) + conv_layer1 = keras.layers.Conv2D(filters=5, kernel_size=7, activation="relu")(input_layer) - max_pool1 = tensorflow.keras.layers.MaxPooling2D(pool_size=(5,5), + max_pool1 = keras.layers.MaxPooling2D(pool_size=(5,5), strides=5)(conv_layer1) - conv_layer2 = tensorflow.keras.layers.Conv2D(filters=3, + conv_layer2 = keras.layers.Conv2D(filters=3, kernel_size=3, activation="relu")(max_pool1) - flatten_layer = tensorflow.keras.layers.Flatten()(conv_layer2) - dense_layer = tensorflow.keras.layers.Dense(15, activation="relu")(flatten_layer) - output_layer = tensorflow.keras.layers.Dense(4, activation="softmax")(dense_layer) + flatten_layer = keras.layers.Flatten()(conv_layer2) + dense_layer = keras.layers.Dense(15, activation="relu")(flatten_layer) + output_layer = keras.layers.Dense(4, activation="softmax")(dense_layer) - model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) + model = 
keras.Model(inputs=input_layer, outputs=output_layer) .. _prepare-the-training-data-3: @@ -942,7 +942,7 @@ section of the ``pygad.cnn`` module. Simply download these 2 files and read them according to the next code. Note that the class labels are one-hot encoded using the -``tensorflow.keras.utils.to_categorical()`` function. +``keras.utils.to_categorical()`` function. .. code:: python @@ -951,7 +951,7 @@ Note that the class labels are one-hot encoded using the data_inputs = numpy.load("../data/dataset_inputs.npy") data_outputs = numpy.load("../data/dataset_outputs.npy") - data_outputs = tensorflow.keras.utils.to_categorical(data_outputs) + data_outputs = keras.utils.to_categorical(data_outputs) The next figure shows how the fitness value changes. @@ -984,7 +984,7 @@ Example 5: Image Classification using Data Generator ---------------------------------------------------- This example uses the image data generator -``tensorflow.keras.preprocessing.image.ImageDataGenerator`` to feed data +``keras.preprocessing.image.ImageDataGenerator`` to feed data to the model. Instead of reading all the data in the memory, the data generator generates the data needed by the model and only save it in the memory instead of saving all the data. This frees the memory but adds @@ -993,7 +993,7 @@ more computational time. .. code:: python import tensorflow as tf - import tensorflow.keras + import keras import pygad.kerasga import pygad @@ -1004,7 +1004,7 @@ more computational time. solution=solution, data=train_generator) - cce = tensorflow.keras.losses.CategoricalCrossentropy() + cce = keras.losses.CategoricalCrossentropy() solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001) return solution_fitness @@ -1068,11 +1068,11 @@ more computational time. # print(f"Predictions : \n{predictions}") # Calculate the categorical crossentropy for the trained model. 
- cce = tensorflow.keras.losses.CategoricalCrossentropy() + cce = keras.losses.CategoricalCrossentropy() print(f"Categorical Crossentropy : {cce(data_outputs, predictions).numpy()}") # Calculate the classification accuracy for the trained model. - ca = tensorflow.keras.metrics.CategoricalAccuracy() + ca = keras.metrics.CategoricalAccuracy() ca.update_state(data_outputs, predictions) accuracy = ca.result().numpy() print(f"Accuracy : {accuracy}") diff --git a/docs/source/torchga.rst b/docs/source/torchga.rst index 27825e83..619caf5d 100644 --- a/docs/source/torchga.rst +++ b/docs/source/torchga.rst @@ -664,7 +664,7 @@ Here is the code. # Data outputs data_outputs = torch.from_numpy(numpy.load("outputs.npy")).long() - # The next 2 lines are equivelant to this Keras function to perform 1-hot encoding: tensorflow.keras.utils.to_categorical(data_outputs) + # The next 2 lines are equivalent to this Keras function to perform 1-hot encoding: keras.utils.to_categorical(data_outputs) # temp_outs = numpy.zeros((data_outputs.shape[0], numpy.unique(data_outputs).size), dtype=numpy.uint8) # temp_outs[numpy.arange(data_outputs.shape[0]), numpy.uint8(data_outputs)] = 1 diff --git a/examples/KerasGA/XOR_classification.py b/examples/KerasGA/XOR_classification.py index 2d7e4ee5..92fc8044 100644 --- a/examples/KerasGA/XOR_classification.py +++ b/examples/KerasGA/XOR_classification.py @@ -1,4 +1,4 @@ -import tensorflow.keras +import keras import pygad.kerasga import numpy import pygad @@ -10,7 +10,7 @@ def fitness_func(ga_instanse, solution, sol_idx): solution=solution, data=data_inputs) - bce = tensorflow.keras.losses.BinaryCrossentropy() + bce = keras.losses.BinaryCrossentropy() solution_fitness = 1.0 / (bce(data_outputs, predictions).numpy() + 0.00000001) return solution_fitness @@ -20,11 +20,11 @@ def on_generation(ga_instance): print(f"Fitness = {ga_instance.best_solution()[1]}") # Build the keras model using the functional API.
-input_layer = tensorflow.keras.layers.Input(2) -dense_layer = tensorflow.keras.layers.Dense(4, activation="relu")(input_layer) -output_layer = tensorflow.keras.layers.Dense(2, activation="softmax")(dense_layer) +input_layer = keras.layers.Input(2) +dense_layer = keras.layers.Dense(4, activation="relu")(input_layer) +output_layer = keras.layers.Dense(2, activation="softmax")(dense_layer) -model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) +model = keras.Model(inputs=input_layer, outputs=output_layer) # Create an instance of the pygad.kerasga.KerasGA class to build the initial population. keras_ga = pygad.kerasga.KerasGA(model=model, @@ -71,11 +71,11 @@ def on_generation(ga_instance): print(f"Predictions : \n{predictions}") # Calculate the binary crossentropy for the trained model. -bce = tensorflow.keras.losses.BinaryCrossentropy() +bce = keras.losses.BinaryCrossentropy() print(f"Binary Crossentropy : {bce(data_outputs, predictions).numpy()}") # Calculate the classification accuracy for the trained model. 
-ba = tensorflow.keras.metrics.BinaryAccuracy() +ba = keras.metrics.BinaryAccuracy() ba.update_state(data_outputs, predictions) accuracy = ba.result().numpy() print(f"Accuracy : {accuracy}") diff --git a/examples/KerasGA/cancer_dataset.py b/examples/KerasGA/cancer_dataset.py index f5e87d39..e911d92a 100644 --- a/examples/KerasGA/cancer_dataset.py +++ b/examples/KerasGA/cancer_dataset.py @@ -1,5 +1,5 @@ import tensorflow as tf -import tensorflow.keras +import keras import pygad.kerasga import pygad import numpy @@ -11,7 +11,7 @@ def fitness_func(ga_instanse, solution, sol_idx): solution=solution, data=train_data) - cce = tensorflow.keras.losses.CategoricalCrossentropy() + cce = keras.losses.CategoricalCrossentropy() solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001) return solution_fitness @@ -80,11 +80,11 @@ def on_generation(ga_instance): # print("Predictions : \n", predictions) # Calculate the categorical crossentropy for the trained model. -cce = tensorflow.keras.losses.CategoricalCrossentropy() +cce = keras.losses.CategoricalCrossentropy() print(f"Categorical Crossentropy : {cce(data_outputs, predictions).numpy()}") # Calculate the classification accuracy for the trained model. 
-ca = tensorflow.keras.metrics.CategoricalAccuracy() +ca = keras.metrics.CategoricalAccuracy() ca.update_state(data_outputs, predictions) accuracy = ca.result().numpy() print(f"Accuracy : {accuracy}") diff --git a/examples/KerasGA/cancer_dataset_generator.py b/examples/KerasGA/cancer_dataset_generator.py index 9746e907..bbe4b1f6 100644 --- a/examples/KerasGA/cancer_dataset_generator.py +++ b/examples/KerasGA/cancer_dataset_generator.py @@ -1,5 +1,5 @@ import tensorflow as tf -import tensorflow.keras +import keras import pygad.kerasga import pygad @@ -10,7 +10,7 @@ def fitness_func(ga_instanse, solution, sol_idx): solution=solution, data=train_generator) - cce = tensorflow.keras.losses.CategoricalCrossentropy() + cce = keras.losses.CategoricalCrossentropy() solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001) return solution_fitness @@ -74,11 +74,11 @@ def on_generation(ga_instance): # print("Predictions : \n", predictions) # Calculate the categorical crossentropy for the trained model. -cce = tensorflow.keras.losses.CategoricalCrossentropy() +cce = keras.losses.CategoricalCrossentropy() print(f"Categorical Crossentropy : {cce(data_outputs, predictions).numpy()}") # Calculate the classification accuracy for the trained model. 
-ca = tensorflow.keras.metrics.CategoricalAccuracy() +ca = keras.metrics.CategoricalAccuracy() ca.update_state(data_outputs, predictions) accuracy = ca.result().numpy() print(f"Accuracy : {accuracy}") diff --git a/examples/KerasGA/image_classification_CNN.py b/examples/KerasGA/image_classification_CNN.py index 5e467607..8f71edd1 100644 --- a/examples/KerasGA/image_classification_CNN.py +++ b/examples/KerasGA/image_classification_CNN.py @@ -1,4 +1,4 @@ -import tensorflow.keras +import keras import pygad.kerasga import numpy import pygad @@ -12,7 +12,7 @@ def fitness_func(ga_instanse, solution, sol_idx): solution=solution, data=data_inputs) - cce = tensorflow.keras.losses.CategoricalCrossentropy() + cce = keras.losses.CategoricalCrossentropy() solution_fitness = 1.0 / \ (cce(data_outputs, predictions).numpy() + 0.00000001) @@ -26,22 +26,22 @@ def on_generation(ga_instance): # Build the keras model using the functional API. -input_layer = tensorflow.keras.layers.Input(shape=(100, 100, 3)) -conv_layer1 = tensorflow.keras.layers.Conv2D(filters=5, +input_layer = keras.layers.Input(shape=(100, 100, 3)) +conv_layer1 = keras.layers.Conv2D(filters=5, kernel_size=7, activation="relu")(input_layer) -max_pool1 = tensorflow.keras.layers.MaxPooling2D(pool_size=(5, 5), +max_pool1 = keras.layers.MaxPooling2D(pool_size=(5, 5), strides=5)(conv_layer1) -conv_layer2 = tensorflow.keras.layers.Conv2D(filters=3, +conv_layer2 = keras.layers.Conv2D(filters=3, kernel_size=3, activation="relu")(max_pool1) -flatten_layer = tensorflow.keras.layers.Flatten()(conv_layer2) -dense_layer = tensorflow.keras.layers.Dense( +flatten_layer = keras.layers.Flatten()(conv_layer2) +dense_layer = keras.layers.Dense( 15, activation="relu")(flatten_layer) -output_layer = tensorflow.keras.layers.Dense( +output_layer = keras.layers.Dense( 4, activation="softmax")(dense_layer) -model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) +model = keras.Model(inputs=input_layer, outputs=output_layer) # 
Create an instance of the pygad.kerasga.KerasGA class to build the initial population. keras_ga = pygad.kerasga.KerasGA(model=model, @@ -52,7 +52,7 @@ def on_generation(ga_instance): # Data outputs data_outputs = numpy.load("../data/dataset_outputs.npy") -data_outputs = tensorflow.keras.utils.to_categorical(data_outputs) +data_outputs = keras.utils.to_categorical(data_outputs) # Prepare the PyGAD parameters. Check the documentation for more information: https://pygad.readthedocs.io/en/latest/README_pygad_ReadTheDocs.html#pygad-ga-class num_generations = 200 # Number of generations. @@ -86,11 +86,11 @@ def on_generation(ga_instance): # print("Predictions : \n", predictions) # Calculate the categorical crossentropy for the trained model. -cce = tensorflow.keras.losses.CategoricalCrossentropy() +cce = keras.losses.CategoricalCrossentropy() print(f"Categorical Crossentropy : {cce(data_outputs, predictions).numpy()}") # Calculate the classification accuracy for the trained model. -ca = tensorflow.keras.metrics.CategoricalAccuracy() +ca = keras.metrics.CategoricalAccuracy() ca.update_state(data_outputs, predictions) accuracy = ca.result().numpy() print(f"Accuracy : {accuracy}") diff --git a/examples/KerasGA/image_classification_Dense.py b/examples/KerasGA/image_classification_Dense.py index 986282a3..60aab48f 100644 --- a/examples/KerasGA/image_classification_Dense.py +++ b/examples/KerasGA/image_classification_Dense.py @@ -1,4 +1,4 @@ -import tensorflow.keras +import keras import pygad.kerasga import numpy import pygad @@ -10,7 +10,7 @@ def fitness_func(ga_instanse, solution, sol_idx): solution=solution, data=data_inputs) - cce = tensorflow.keras.losses.CategoricalCrossentropy() + cce = keras.losses.CategoricalCrossentropy() solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001) return solution_fitness @@ -20,11 +20,11 @@ def on_generation(ga_instance): print(f"Fitness = {ga_instance.best_solution()[1]}") # Build the keras model using the 
functional API. -input_layer = tensorflow.keras.layers.Input(360) -dense_layer = tensorflow.keras.layers.Dense(50, activation="relu")(input_layer) -output_layer = tensorflow.keras.layers.Dense(4, activation="softmax")(dense_layer) +input_layer = keras.layers.Input(360) +dense_layer = keras.layers.Dense(50, activation="relu")(input_layer) +output_layer = keras.layers.Dense(4, activation="softmax")(dense_layer) -model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) +model = keras.Model(inputs=input_layer, outputs=output_layer) # Create an instance of the pygad.kerasga.KerasGA class to build the initial population. keras_ga = pygad.kerasga.KerasGA(model=model, @@ -35,7 +35,7 @@ def on_generation(ga_instance): # Data outputs data_outputs = numpy.load("../data/outputs.npy") -data_outputs = tensorflow.keras.utils.to_categorical(data_outputs) +data_outputs = keras.utils.to_categorical(data_outputs) # Prepare the PyGAD parameters. Check the documentation for more information: https://pygad.readthedocs.io/en/latest/README_pygad_ReadTheDocs.html#pygad-ga-class num_generations = 100 # Number of generations. @@ -67,11 +67,11 @@ def on_generation(ga_instance): # print("Predictions : \n", predictions) # Calculate the categorical crossentropy for the trained model. -cce = tensorflow.keras.losses.CategoricalCrossentropy() +cce = keras.losses.CategoricalCrossentropy() print(f"Categorical Crossentropy : {cce(data_outputs, predictions).numpy()}") # Calculate the classification accuracy for the trained model. 
-ca = tensorflow.keras.metrics.CategoricalAccuracy() +ca = keras.metrics.CategoricalAccuracy() ca.update_state(data_outputs, predictions) accuracy = ca.result().numpy() print(f"Accuracy : {accuracy}") diff --git a/examples/KerasGA/regression_example.py b/examples/KerasGA/regression_example.py index 11312c35..7fcccefe 100644 --- a/examples/KerasGA/regression_example.py +++ b/examples/KerasGA/regression_example.py @@ -1,4 +1,4 @@ -import tensorflow.keras +import keras import pygad.kerasga import numpy import pygad @@ -10,7 +10,7 @@ def fitness_func(ga_instanse, solution, sol_idx): solution=solution, data=data_inputs) - mae = tensorflow.keras.losses.MeanAbsoluteError() + mae = keras.losses.MeanAbsoluteError() abs_error = mae(data_outputs, predictions).numpy() + 0.00000001 solution_fitness = 1.0 / abs_error @@ -21,12 +21,12 @@ def on_generation(ga_instance): print(f"Fitness = {ga_instance.best_solution()[1]}") # Create the Keras model. -input_layer = tensorflow.keras.layers.Input(3) -dense_layer1 = tensorflow.keras.layers.Dense(5, activation="relu")(input_layer) +input_layer = keras.layers.Input(3) +dense_layer1 = keras.layers.Dense(5, activation="relu")(input_layer) dense_layer1.trainable = False -output_layer = tensorflow.keras.layers.Dense(1, activation="linear")(dense_layer1) +output_layer = keras.layers.Dense(1, activation="linear")(dense_layer1) -model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer) +model = keras.Model(inputs=input_layer, outputs=output_layer) keras_ga = pygad.kerasga.KerasGA(model=model, num_solutions=10) @@ -69,7 +69,7 @@ def on_generation(ga_instance): data=data_inputs) print(f"Predictions : \n{predictions}") -mae = tensorflow.keras.losses.MeanAbsoluteError() +mae = keras.losses.MeanAbsoluteError() abs_error = mae(data_outputs, predictions).numpy() print(f"Absolute Error : {abs_error}") diff --git a/examples/TorchGA/image_classification_Dense.py b/examples/TorchGA/image_classification_Dense.py index 85e8b1f3..e0c46a85 
100644 --- a/examples/TorchGA/image_classification_Dense.py +++ b/examples/TorchGA/image_classification_Dense.py @@ -40,7 +40,7 @@ def on_generation(ga_instance): # Data outputs data_outputs = torch.from_numpy(numpy.load("../data/outputs.npy")).long() -# The next 2 lines are equivelant to this Keras function to perform 1-hot encoding: tensorflow.keras.utils.to_categorical(data_outputs) +# The next 2 lines are equivalent to this Keras function to perform 1-hot encoding: keras.utils.to_categorical(data_outputs) # temp_outs = numpy.zeros((data_outputs.shape[0], numpy.unique(data_outputs).size), dtype=numpy.uint8) # temp_outs[numpy.arange(data_outputs.shape[0]), numpy.uint8(data_outputs)] = 1 diff --git a/pygad/kerasga/kerasga.py b/pygad/kerasga/kerasga.py index cda2c4b9..3ec8f2cd 100644 --- a/pygad/kerasga/kerasga.py +++ b/pygad/kerasga/kerasga.py @@ -1,6 +1,6 @@ import copy import numpy -import tensorflow.keras +import keras def model_weights_as_vector(model): """ @@ -100,7 +100,7 @@ def predict(model, # Fetch the parameters of the best solution. solution_weights = model_weights_as_matrix(model=model, weights_vector=solution) - _model = tensorflow.keras.models.clone_model(model) + _model = keras.models.clone_model(model) _model.set_weights(solution_weights) predictions = _model.predict(x=data, batch_size=batch_size,