Python code

Code shown in the basic video about neural networks in Python

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam

# Data
PSA = np.array([3.8,3.4,2.9,2.8,2.7,2.1,1.6,2.5,2.0,1.7,1.4,1.2,0.9,0.8])
Group = np.array(['C','C','C','C','C','C','C','H','H','H','H','H','H','H'])
target = np.where(Group == 'C', 1, 0)

tf.keras.utils.set_random_seed(915) # Seed Python, NumPy and TensorFlow for reproducibility
# Build the neural network model
model = Sequential()
model.add(Dense(1, input_dim=1, activation='sigmoid'))

# Compile the model
model.compile(optimizer=Adam(learning_rate=0.01), loss='binary_crossentropy')

# Train the model
model.fit(PSA, target, epochs=3000, verbose=1)

# Show the final weights (and bias)
print(model.get_weights())

# Use the network to predict the probability of group 'C' for a PSA value of 2
x1 = np.array([2])
print(model.predict(x1, verbose=0))
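
Optional addition (not shown in the video): plot the fitted sigmoid curve on top of the data to see what the single neuron has learned. The plotting choices below (a PSA grid from 0 to 4 and the labels) are my own.

# Plot the data and the fitted sigmoid curve (predicted probability of group 'C' as a function of PSA)
x_grid = np.linspace(0, 4, 200).reshape(-1, 1)
p_grid = model.predict(x_grid, verbose=0)
plt.scatter(PSA, target, color="blue", label="Data (1 = C, 0 = H)")
plt.plot(x_grid, p_grid, color="red", label="Fitted sigmoid")
plt.xlabel("PSA")
plt.ylabel("Predicted probability of group C")
plt.legend()
plt.show()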

 

Code for an autoencoder that removes noise from images:

import numpy as np
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
import matplotlib.pyplot as plt

# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Filter the dataset to include only '2's
x_train_2s = x_train[y_train == 2]
x_test_2s = x_test[y_test == 2]

# Normalize the data
x_train_2s = x_train_2s.astype('float32') / 255.
x_test_2s = x_test_2s.astype('float32') / 255.

# Reshape the data to include the channel dimension
x_train_2s = np.reshape(x_train_2s, (len(x_train_2s), 28, 28, 1))
x_test_2s = np.reshape(x_test_2s, (len(x_test_2s), 28, 28, 1))

# Add noise to the images
noise_factor = 0.3
x_train_noisy = x_train_2s + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train_2s.shape) 
x_test_noisy = x_test_2s + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test_2s.shape)

# Make sure all values are between 0 and 1
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)

# Build the autoencoder
input_img = layers.Input(shape=(28, 28, 1))

# Encoder
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
x = layers.MaxPooling2D((2, 2), padding='same')(x)
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
encoded = layers.MaxPooling2D((2, 2), padding='same')(x)

# Decoder
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(encoded)
x = layers.UpSampling2D((2, 2))(x)
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = layers.UpSampling2D((2, 2))(x)
decoded = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = models.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

# Train the autoencoder
autoencoder.fit(x_train_noisy, x_train_2s, epochs=50, batch_size=128)

# Use the autoencoder to denoise the test images
decoded_imgs = autoencoder.predict(x_test_noisy)

n = 4 # Number of test images to display
plt.figure(figsize=(6, 2 * n))
for i in range(n):
    # Display original
    plt.subplot(n, 3, 3*i + 1)
    plt.imshow(x_test_2s[i].reshape(28, 28))
    plt.title("Original")
    plt.axis('off')

    # Display noisy
    plt.subplot(n, 3, 3*i + 2)
    plt.imshow(x_test_noisy[i].reshape(28, 28))
    plt.title("Noisy")
    plt.axis('off')

    # Display denoised
    plt.subplot(n, 3, 3*i + 3)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.title("Denoised")
    plt.axis('off')

plt.show()
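
Optional addition (not part of the original code): a simple numeric check that compares the average pixel-wise error before and after denoising; mean squared error is just one reasonable choice of metric.

# Average pixel-wise error on the test '2's, before and after denoising
mse_noisy = np.mean((x_test_noisy - x_test_2s) ** 2)
mse_denoised = np.mean((decoded_imgs - x_test_2s) ** 2)
print(f"MSE noisy vs. clean:    {mse_noisy:.4f}")
print(f"MSE denoised vs. clean: {mse_denoised:.4f}")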

 

Code for a recurrent neural network (RNN). Note that this code is for educational purposes only and is not intended for predicting the stock market.

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, SimpleRNN

# Data
y = np.array([9, 7, 6, 10, 8, 7, 11, 9, 6, 12, 10, 7, 11, 9, 7])
# Normalize the data (min-max scaling to [0, 1])
yn = (y - np.min(y)) / (np.max(y) - np.min(y))
train_size = 10
train = yn[0:train_size]

# Prepare training data: each sample is a window of 'step' values,
# and the target is the value that follows the window
X_train = []
y_train = []
step = 4
for i in range(step, len(train)):
    X_train.append(train[i-step:i])
    y_train.append(train[i])

X_train = np.reshape(X_train, (len(X_train), step, 1)) # (samples, time steps, features)
y_train = np.reshape(y_train, (len(y_train), 1))
# Set seed for reproducibility (seeds Python, NumPy and TensorFlow)
tf.keras.utils.set_random_seed(40)
# Define the RNN model
model = Sequential()
model.add(SimpleRNN(units=5, input_shape=(step, 1), activation="sigmoid"))
model.add(Dense(units=1, activation="sigmoid"))
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
# Train the model
history = model.fit(X_train, y_train, epochs=1000)
# Plot the training loss
plt.figure()
plt.plot(history.history['loss'])
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Training Loss')
plt.show()

# Predict on training data
y_pred_train = model.predict(X_train)
y_pred_train = y_pred_train.flatten()
# Prepare validation data (the last 'step' training values are included as context for the first validation window)
valid = yn[train_size-step:len(yn)]
X_valid = []
y_valid = []
for i in range(step, len(valid)):
    X_valid.append(valid[i-step:i])
    y_valid.append(valid[i])

X_valid = np.reshape(X_valid, (len(X_valid), step, 1))
y_valid = np.reshape(y_valid, (len(y_valid), 1))

# Predict on validation data
y_pred_valid = model.predict(X_valid)
y_pred_valid = y_pred_valid.flatten()

# Set time intervals
t1 = np.arange(1, len(yn) + 1)
t2 = np.arange(step+1, train_size + 1)
t3 = np.arange(step+y_pred_train.shape[0]+1, len(yn) + 1)
plt.figure(figsize=(8, 5))
plt.plot(t1, yn, linestyle="-", marker="o", color="blue", label="Actual values")
plt.plot(t2, y_pred_train, linestyle="-", marker="o", color="red", label="Predicted values (training)")
plt.plot(t3, y_pred_valid, linestyle="-", marker="o", color="green", label="Predicted values (validation)")
plt.xlabel('Time Step')
plt.ylabel('Normalized Values')
plt.title('Actual vs Predicted Values')
plt.legend()
plt.show()
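
Optional addition (not part of the original code): map the normalized validation predictions back to the original scale by inverting the min-max normalization used above.

# Inverse of the min-max normalization: y = yn * (max - min) + min
y_pred_valid_orig = y_pred_valid * (np.max(y) - np.min(y)) + np.min(y)
print("Predicted validation values:", np.round(y_pred_valid_orig, 2))
print("Actual validation values:   ", y[train_size:])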

Code for a convolutional neural network (CNN).

import numpy as np
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

zero = [1,1,1,1,0, 1,0,0,1,0, 1,0,0,1,0, 1,0,0,1,0, 1,1,1,1,0]
one = [0,0,1,0,0, 0,0,1,0,0, 0,0,1,0,0, 0,0,1,0,0, 0,0,1,0,0]
two = [1,1,1,1,0, 0,0,0,1,0, 1,1,1,1,0, 1,0,0,0,0, 1,1,1,1,0]
three = [1,1,1,1,0, 0,0,0,1,0, 0,1,1,1,0, 0,0,0,1,0, 1,1,1,1,0]
four = [1,0,0,1,0, 1,0,0,1,0, 1,1,1,1,0, 0,0,0,1,0, 0,0,0,1,0]
five = [1,1,1,1,0, 1,0,0,0,0, 1,1,1,1,0, 0,0,0,1,0, 1,1,1,1,0]
six = [1,1,1,1,0, 1,0,0,0,0, 1,1,1,1,0, 1,0,0,1,0, 1,1,1,1,0]
seven = [1,1,1,1,0, 0,0,0,1,0, 0,0,1,0,0, 0,1,0,0,0, 0,1,0,0,0]
eight= [1,1,1,1,0, 1,0,0,1,0, 1,1,1,1,0, 1,0,0,1,0, 1,1,1,1,0]
nine = [1,1,1,1,0, 1,0,0,1,0, 1,1,1,1,0, 0,0,0,1,0, 0,0,0,1,0]

X = np.array([zero, one, two, three, four, five, six, seven, eight, nine])
X = X.reshape(10, 5, 5, 1) # (samples, height, width, channels) - Conv2D expects a channel dimension

y = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
y = to_categorical(y, 10)

# Show one of the digits (the 3)
plt.imshow(X[3].reshape(5, 5))
plt.show()

tf.keras.utils.set_random_seed(1) # Seed Python, NumPy and TensorFlow for reproducibility
model = Sequential([
    Conv2D(filters=1,
           kernel_size=(2, 2),
           strides=(1, 1),
           padding='valid',
           input_shape=(5, 5, 1),
           use_bias=False,
           activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(10, activation='softmax')
])
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X, y, epochs=1000, batch_size=1, verbose=1)

probs = model.predict(X[[0]], verbose=0)
print(probs) # Predicted class probabilities for the first digit (the zero)
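
Optional additions (not in the original code): read off the predicted digit with np.argmax, and inspect the single 2x2 filter learned by the convolutional layer.

# The predicted digit is the index of the largest softmax output
print("Predicted digit:", np.argmax(probs))

# Inspect the learned 2x2 convolution filter (use_bias=False, so there is only a kernel)
kernel = model.layers[0].get_weights()[0] # Shape (2, 2, 1, 1)
print("Learned filter:\n", kernel[:, :, 0, 0])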

Code for transfer learning with MobileNet to classify cats and dogs

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# === Parameters ===
IMG_WIDTH, IMG_HEIGHT = 224, 224
BATCH_SIZE = 32
NUM_TRAINABLE_LAYERS = 0 # Change this to >0 when you want to fine-tune MobileNet

# === Directories === Change these paths to where you have saved the images
TRAIN_DATA_DIR = "C:/AI/Cats_and_dogs/training_set"
VALIDATION_DATA_DIR = "C:/AI/Cats_and_dogs/validation_set"
TEST_DATA_DIR = "C:/AI/Cats_and_dogs/test_set"

# === Data Generators ===
train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
zoom_range=0.2,
horizontal_flip=True)

val_test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

train_generator = train_datagen.flow_from_directory(
TRAIN_DATA_DIR,
target_size=(IMG_WIDTH, IMG_HEIGHT),
batch_size=BATCH_SIZE,
class_mode="binary"
)

validation_generator = val_test_datagen.flow_from_directory(
VALIDATION_DATA_DIR,
target_size=(IMG_WIDTH, IMG_HEIGHT),
batch_size=BATCH_SIZE,
class_mode="binary"
)

test_generator = val_test_datagen.flow_from_directory(
TEST_DATA_DIR,
target_size=(IMG_WIDTH, IMG_HEIGHT),
batch_size=BATCH_SIZE,
class_mode="binary",
shuffle=False)

# === Load and Configure MobileNet ===
base_model = MobileNet(weights='imagenet', include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
base_model.summary()

# Freeze all layers first
for layer in base_model.layers:
  layer.trainable = False

# Unfreeze last N layers
if NUM_TRAINABLE_LAYERS > 0:
  for layer in base_model.layers[-NUM_TRAINABLE_LAYERS:]:
    layer.trainable = True

base_model.summary()

# === Build the Model ===
model = Sequential([
base_model,
GlobalAveragePooling2D(), 
Dense(64, activation='relu'), 
Dense(1, activation='sigmoid')
])

# === Compile ===
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
loss='binary_crossentropy',
metrics=['accuracy'])

# === Train ===
model.fit(
train_generator,
epochs=5,
validation_data=validation_generator)

# === Evaluate ===
loss, accuracy = model.evaluate(test_generator)
print(f"Test Accuracy: {accuracy:.4f}")
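
Optional addition (not part of the original code): classify a single image outside the generators. The file path below is only a placeholder, and which class the sigmoid output refers to depends on how flow_from_directory mapped your folder names, so check train_generator.class_indices first.

# Predict a single image (replace the example path with one of your own images)
import numpy as np
from tensorflow.keras.preprocessing.image import load_img, img_to_array

print(train_generator.class_indices) # Shows which folder name was mapped to 0 and which to 1

img = load_img("C:/AI/Cats_and_dogs/test_set/cats/cat1.jpg", target_size=(IMG_WIDTH, IMG_HEIGHT))
x = img_to_array(img)                      # Shape (224, 224, 3)
x = preprocess_input(np.expand_dims(x, 0)) # Add batch dimension and apply MobileNet preprocessing
prob = model.predict(x, verbose=0)[0][0]   # Sigmoid output = probability of the class mapped to 1
print(f"Predicted probability of class 1: {prob:.2f}")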

Code for a U-Net for image segmentation

import matplotlib.pyplot as plt
import cv2
import numpy as np
from glob import glob
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate
from tensorflow.keras.models import Model

folder_paths = ["C:/AI/Cancer/malignant", "C:/AI/Cancer/benign"] # Paths to the 2 folders

size = 128 # Input size: 128x128

images = [] # Empty list to store the original images in
masks = [] # Empty list to store the masks in

found_mask = False # This flag helps us handle multiple masks for the same image
# Loop through both folders
for folder_path in folder_paths:
    # Loop through all files in the current folder (sorted for consistency)
    for file_path in sorted(glob(folder_path + "/*")):
        # Load and resize the image
        img = cv2.imread(file_path)
        img = cv2.resize(img, (size, size)) # Resize image to 128×128
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Convert BGR (OpenCV's default channel order) to grayscale
        img = img / 255.0 # Normalize to [0,1]

        if "mask" in file_path: # Checks if the filename contains "mask"
            if found_mask:
                # Combine with the previous mask
                masks[-1] += img
                # Ensure binary output (0 or 1)
                masks[-1] = np.where(masks[-1] > 0.5, 1.0, 0.0)
            else:
                masks.append(img) # Adds the first mask to the list
                found_mask = True
        else:
            images.append(img) # Adds original image to the list
            found_mask = False

# Convert lists to NumPy arrays
X = np.array(images) # Create an array of all original images
y = np.array(masks) # Create an array of all masks (ground truth)

X = np.expand_dims(X, -1)
y = np.expand_dims(y, -1)

print(f"X shape: {X.shape} | y shape: {y.shape}")
# Split images into training and validation
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1)

# Build the U-Net
input_layer = Input(shape=(size, size, 1))
conv1 = Conv2D(64, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(input_layer)
conv1 = Conv2D(64, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(conv1)
pool1 = MaxPooling2D((2, 2))(conv1)

# Second encoder block
conv2 = Conv2D(128, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(pool1)
conv2 = Conv2D(128, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(conv2)
pool2 = MaxPooling2D((2, 2))(conv2)

# Third encoder block
conv3 = Conv2D(256, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(pool2)
conv3 = Conv2D(256, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(conv3)
pool3 = MaxPooling2D((2, 2))(conv3)

# Fourth encoder block
conv4 = Conv2D(512, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(pool3)
conv4 = Conv2D(512, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(conv4)
pool4 = MaxPooling2D((2, 2))(conv4)

# --- Bottleneck ---
bottleneck = Conv2D(1024, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(pool4)
bottleneck = Conv2D(1024, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(bottleneck)

# First decoder block
upconv1 = Conv2DTranspose(512, (2, 2), strides=2, padding="same",kernel_initializer="he_normal")(bottleneck)
concat1 = concatenate([upconv1, conv4])
conv5 = Conv2D(512, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(concat1)
conv5 = Conv2D(512, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(conv5)

# Second decoder block
upconv2 = Conv2DTranspose(256, (2, 2), strides=2, padding="same",kernel_initializer="he_normal")(conv5)
concat2 = concatenate([upconv2, conv3])
conv6 = Conv2D(256, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(concat2)
conv6 = Conv2D(256, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(conv6)

# Third decoder block
upconv3 = Conv2DTranspose(128, (2, 2), strides=2, padding="same",kernel_initializer="he_normal")(conv6)
concat3 = concatenate([upconv3, conv2])
conv7 = Conv2D(128, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(concat3)
conv7 = Conv2D(128, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(conv7)

# Fourth decoder block
upconv4 = Conv2DTranspose(64, (2, 2), strides=2, padding="same",kernel_initializer="he_normal")(conv7)
concat4 = concatenate([upconv4, conv1])
conv8 = Conv2D(64, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(concat4)
conv8 = Conv2D(64, (3, 3), activation="relu", padding="same",kernel_initializer="he_normal")(conv8)

# --- Output layer ---
output_layer = Conv2D(1, (1, 1), activation="sigmoid", padding="same")(conv8)

# --- Model creation ---
model = Model(inputs=input_layer, outputs=output_layer)
model.summary()

# Compile and train the model
model.compile(loss="binary_crossentropy", optimizer="Adam", metrics=["accuracy"])
model.fit(X_train, y_train, epochs = 40, validation_data = (X_val,y_val),verbose=1)

# Compute IoU (intersection over union)
from sklearn.metrics import jaccard_score
# Compute predicted masks
pred = model.predict(X_val, verbose=1)
pred = (pred > 0.5).astype(int) # Binarize
y_true = y_val.astype(int)
# Compute IoU from the flattened ground truths and predictions
iou = jaccard_score(y_true.flatten(), pred.flatten())
print(f"IoU (Jaccard score): {iou:.4f}")


# Plot an example: original image, ground-truth mask and predicted mask
i = 6 # Try other values
plt.figure(figsize=(9, 3))
plt.subplot(1, 3, 1)
plt.imshow(X_val[i].squeeze(), cmap="gray")
plt.title("Image")
plt.subplot(1, 3, 2)
plt.imshow(y_val[i].squeeze(), cmap="gray")
plt.title("Ground truth")
plt.subplot(1, 3, 3)
pred_i = model.predict(np.expand_dims(X_val[i], axis=0), verbose=1)[0]
pred_i = (pred_i > 0.5) # Binarize
plt.imshow(pred_i.squeeze(), cmap="gray")
plt.title("Prediction")
plt.show()
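
Optional addition (not part of the original code): the Dice coefficient as a complement to IoU. For binary masks the Dice coefficient equals the F1 score, so sklearn's f1_score can be reused on the flattened arrays (Dice and IoU are related by Dice = 2*IoU / (1 + IoU)).

# Dice coefficient (equals the F1 score for binary masks)
from sklearn.metrics import f1_score
dice = f1_score(y_true.flatten(), pred.flatten())
print(f"Dice coefficient: {dice:.4f}")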