TE ANN Practical

Practical 1.

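# Experiment 1: Plot common activation functions (Sigmoid, Tanh, ReLU, Softmax)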
import numpy as np
import matplotlib.pyplot as plt

# Activation Functions
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def tanh(x):
    return np.tanh(x)

def relu(x):
    return np.maximum(0, x)

def softmax(x):
    # Stabilized softmax to prevent overflow
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)

start = float(input("Enter start value for x (e.g., -10): "))
end = float(input("Enter end value for x (e.g., 10): "))
x = np.linspace(start, end, 100)

plt.figure(figsize=(10, 8))

plt.subplot(2, 2, 1)
plt.plot(x, sigmoid(x))
plt.title('Sigmoid Activation Function')
plt.grid(True)

plt.subplot(2, 2, 2)
plt.plot(x, tanh(x))
plt.title('Tanh Activation Function')
plt.grid(True)

plt.subplot(2, 2, 3)
plt.plot(x, relu(x))
plt.title('ReLU Activation Function')
plt.grid(True)

plt.subplot(2, 2, 4)
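# Note: unlike the pointwise functions above, softmax normalizes across the
# whole x vector, so this panel shows a scaled exponential whose values sum to 1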
plt.plot(x, softmax(x))
plt.title('Softmax Activation Function')
plt.grid(True)

plt.tight_layout()
plt.show()


Practical 2.

# Experiment 2: Generate ANDNOT function using McCulloch-Pitts neural net
def mcp_andnot(x1, x2):
    w1, w2 = 1, -1 # Positive weight for x1, negative for x2
    threshold = 1
   
    y_in = x1 * w1 + x2 * w2
    # Activation function
    if y_in >= threshold:
        return 1
    else:
        return 0

if __name__ == "__main__":
    print("Truth Table for ANDNOT")
    while True:
        try:
            x1_in = input("Enter x1 (0 or 1, or 'q' to quit): ")
            if x1_in.lower() == 'q': break
            x1 = int(x1_in)
            x2 = int(input("Enter x2 (0 or 1): "))
            y = mcp_andnot(x1, x2)
            print(f"Output for {x1} ANDNOT {x2} is {y}")
        except ValueError:
            print("Please enter valid integers.")
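For reference, sweeping all four input pairs reproduces the full ANDNOT truth table (the output is 1 only for x1=1, x2=0), without the interactive prompt:

# Non-interactive check of the full truth table
for x1 in (0, 1):
    for x2 in (0, 1):
        print(f"{x1} ANDNOT {x2} = {mcp_andnot(x1, x2)}")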



Practical 3.

# Experiment 3: Perceptron Neural Network to recognize even and odd ASCII numbers
import numpy as np

# ASCII values from 0 to 9 are 48 to 57
# We represent them as 8-bit binary input features
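# Since 48 (ASCII '0') is even, the least significant bit of each ASCII code
# equals the digit's parity, so this task is linearly separable for a perceptron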
X = np.array([list(map(int, format(i, '08b'))) for i in range(48, 58)])

# Target: 0 for Even, 1 for Odd
y = np.array([0 if i % 2 == 0 else 1 for i in range(10)])

# Perceptron Initialization
W = np.zeros(8)
b = 0
learning_rate = 0.1
epochs = 10

# Training the Perceptron
for epoch in range(epochs):
    for i in range(len(X)):
        y_in = np.dot(X[i], W) + b
        y_pred = 1 if y_in >= 0 else 0
        error = y[i] - y_pred
       
        # Update weights and biases
        W += learning_rate * error * X[i]
        b += learning_rate * error

print("Testing the trained perceptron:")
while True:
    user_input = input("Enter a digit (0-9) to test (or 'q' to quit): ")
    if user_input.lower() == 'q': break
    if len(user_input) == 1 and user_input.isdigit():
        i = ord(user_input)
        binary_input = list(map(int, format(i, '08b')))
        y_in = np.dot(binary_input, W) + b
        y_pred = 1 if y_in >= 0 else 0
        result = "Odd" if y_pred == 1 else "Even"
        print(f"Number {user_input} (ASCII: {i}, Binary: {format(i, '08b')}) -> Predicted: {result}")
    else:
        print("Invalid input. Please enter a single digit.")

Practical 4.

# Experiment 4: Perceptron learning law with decision regions
import numpy as np
import matplotlib.pyplot as plt

# Dataset for OR gate
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 1])

W = np.random.rand(2)
b = np.random.rand()  # scalar bias (avoids a 1-element array in the threshold test below)
learning_rate = float(input("Enter learning rate (e.g., 0.1): "))
epochs = int(input("Enter number of epochs (e.g., 10): "))

# Train
for epoch in range(epochs):
    for i in range(len(X)):
        y_in = np.dot(X[i], W) + b
        y_pred = 1 if y_in >= 0 else 0
        error = y[i] - y_pred
        # Weight update
        W += learning_rate * error * X[i]
        b += learning_rate * error

# Plot Decision Boundary
plt.figure(figsize=(8,6))
x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5

xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01),
                     np.arange(y_min, y_max, 0.01))

grid_pts = np.c_[xx.ravel(), yy.ravel()]
Z = np.array([1 if np.dot(pt, W) + b >= 0 else 0 for pt in grid_pts])
Z = Z.reshape(xx.shape)

plt.contourf(xx, yy, Z, alpha=0.3, cmap=plt.cm.Paired)
plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', cmap=plt.cm.Paired, marker='o', s=100)
plt.title("Perceptron Decision Region for OR Gate")
plt.xlabel("X1")
plt.ylabel("X2")
plt.grid(True)
plt.show()
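As an aside, the per-point Python loop over the mesh grid can be replaced by a single vectorized NumPy expression. A minimal equivalent sketch, assuming the same grid_pts, W, and b from above:

# Vectorized decision over the whole grid in one shot
Z = (grid_pts @ W + b >= 0).astype(int).reshape(xx.shape)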


Practical 5.

# Experiment 5: Artificial Neural Network Forward and Backward Propagation From Scratch
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
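    # x here is assumed to already be sigmoid(z), so s'(z) = s(z)(1 - s(z)) = x(1 - x)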
    return x * (1 - x)

# XOR input and expected output
X = np.array([[0,0], [0,1], [1,0], [1,1]])
y = np.array([[0], [1], [1], [0]])

np.random.seed(42)

# Neural Network Architecture: 2 inputs, 2 hidden neurons, 1 output neuron
W1 = np.random.uniform(size=(2, 2))
b1 = np.random.uniform(size=(1, 2))
W2 = np.random.uniform(size=(2, 1))
b2 = np.random.uniform(size=(1, 1))
learning_rate = float(input("Enter learning rate (e.g., 0.5): "))
epochs = int(input("Enter number of epochs (e.g., 10000): "))

for i in range(epochs):
    # Forward Propagation
    hidden_input = np.dot(X, W1) + b1
    hidden_output = sigmoid(hidden_input)
   
    final_input = np.dot(hidden_output, W2) + b2
    predicted_output = sigmoid(final_input)
   
    # Backward Propagation
    error = y - predicted_output
    d_predicted_output = error * sigmoid_derivative(predicted_output)
   
    error_hidden_layer = d_predicted_output.dot(W2.T)
    d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_output)
   
    # Updating Weights and Biases
    W2 += hidden_output.T.dot(d_predicted_output) * learning_rate
    b2 += np.sum(d_predicted_output, axis=0, keepdims=True) * learning_rate
    W1 += X.T.dot(d_hidden_layer) * learning_rate
    b1 += np.sum(d_hidden_layer, axis=0, keepdims=True) * learning_rate

print("Output from neural network after training:")
print(np.round(predicted_output, 2))

while True:
    user_input = input("Enter 2 binary inputs separated by space to test XOR (or 'q' to quit): ")
    if user_input.lower() == 'q': break
    try:
        inputs = list(map(int, user_input.split()))
        if len(inputs) == 2:
            test_x = np.array([inputs])
            hidden = sigmoid(np.dot(test_x, W1) + b1)
            pred = sigmoid(np.dot(hidden, W2) + b2)
            print(f"Predicted Output for {inputs}: {np.round(pred, 2)[0][0]}")
        else:
            print("Please enter exactly two inputs.")
    except ValueError:
        print("Invalid input.")


Practical 6.


# Experiment 6: Bidirectional Associative Memory (BAM)
import numpy as np

# Two pairs of vectors
X1 = np.array([1, 1, 1, -1])
Y1 = np.array([1, -1])

X2 = np.array([-1, -1, 1, 1])
Y2 = np.array([-1, 1])

# Calculate Weight Matrix
W = np.outer(X1, Y1) + np.outer(X2, Y2)
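# For these two training pairs the Hebbian sum works out to:
# W = [[ 2 -2]
#      [ 2 -2]
#      [ 0  0]
#      [-2  2]]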

def bam_recall_y(x):
    y_in = np.dot(x, W)
    return np.where(y_in >= 0, 1, -1)

def bam_recall_x(y):
    x_in = np.dot(y, W.T)
    return np.where(x_in >= 0, 1, -1)

print("Weight Matrix:")
print(W)
print("\nTesting Recall with predefined patterns:")
print(f"Input X1 {X1} recalling Y1 -> {bam_recall_y(X1)}")
print(f"Input Y1 {Y1} recalling X1 -> {bam_recall_x(Y1)}")
print(f"Input X2 {X2} recalling Y2 -> {bam_recall_y(X2)}")
print(f"Input Y2 {Y2} recalling X2 -> {bam_recall_x(Y2)}")

while True:
    test_x_str = input("\nEnter a bipolar X vector of length 4 (e.g., '1 1 1 -1') or 'q' to quit: ")
    if test_x_str.lower() == 'q': break
    try:
        test_x = np.array(list(map(int, test_x_str.split())))
        if len(test_x) == 4:
            print(f"Recalled Y: {bam_recall_y(test_x)}")
        else:
            print("Please enter exactly 4 values.")
    except ValueError:
        print("Invalid input.")


Practical 7.


# Experiment 7: Adaptive Resonance Theory (ART1)
import numpy as np

class ART1:
    def __init__(self, num_inputs, num_clusters, rho):
        self.rho = rho
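        # rho in (0, 1]: higher vigilance demands a closer match between the
        # input and a cluster's expectation, producing more (finer) clusters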
        # Bottom-up weights
        self.W_bu = np.ones((num_inputs, num_clusters)) / (num_inputs + 1)
        # Top-down weights
        self.W_td = np.ones((num_clusters, num_inputs))
       
    def predict_and_learn(self, x):
        # Calculate bottom-up activation once, outside the loop: recomputing it
        # on every iteration would undo the reset below and loop forever
        # whenever a cluster fails the vigilance test
        y = np.dot(x, self.W_bu)
        while True:
            cluster = np.argmax(y)
           
            # Vigilance test using top-down expectation
            expectation = self.W_td[cluster]
            match = np.sum(np.minimum(x, expectation))
            norm_x = np.sum(x)
            if norm_x == 0:
                return -1 # An all-zero input cannot pass any vigilance test
            if match / norm_x >= self.rho: # Pass vigilance test
                # Learn by updating weights
                updated_expectation = np.minimum(x, expectation)
                self.W_bu[:, cluster] = (2 * updated_expectation) / (1 + np.sum(updated_expectation))
                self.W_td[cluster] = updated_expectation
                return cluster
            else:
                # Failed vigilance, reset and try another cluster
                y[cluster] = -1
                if np.all(y == -1):
                    return -1 # All clusters exhausted

# Test data (binary patterns)
patterns = np.array([
    [1, 1, 0, 0, 0],
    [0, 0, 1, 1, 1],
    [1, 0, 0, 0, 0],
    [0, 0, 1, 0, 1]
])

rho_input = float(input("Enter vigilance parameter rho (e.g., 0.5): "))
art = ART1(num_inputs=5, num_clusters=2, rho=rho_input)

print("Clustering Results on training data:")
for i, pattern in enumerate(patterns):
    cluster_id = art.predict_and_learn(pattern)
    print(f"Pattern {i} {pattern} assigned to Cluster {cluster_id}")

while True:
    user_pattern = input("\nEnter a 5-bit binary pattern separated by space (e.g. '1 1 0 0 0') or 'q' to quit: ")
    if user_pattern.lower() == 'q': break
    try:
        pattern = np.array(list(map(int, user_pattern.split())))
        if len(pattern) == 5:
            cluster_id = art.predict_and_learn(pattern)
            print(f"Pattern {pattern} assigned to Cluster {cluster_id}")
        else:
            print("Please enter exactly 5 bits.")
    except ValueError:
        print("Invalid input.")


Practical 8.

# Experiment 8: Back Propagation Feed-forward Neural Network class
import numpy as np

class BPNN:
    def __init__(self, input_size, hidden_size, output_size):
        self.W1 = np.random.uniform(size=(input_size, hidden_size))
        self.b1 = np.random.uniform(size=(1, hidden_size))
        self.W2 = np.random.uniform(size=(hidden_size, output_size))
        self.b2 = np.random.uniform(size=(1, output_size))
       
    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))
       
    def derivative(self, x):
        return x * (1 - x)
       
    def train(self, X, y, lr=0.1, epochs=5000):
        for _ in range(epochs):
            # Forward
            hidden = self.sigmoid(np.dot(X, self.W1) + self.b1)
            output = self.sigmoid(np.dot(hidden, self.W2) + self.b2)
           
            # Errors
            d_out = (y - output) * self.derivative(output)
            d_hidden = d_out.dot(self.W2.T) * self.derivative(hidden)
           
            # Update
            self.W2 += hidden.T.dot(d_out) * lr
            self.b2 += np.sum(d_out, axis=0, keepdims=True) * lr
            self.W1 += X.T.dot(d_hidden) * lr
            self.b1 += np.sum(d_hidden, axis=0, keepdims=True) * lr
           
    def predict(self, X):
        hidden = self.sigmoid(np.dot(X, self.W1) + self.b1)
        return self.sigmoid(np.dot(hidden, self.W2) + self.b2)

if __name__ == "__main__":
    X = np.array([[0,0], [0,1], [1,0], [1,1]])
    y = np.array([[0], [1], [1], [0]]) # XOR
   
    nn = BPNN(2, 4, 1)
    lr_input = float(input("Enter learning rate (e.g., 0.5): "))
    epochs_input = int(input("Enter number of epochs (e.g., 10000): "))
    nn.train(X, y, lr=lr_input, epochs=epochs_input)
    print("Predictions on training data:\n", np.round(nn.predict(X)))
   
    while True:
        user_in = input("\nEnter 2 binary inputs separated by space to test XOR (or 'q' to quit): ")
        if user_in.lower() == 'q': break
        try:
            inputs = np.array([list(map(int, user_in.split()))])
            if inputs.shape[1] == 2:
                print(f"Predicted Output: {np.round(nn.predict(inputs))[0][0]}")
            else:
                print("Please enter exactly two inputs.")
        except ValueError:
            print("Invalid input.")
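To watch convergence, a logging variant of train can be added to the BPNN class (a sketch; the log_every interval is an arbitrary choice):

    def train_logged(self, X, y, lr=0.1, epochs=5000, log_every=1000):
        # Same updates as train, but reports mean squared error periodically
        for epoch in range(epochs):
            hidden = self.sigmoid(np.dot(X, self.W1) + self.b1)
            output = self.sigmoid(np.dot(hidden, self.W2) + self.b2)
            d_out = (y - output) * self.derivative(output)
            d_hidden = d_out.dot(self.W2.T) * self.derivative(hidden)
            self.W2 += hidden.T.dot(d_out) * lr
            self.b2 += np.sum(d_out, axis=0, keepdims=True) * lr
            self.W1 += X.T.dot(d_hidden) * lr
            self.b1 += np.sum(d_hidden, axis=0, keepdims=True) * lr
            if epoch % log_every == 0:
                print(f"Epoch {epoch}: MSE = {np.mean((y - output) ** 2):.4f}")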

Practical 9.

# Experiment 9: Hopfield Network storing 4 vectors
import numpy as np

# Define 4 bipolar vectors of length 5
patterns = np.array([
    [1, 1, -1, -1, 1],
    [-1, -1, 1, 1, -1],
    [1, -1, 1, -1, 1],
    [-1, 1, -1, 1, -1]
])
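# Note: patterns 2 and 4 are the negations of patterns 1 and 3; since
# np.outer(p, p) == np.outer(-p, -p), the net effectively stores 2 distinct
# patterns (a Hopfield net makes each pattern's complement stable anyway)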

# Training: Initialize Weight Matrix using Hebbian Learning
n_neurons = len(patterns[0])
weights = np.zeros((n_neurons, n_neurons))

for p in patterns:
    weights += np.outer(p, p)
   
# Remove self-connections (diagonal elements = 0)
np.fill_diagonal(weights, 0)

print("Weight Matrix:")
print(weights)

def recall(pattern, w, steps=5):
    res = np.copy(pattern)
    for _ in range(steps):
        for i in range(len(res)):
            in_sum = np.dot(w[i], res)
            res[i] = 1 if in_sum >= 0 else -1
    return res

# Testing with a corrupted pattern interactively
while True:
    user_in = input("\nEnter a bipolar test pattern of length 5 (e.g., '1 1 -1 1 1') or 'q' to quit: ")
    if user_in.lower() == 'q': break
    try:
        test_pattern = np.array(list(map(int, user_in.split())))
        if len(test_pattern) == 5:
            recalled = recall(test_pattern, weights)
            print("Test Pattern:", test_pattern)
            print("Recalled Pattern:", recalled)
        else:
            print("Please enter exactly 5 values.")
    except ValueError:
        print("Invalid input.")


Practical 10.


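# Experiment 10: Logistic Regression with TensorFlow/Keras on a synthetic linearly separable dataset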
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# 1. Generate a linearly separable synthetic dataset
np.random.seed(42)
# Class 0: scattered around point (2, 2)
X_0 = np.random.randn(100, 2) + np.array([2, 2])
y_0 = np.zeros((100, 1))

# Class 1: scattered around point (-2, -2)
X_1 = np.random.randn(100, 2) + np.array([-2, -2])
y_1 = np.ones((100, 1))

X = np.vstack([X_0, X_1]).astype(np.float32)
y = np.vstack([y_0, y_1]).astype(np.float32)

# Shuffle the dataset
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
X, y = X[indices], y[indices]

# Split into Train and Test sets (80% / 20%)
split = int(0.8 * len(X))
X_train, y_train = X[:split], y[:split]
X_test, y_test = X[split:], y[split:]

# 2. Build the Logistic Regression model using TensorFlow
# A logistic regression is functionally a single Dense layer with a sigmoid activation
model = tf.keras.Sequential([
    tf.keras.Input(shape=(2,)),                 # Input layer defining feature shape
    tf.keras.layers.Dense(1, activation='sigmoid') # Logistic (Sigmoid) layer
])

# Prompt user for hyperparameters
lr_input = float(input("Enter learning rate (e.g., 0.1): "))
epochs_input = int(input("Enter number of epochs (e.g., 50): "))

# Compile the model explicitly for logistic regression
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_input),
              loss='binary_crossentropy',
              metrics=['accuracy'])

# 3. Train the model and keep track of the history
print("Training the Logistic Regression model...")
history = model.fit(X_train, y_train, epochs=epochs_input, validation_data=(X_test, y_test), verbose=0)

# Evaluate on Test Set
loss, acc = model.evaluate(X_test, y_test, verbose=0)
print(f"Final Model Evaluation -> Test Loss: {loss:.4f}, Test Accuracy: {acc*100:.2f}%")

# 4. Extract trained weights to plot the decision boundary correctly
weights, bias = model.layers[0].get_weights()

# Line equation: w1*x1 + w2*x2 + b = 0  =>  x2 = -(w1*x1 + b) / w2
x1_boundary = np.array([-5, 5])
x2_boundary = -(weights[0] * x1_boundary + bias[0]) / weights[1]

# 5. Detailed Plotting of the Dataset, Boundary, and Training Evaluation curves
plt.figure(figsize=(14, 5))

# Subplot 1: Dataset and Decision Boundary
plt.subplot(1, 2, 1)
plt.scatter(X_0[:, 0], X_0[:, 1], color='blue', label='Class 0', alpha=0.6)
plt.scatter(X_1[:, 0], X_1[:, 1], color='red', label='Class 1', alpha=0.6)
plt.plot(x1_boundary, x2_boundary, color='black', linewidth=2, label='Decision Boundary')
plt.title("Logistic Regression Decision Boundary")
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.legend()
plt.grid(True)

# Subplot 2: Training History (Loss and Accuracy tracked per epoch)
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss', color='orange', linestyle='--')
plt.plot(history.history['val_loss'], label='Test Loss', color='red')
plt.plot(history.history['accuracy'], label='Train Accuracy', color='cyan', linestyle='--')
plt.plot(history.history['val_accuracy'], label='Test Accuracy', color='blue')
plt.title("Model Training History (Accuracy / Loss)")
plt.xlabel("Epoch")
plt.ylabel("Value")
plt.legend()
plt.grid(True)

plt.tight_layout()
plt.show()


Practical 11.


# Experiment 11: TensorFlow implementation of CNN
# (Requires tensorflow installed)
import tensorflow as tf
from tensorflow.keras import datasets, layers, models

# Load MNIST dataset
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()

# Normalize pixel values
train_images, test_images = train_images / 255.0, test_images / 255.0
# Add channel dimension
train_images = train_images[..., tf.newaxis]
test_images = test_images[..., tf.newaxis]

# Build CNN Model
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

epochs_input = int(input("Enter number of epochs (e.g., 1): "))
batch_input = int(input("Enter batch size (e.g., 128): "))

print(f"Starting training ({epochs_input} epochs)...")
model.fit(train_images, train_labels, epochs=epochs_input, batch_size=batch_input, validation_data=(test_images, test_labels))

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(f"\nTest accuracy: {test_acc}")
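As a quick sanity check after training, you can classify a single test image (a sketch; test index 0 is an arbitrary choice):

import numpy as np

probs = model.predict(test_images[:1])
print("Predicted digit:", np.argmax(probs), "| Actual:", test_labels[0])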


Practical 12.


# Experiment 12: MNIST Handwritten Digit Recognition using Keras and TensorFlow
import tensorflow as tf
import matplotlib.pyplot as plt

# Load dataset
mnist = tf.keras.datasets.mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Normalize
X_train, X_test = X_train / 255.0, X_test / 255.0

# Define a simple feedforward neural network
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(input_shape=(28, 28)),
  tf.keras.layers.Dense(128, activation='relu'),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Train
epochs_input = int(input("Enter number of epochs (e.g., 3): "))
model.fit(X_train, y_train, epochs=epochs_input, validation_split=0.1)

# Evaluate on test set
eval_result = model.evaluate(X_test, y_test, verbose=2)
print(f"Test Loss: {eval_result[0]:.4f}, Test Accuracy: {eval_result[1]:.4f}")
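Since matplotlib is already imported above, a single prediction can also be visualized (a minimal sketch; test index 0 is an arbitrary choice):

import numpy as np

idx = 0
probs = model.predict(X_test[idx:idx + 1])
plt.imshow(X_test[idx], cmap='gray')
plt.title(f"Predicted: {np.argmax(probs)}  Actual: {y_test[idx]}")
plt.show()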




