import numpy as np

from activations import relu, sigmoid
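# NOTE (assumption): `activations` is a sibling helper module, not shown
# here. It is presumed to provide the element-wise functions
# relu(x) = max(0, x) and sigmoid(x) = 1 / (1 + exp(-x)).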

# Fix the seed for reproducibility
np.random.seed(10)


class Layer:
    def __init__(self, n_inputs, n_neurons, activation_function):
        self.inputs = np.zeros((n_inputs, 1))
        self.outputs = np.zeros((n_neurons, 1))
        # 1. Initialize the weight matrix and the bias vector with random
        #    values drawn uniformly from [-1, 1)
        self.weights = np.random.uniform(-1, 1, (n_neurons, n_inputs))
        self.biases = np.random.uniform(-1, 1, (n_neurons, 1))
        self.activation = activation_function

    def forward(self, inputs):
        # Store the input as a column vector so the shapes line up
        self.inputs = np.array(inputs).reshape(-1, 1)
        # 2. Compute the raw output values of the neurons: z = W x + b
        self.outputs = np.dot(self.weights, self.inputs) + self.biases
        # 3. Apply the activation function element-wise
        return self.activation(self.outputs)


class Perceptron:
    def __init__(self, layers):
        self.layers = layers
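
    def forward(self, inputs):
        # Not part of the original listing: a minimal sketch of a full
        # forward pass (an assumed, natural extension), feeding the input
        # through every layer in order so the network can process data
        # end to end.
        outputs = inputs
        for layer in self.layers:
            outputs = layer.forward(outputs)
        return outputs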


input_size = 2
hidden_size = 6
output_size = 1

# 4. Define three layers: two hidden layers and one output layer
hidden_1 = Layer(input_size, hidden_size, relu)
hidden_2 = Layer(hidden_size, hidden_size, relu)
output_layer = Layer(hidden_size, output_size, sigmoid)

layers = [hidden_1, hidden_2, output_layer]
perceptron = Perceptron(layers)

print("Weights of the third neuron in the second hidden layer:")
print(np.round(perceptron.layers[1].weights[2], 2))

print("Weights of the neuron in the output layer:")
print(np.round(perceptron.layers[2].weights[0], 2))

'''
How it works:
The perceptron consists of multiple layers, each with its own weights and
biases, initialized with random values in [-1, 1). Each layer's forward
method computes its output by applying the activation function to the
weighted sum of its inputs (z = W x + b), so an input can be processed
through the whole network by calling the layers' forward methods in turn.
Every layer's parameters are reachable through the perceptron's layers
attribute: the script prints the weights of the third neuron in the second
hidden layer and of the single output neuron to show how the network is
structured.
'''
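
# Illustrative usage (an assumption about intended use, relying on the
# Perceptron.forward helper sketched above): push an arbitrary 2-feature
# sample through the network and print the final sigmoid output.
sample = [0.5, -0.3]
prediction = perceptron.forward(sample)
print("Network output for", sample, ":", np.round(prediction, 4))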