# 36 lines, 1.1 KiB, Python
from ast import arg
|
|
import numpy as np # type: ignore
|
|
|
|
# Fix the seed of NumPy's global RNG so the random weights/bias below
# are reproducible across runs
np.random.seed(100)
|
|
|
|
def sigmoid(z):
|
|
return 1 / (1 + np.exp(-z))
|
|
|
|
class Neuron:
    """A single artificial neuron.

    Holds one weight per input plus a scalar bias, all drawn uniformly
    from [-1, 1); activation is the sigmoid of the weighted input sum.
    """

    def __init__(self, n_inputs):
        """Initialize random weights and bias.

        Args:
            n_inputs: number of input connections this neuron accepts.

        Note: the original signature was ``__init__(self, *args)`` reading
        ``args[0]`` — replaced with a named parameter so ``Neuron()`` fails
        with a clear TypeError instead of an opaque IndexError, and extra
        arguments are no longer silently ignored. All positional call
        sites (e.g. ``Neuron(6)``) keep working unchanged.
        """
        # 1. Initialize weights and bias with random values in [-1, 1)
        self.weights = np.random.uniform(-1, 1, size=n_inputs)
        self.bias = np.random.uniform(-1, 1)

    def activate(self, inputs):
        """Return sigmoid(inputs . weights + bias).

        Args:
            inputs: array-like of length ``n_inputs``.

        Returns:
            A float in (0, 1): the neuron's activation for ``inputs``.
        """
        # 2. Compute the weighted sum using dot product and add bias
        input_sum_with_bias = np.dot(inputs, self.weights) + self.bias

        # 3. Apply the sigmoid activation function
        return sigmoid(input_sum_with_bias)
|
|
|
|
# Build a neuron wired for 6 inputs
neuron = Neuron(6)

# Example stimulus for the neuron
input_values = [-0.5, 0.4, -0.8, 0.2, 0.1, -0.3]
neuron_inputs = np.array(input_values)

# Run a forward pass through the neuron
neuron_output = neuron.activate(neuron_inputs)

print(f'Output of the neuron is {neuron_output:.3f}')
|
|
|
|
'''
|
|
How it works:
|
|
# The neuron takes any number of inputs, computes the weighted sum, applies the sigmoid function, and returns the output which is a value between 0 and 1.
|
|
|
|
'''