ML/DL example: trying to learn multiplication with a linear model

Here is the Python script:

import numpy as np

# Generate training data: pairs of numbers (a, b) and their product (a * b)
# Generate training data: pairs of numbers (a, b) and their product (a * b).
# One vectorized draw replaces the 10,000-iteration Python append loop;
# numpy fills the (N, 2) array row-major, so the (a, b) pairing matches
# the original per-sample draws.
N_SAMPLES = 10000
X = np.random.uniform(-10, 10, size=(N_SAMPLES, 2))  # each row is one (a, b) pair
y = X[:, 0] * X[:, 1]                                # elementwise target: a * b

# Randomly initialized parameters of the linear model
# y_hat = w1*a + w2*b + bias.
weights, bias = np.random.randn(2), np.random.randn()

# Hyperparameters for plain full-batch gradient descent.
learning_rate, epochs = 0.001, 1000

# Full-batch gradient descent on mean-squared error.
for epoch in range(epochs):
    # Forward pass folded into the residual: (X @ w + b) - y.
    residual = (X @ weights + bias) - y

    # MSE loss and its exact gradients w.r.t. weights and bias.
    loss = np.mean(residual ** 2)
    grad_w = 2 * (X.T @ residual) / len(y)
    grad_b = 2 * np.mean(residual)

    # Step against the gradient.
    weights -= learning_rate * grad_w
    bias -= learning_rate * grad_b

    # Report the pre-update loss every 100 epochs.
    if epoch % 100 == 0:
        print(f"Epoch {epoch}, Loss: {loss:.4f}")

Use the trained model as a "learned multiplier":

def ai_multiplier(a, b):
    """Apply the trained linear model to the pair (a, b).

    NOTE(review): a purely linear model cannot represent the product
    a * b, so this is a "learned multiplier" in name only — expect its
    output to be far from a * b for most inputs.
    """
    w1, w2 = weights
    return a * w1 + b * w2 + bias

Test the AI multiplier:

# Sanity check: compare the learned model's output against the true 3 * 4.
prediction = ai_multiplier(3, 4)
print("AI multiplier output for 3 * 4:", prediction)
print("Expected result:", 3 * 4)