31 changes: 31 additions & 0 deletions .github/workflows/ruff.yml
@@ -0,0 +1,31 @@
name: Python Linting

on: [push, pull_request]

jobs:
  ruff:
    name: Run Ruff
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'

      - name: Install Ruff
        run: pip install ruff

      - name: Run Ruff (Check)
        run: ruff check . --exclude "**/problem.py,**/solution.py"

      - name: Run Ruff (Format Check)
        run: ruff format --check . --exclude "**/problem.py,**/solution.py"

      - name: Run Ruff (Fix Check)
        run: ruff check . --fix --exclude "**/problem.py,**/solution.py"

      - name: Run Ruff (Fix)
        run: ruff format . --exclude "**/problem.py,**/solution.py"
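
The same checks can be reproduced locally (a sketch, not part of the workflow; repeated --exclude flags are an equivalent spelling of the comma-separated list):

pip install ruff
ruff check . --exclude "**/problem.py" --exclude "**/solution.py"
ruff format --check . --exclude "**/problem.py" --exclude "**/solution.py"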
14 changes: 8 additions & 6 deletions old_repo/Problems/100_Softsign/solution.py
@@ -1,31 +1,33 @@
def softsign(x: float) -> float:
    """
    Implements the Softsign activation function.

    Args:
        x (float): Input value

    Returns:
        float: The Softsign of the input, calculated as x/(1 + |x|)
    """
    return x / (1 + abs(x))


def test_softsign():
    # Test case 1: x = 0
    assert abs(softsign(0) - 0) < 1e-7, "Test case 1 failed"

    # Test case 2: x = 1
    assert abs(softsign(1) - 0.5) < 1e-7, "Test case 2 failed"

    # Test case 3: x = -1
    assert abs(softsign(-1) - (-0.5)) < 1e-7, "Test case 3 failed"

    # Test case 4: large positive number
    assert abs(softsign(100) - 0.9901) < 1e-4, "Test case 4 failed"

    # Test case 5: large negative number
    assert abs(softsign(-100) - (-0.9901)) < 1e-4, "Test case 5 failed"


if __name__ == "__main__":
    test_softsign()
    print("All Softsign tests passed.")
16 changes: 9 additions & 7 deletions old_repo/Problems/102_Swish/solution.py
@@ -1,37 +1,39 @@
import math


def swish(x: float) -> float:
    """
    Implements the Swish activation function.

    Args:
        x: Input value

    Returns:
        The Swish activation value
    """
    return x * (1 / (1 + math.exp(-x)))


def test_swish():
    # Test case 1: x = 0
    assert abs(swish(0) - 0) < 1e-6, "Test case 1 failed"

    # Test case 2: x = 1
    expected = 1 * (1 / (1 + math.exp(-1)))
    assert abs(swish(1) - expected) < 1e-6, "Test case 2 failed"

    # Test case 3: x = -1
    expected = -1 * (1 / (1 + math.exp(1)))
    assert abs(swish(-1) - expected) < 1e-6, "Test case 3 failed"

    # Test case 4: large positive number
    x = 10.0
    assert abs(swish(x) - x) < 0.01, "Test case 4 failed"  # Should be close to x

    # Test case 5: large negative number
    assert abs(swish(-10.0)) < 0.01, "Test case 5 failed"  # Should be close to 0


if __name__ == "__main__":
    test_swish()
    print("All Swish tests passed.")

19 changes: 11 additions & 8 deletions old_repo/Problems/103_SELU/solution.py
@@ -1,39 +1,42 @@
import math


def selu(x: float) -> float:
    """
    Implements the SELU (Scaled Exponential Linear Unit) activation function.

    Args:
        x: Input value

    Returns:
        SELU activation value
    """
    # Standard SELU parameters
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946

    if x > 0:
        return scale * x
    return scale * alpha * (math.exp(x) - 1)


def test_selu():
    # Test positive input
    assert abs(selu(1.0) - 1.0507009873554804) < 1e-7, "Test case 1 failed"

    # Test zero input
    assert abs(selu(0.0) - 0.0) < 1e-7, "Test case 2 failed"

    # Test negative input
    assert abs(selu(-1.0) - (-1.1113307)) < 1e-6, "Test case 3 failed"

    # Test large positive input
    assert abs(selu(5.0) - 5.2535049) < 1e-6, "Test case 4 failed"

    # Test large negative input
    assert abs(selu(-5.0) - (-1.7462534)) < 1e-6, "Test case 5 failed"


if __name__ == "__main__":
    test_selu()
    print("All SELU tests passed.")
10 changes: 6 additions & 4 deletions old_repo/Problems/104_logistic_regression/solution.py
@@ -1,12 +1,13 @@
import numpy as np


def predict_logistic(X: np.ndarray, weights: np.ndarray, bias: float) -> np.ndarray:
    z = np.dot(X, weights) + bias
    z = np.clip(z, -500, 500)  # Prevent overflow in exp
    probabilities = 1 / (1 + np.exp(-z))
    return (probabilities >= 0.5).astype(int)


def test_predict_logistic():
    # Test case 1: Simple linearly separable case
    X1 = np.array([[1, 1], [2, 2], [-1, -1], [-2, -2]])
@@ -29,20 +30,21 @@ def test_predict_logistic():
    expected3 = np.array([1, 0, 0])
    assert np.array_equal(predict_logistic(X3, w3, b3), expected3), "Test case 3 failed"

    # Test case 4: Single feature
    X4 = np.array([[1], [2], [-1], [-2]]).reshape(-1, 1)
    w4 = np.array([2])
    b4 = 0
    expected4 = np.array([1, 1, 0, 0])
    assert np.array_equal(predict_logistic(X4, w4, b4), expected4), "Test case 4 failed"

    # Test case 5: Numerical stability test with large values
    X6 = np.array([[1000, 2000], [-1000, -2000]])
    w6 = np.array([0.1, 0.1])
    b6 = 0
    result6 = predict_logistic(X6, w6, b6)
    assert result6[0] == 1 and result6[1] == 0, "Test case 5 failed"


if __name__ == "__main__":
    test_predict_logistic()
    print("All test cases passed!")
107 changes: 66 additions & 41 deletions old_repo/Problems/105_train_softmaxreg/solution.py
@@ -1,9 +1,10 @@
import numpy as np


def train_softmaxreg(
    X: np.ndarray, y: np.ndarray, learning_rate: float, iterations: int
) -> tuple[list[float], ...]:
    """
    Gradient-descent training algorithm for softmax regression that collects
    mean-reduced CE losses and accuracies.
    Returns
@@ -18,14 +19,16 @@ def softmax(z):
        return np.exp(z) / np.sum(np.exp(z), axis=1, keepdims=True)

    def accuracy(y_pred, y_true):
        return (np.argmax(y_true, axis=1) == np.argmax(y_pred, axis=1)).sum() / len(
            y_true
        )

    def ce_loss(y_pred, y_true):
        true_labels_idx = np.argmax(y_true, axis=1)
        return -np.sum(np.log(y_pred)[list(range(len(y_pred))), true_labels_idx])

    y = y.astype(int)
    C = y.max() + 1  # we assume that classes start from 0
    y = np.eye(C)[y]
    X = np.hstack((np.ones((X.shape[0], 1)), X))
    B = np.zeros((X.shape[1], C))
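
Worth flagging in passing (an observation, not a diff change): the softmax helper exponentiates raw logits, which can overflow for large z. The standard max-subtraction trick is mathematically equivalent and stable (softmax_stable is a hypothetical name):

def softmax_stable(z):
    # Subtracting the row max leaves the result unchanged (softmax is
    # shift-invariant) but keeps np.exp within floating-point range
    z = z - np.max(z, axis=1, keepdims=True)
    return np.exp(z) / np.sum(np.exp(z), axis=1, keepdims=True)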
@@ -42,50 +45,72 @@ def ce_loss(y_pred, y_true):

def test_train_softmaxreg():
    # Test 1
    X = np.array(
        [
            [2.52569869, 2.33335813, 1.77303921, 0.41061103, -1.66484491],
            [1.51013861, 1.30237106, 1.31989315, 1.36087958, 0.46381252],
            [-2.09699866, -1.35960405, -1.04035503, -2.25481082, -0.32359947],
            [-0.96660088, -0.60680633, -0.72017167, -1.73257187, -1.12811486],
            [-0.38096611, -0.24852455, 0.18789426, 0.52359424, 1.30725962],
            [0.54828787, 0.33156614, 0.10676247, 0.30694669, -0.37555384],
            [-3.03393135, -2.01966141, -0.6546858, -0.90330912, 2.89185791],
            [0.28602304, -0.1265, -0.52209915, 0.28309144, -0.5865882],
            [-0.26268117, 0.76017979, 1.84095557, -0.23245038, 1.80716891],
            [0.30283562, -0.40231495, -1.29550644, -0.1422727, -1.78121713],
        ]
    )
    y = np.array([2, 3, 0, 0, 1, 3, 0, 1, 2, 1])
    learning_rate = 3e-2
    iterations = 10
    expected_b = [
        [-0.0841, -0.5693, -0.3651, -0.2423, -0.5344, 0.0339],
        [0.2566, 0.0535, -0.2104, -0.4004, 0.2709, -0.1461],
        [-0.1318, 0.2109, 0.3998, 0.523, -0.1001, 0.0545],
        [-0.0407, 0.3049, 0.1757, 0.1197, 0.3637, 0.0576],
    ]
    expected_losses = [
        13.8629,
        10.7201,
        9.3163,
        8.4942,
        7.9132,
        7.4598,
        7.0854,
        6.7653,
        6.4851,
        6.2358,
    ]
    b, ce = train_softmaxreg(X, y, learning_rate, iterations)
    assert b == expected_b and ce == expected_losses, "Test case 1 failed"

    # Test 2
    X = np.array(
        [
            [-0.55605887, -0.74922526, -0.1913345, 0.41584056],
            [-1.05481124, -1.13763371, -1.28685937, -1.0710115],
            [-1.17111877, -1.46866663, -0.75898143, 0.15915148],
            [-1.21725723, -1.55590285, -0.69318542, 0.3580615],
            [-1.90316075, -2.06075824, -2.2952422, -1.87885386],
            [-0.79089629, -0.98662696, -0.52955027, 0.07329079],
            [1.97170638, 2.65609694, 0.6802377, -1.47090364],
            [1.46907396, 1.61396429, 1.69602021, 1.29791351],
            [0.03095068, 0.15148081, -0.34698116, -0.74306029],
            [-1.40292946, -1.99308861, -0.1478281, 1.72332995],
        ]
    )
    y = np.array([1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0])
    learning_rate = 1e-2
    iterations = 7
    expected_b = [
        [-0.0052, 0.0148, 0.0562, -0.113, -0.2488],
        [0.0052, -0.0148, -0.0562, 0.113, 0.2488],
    ]
    expected_losses = [6.9315, 6.4544, 6.0487, 5.7025, 5.4055, 5.1493, 4.9269]
    b, ce = train_softmaxreg(X, y, learning_rate, iterations)
    assert b == expected_b and ce == expected_losses, "Test case 2 failed"

    print("All tests passed")


if __name__ == "__main__":
    test_train_softmaxreg()
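
A side note on the implementation (illustration only): np.eye(C)[y] inside train_softmaxreg is the usual NumPy one-hot trick; indexing the identity matrix with the label vector selects one row per sample:

import numpy as np

y = np.array([2, 0, 1])
C = y.max() + 1  # classes assumed to start at 0, as in the solution
print(np.eye(C)[y])
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]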