From 1de008d16833228f1e73e86069298742881ade07 Mon Sep 17 00:00:00 2001
From: manu12121999
Date: Mon, 9 Dec 2024 23:41:51 +0100
Subject: [PATCH] more CI/CD

---
 .github/workflows/python-package.yml | 12 ++++---
 README.md                            |  5 +--
 ctrl_c_nn.py                         |  3 +-
 test/test_tensor.py                  | 52 ++++++++++++++++++++++++++++
 4 files changed, 65 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 97277fa..b07582b 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -23,12 +23,16 @@ jobs:
         python-version: ${{ matrix.python-version }}
     - name: Install numpy
       run: |
-        pip install numpy
+        pip install numpy coverage flake8
     - name: Test unittests
      run: |
-        python -m unittest discover -s ./test -p 'test_*.py'
-#    - name: Lint with flake8
-#      run: |
+        python -m coverage run -m unittest discover -s ./test -p 'test_*.py'
+    - name: Coverage
+      run: |
+        coverage report --include=ctrl_c_nn.py
+    - name: Lint with flake8
+      run: |
+        flake8 ctrl_c_nn.py --ignore=E202,E501,F401,E226 --count --show-source --statistics
 #        # stop the build if there are Python syntax errors or undefined names
 #        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
 #        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
diff --git a/README.md b/README.md
index cbf4676..b5c35f3 100644
--- a/README.md
+++ b/README.md
@@ -38,6 +38,7 @@ g = e.permute((3,0,2,1)) # shape (1, 1, 4, 2)
 ```
 
 ## Sample Usage NN
+```python
 from ctrl_c_nn import nn, Tensor
 
 model = nn.Sequential(
@@ -60,9 +61,9 @@ for i in range(2000):
 
     output_tensor = model(input_tensor)
     loss = loss_fn(output_tensor, target_tensor)
 
-    print("loss", loss.item(), " iteration", i)
+    print("loss", loss.item(), " iteration", i)
     dout = loss_fn.backward(loss)
     dout = model.backward(dout)
     model.update(lr=0.001)
-
+```
\ No newline at end of file
diff --git a/ctrl_c_nn.py b/ctrl_c_nn.py
index 1f6891b..4af04ea 100644
--- a/ctrl_c_nn.py
+++ b/ctrl_c_nn.py
@@ -7,6 +7,7 @@
 sumprod = math.sumprod if sys.version_info >= (3, 12) else lambda p, q: sum([p_i*q_i for p_i, q_i in zip(p, q)])
 
+
 class LLOps:
     """
     Class for (recursive) functional operations on lists of lists
 
@@ -662,7 +663,7 @@ def forward(self, x: Tensor):
         result = Tensor.zeros((B, C_out, H_out, W_out))
         for u in range(self.padding, H - self.padding, self.stride):
             for v in range(self.padding, H - self.padding, self.stride):
-                x_chunk = x[:, :, u:u+K, v:v+K].reshape((B,C_in, K*K))  # reshaped from B, C_in, K, K
+                x_chunk = x[:, :, u:u+K, v:v+K].reshape((B, C_in, K*K))  # reshaped from B, C_in, K, K
                 result[:, :, u, v] = x_chunk @ self.w + self.b  # (B,C_in, K,K) @( C_out, C_in, K, K) + ( C_out)
 
         # TODO complete
diff --git a/test/test_tensor.py b/test/test_tensor.py
index 4aaee55..2339655 100644
--- a/test/test_tensor.py
+++ b/test/test_tensor.py
@@ -1,5 +1,8 @@
 import unittest
 import numpy as np
+
+from ctrl_c_nn import Tensor
+
 try:
     import torch
 except ImportError:
@@ -148,6 +151,16 @@ def test_basic_mul(self):
         result_B = ccm.Tensor(mat1.tolist()) * ccm.Tensor(mat2.tolist())
         self.assertEqual(result_A.tolist(), result_B.tolist(),f"Tensor operation basic mul failed.")
 
+    def test_mul_dimless_tensor(self):
+        mat1 = np.array([3, 2])
+        mat2 = np.array(2)
+        result_A = mat1 * mat2
+        result_B = ccm.Tensor(mat1.tolist()) * ccm.Tensor(mat2.tolist())
+        self.assertEqual(result_A.tolist(), result_B.tolist(), f"Tensor operation dimless mul failed.")
+        result_C = mat2 * mat1
+        result_D = ccm.Tensor(mat2.tolist()) * ccm.Tensor(mat1.tolist())
+        self.assertEqual(result_C.tolist(), result_D.tolist(), f"Tensor operation dimless mul failed.")
+
     def test_basic_matmul(self):
         mat1 = np.random.randint(0, 10, size=(400, 200))
         mat2 = np.random.randint(0, 10, size=(200, 300))
@@ -319,3 +332,42 @@ def test_permute(self):
         tensor_ctrlc = ccm.Tensor(tensor_np.tolist())
         self.assertEqual(np.transpose(tensor_np, (4, 1, 0, 3, 5, 2)).tolist(), tensor_ctrlc.permute((4, 1, 0, 3, 5, 2)).tolist(), f"Tensor permute 6d does not work.")
 
+
+class TestTensorCreation(unittest.TestCase):
+
+    def test_create(self):
+        l1 = [[2, 1, 3], [4, 2, 1]]
+        self.assertEqual(l1, Tensor(l1).tolist(), f"Tensor create and to_list is not identical.")
+        shape = (3, 2, 1)
+        t1 = Tensor.zeros(shape)
+        t2 = Tensor.ones(shape)
+        t3 = Tensor.random_float(shape)
+        t4 = Tensor.random_int(shape)
+        t5 = Tensor.random_float(shape, min=-2, max=+2)
+        t6 = Tensor.random_int(shape, min=-2, max=+2)
+        t7 = Tensor.fill(shape, 27)
+        self.assertEqual(shape, t1.shape, f"Tensor create gives wrong shape.")
+        self.assertEqual(shape, t2.shape, f"Tensor create gives wrong shape.")
+        self.assertEqual(shape, t3.shape, f"Tensor create gives wrong shape.")
+        self.assertEqual(shape, t4.shape, f"Tensor create gives wrong shape.")
+        self.assertEqual(shape, t5.shape, f"Tensor create gives wrong shape.")
+        self.assertEqual(shape, t6.shape, f"Tensor create gives wrong shape.")
+        self.assertEqual(shape, t7.shape, f"Tensor create gives wrong shape.")
+
+    def test_create_single_int(self):
+        n1 = np.array(4)
+        t1 = Tensor(4)
+        self.assertEqual(n1.shape, t1.shape, f"Wrong shape when create tensor from int.")
+        self.assertEqual(n1.ndim, t1.ndim, f"Wrong ndim when create tensor from int.")
+
+        self.assertEqual((n1 * n1).shape, (t1 * t1).shape, f"Wrong shape when create tensor from int.")
+        self.assertEqual((n1 * n1).ndim, (t1 * t1).ndim, f"Wrong ndim when create tensor from int.")
+
+        n2 = np.array([8, 3, 4])
+        t2 = Tensor([8, 3, 4])
+
+        self.assertEqual((n1*n2).shape, (t1*t2).shape, f"Wrong shape when create tensor from int.")
+        self.assertEqual((n1*n2).ndim, (t1*t2).ndim, f"Wrong ndim when create tensor from int.")
+
+        self.assertEqual((n2*n1).shape, (t2*t1).shape, f"Wrong shape when create tensor from int.")
+        self.assertEqual((n2*n1).ndim, (t2*t1).ndim, f"Wrong ndim when create tensor from int.")
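
The new test_mul_dimless_tensor and test_create_single_int cases pin ccm.Tensor's handling of 0-d ("dimensionless") tensors to NumPy's broadcasting rules. A minimal sketch of the NumPy reference behaviour the assertions compare against, using only NumPy (no assumptions about ctrl_c_nn beyond what the tests themselves state):

    import numpy as np

    vec = np.array([3, 2])   # 1-d array, shape (2,), ndim 1
    scalar = np.array(2)     # 0-d array, shape (), ndim 0

    # Multiplication broadcasts in either order and keeps the 1-d shape ...
    print((vec * scalar).tolist(), (scalar * vec).tolist())  # [6, 4] [6, 4]

    # ... while 0-d * 0-d stays 0-d, which is what the shape/ndim checks assert.
    print((scalar * scalar).shape, (scalar * scalar).ndim)   # () 0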