changes to quick test lenet-train using more builtins (that need CUDA)
these builtins still need proper script wrapping
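
For context, the change applies a single pattern throughout the file: calls to the script-level nn library functions (relu.forward, affine.forward, softmax.forward) are replaced with the corresponding DaphneDSL builtins (relu, affine, softmax), which dispatch to native kernels (CUDA-backed where available), while conv2d.forward, max_pool2d.forward, and dropout.forward remain script calls. A minimal sketch of the substitution in DaphneDSL, with X, W, and b as hypothetical placeholders rather than names from the script:

# before: script-level function from the nn library
#out = relu.forward(X);
# after: builtin that maps to a native (CUDA) kernel
out = relu(X);

# the same pattern applies to the affine and softmax steps
#out2 = affine.forward(X, W, b);
out2 = affine(X, W, b);
#probs = softmax.forward(out2);
probs = softmax(out2);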
corepointer committed Jul 15, 2024
1 parent 2e51c57 commit 66df10a
Showing 1 changed file with 16 additions and 8 deletions.
24 changes: 16 additions & 8 deletions scripts/nn/networks/lenet-train.daph
@@ -100,24 +100,28 @@ def predict(X:matrix, C, Hin, Win, W1:matrix, b1:matrix, W2:matrix, b2:matrix, W
 # Compute forward pass
 ## layer 1: conv1 -> relu1 -> pool1
 outc1, Houtc1, Woutc1 = conv2d.forward(X_batch, W1, b1, C, Hin, Win, Hf, Wf, stride, stride, pad, pad);
-outr1 = relu.forward(outc1);
+outr1 = relu(outc1);
+#outr1 = relu.forward(outc1);
 outp1, Houtp1, Woutp1 = max_pool2d.forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0);
 print("predict fwd layer 1 done");
 ## layer 2: conv2 -> relu2 -> pool2
 outc2, Houtc2, Woutc2 = conv2d.forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf, stride, stride, pad, pad);
-outr2 = relu.forward(outc2);
+#outr2 = relu.forward(outc2);
+outr2 = relu(outc2);
 outp2, Houtp2, Woutp2 = max_pool2d.forward(outr2, F2, Houtp1, Woutp1, 2, 2, 2, 2, 0, 0);
 print("predict fwd layer 2 done");
 ## layer 3: affine3 -> relu3 -> dropout
 # outa3 = affine.forward(outp2, W3, b3);
 outa3 = affine(outp2, W3, b3);
-outr3 = relu.forward(outa3);
+#outr3 = relu.forward(outa3);
+outr3 = relu(outa3);
 outd3, maskd3 = dropout.forward(outr3, 0.5, -1);
 print("predict fwd layer 3 done");
 ## layer 4: affine4 -> softmax
 #outa4 = affine.forward(outd3, W4, b4);
 outa4 = affine(outd3, W4, b4);
-probs_batch = softmax.forward(outa4);
+#probs_batch = softmax.forward(outa4);
+probs_batch = softmax(outa4);
 print("predict fwd layer 4 done");
 # Store predictions
 probs[beg:end,] = probs_batch;
@@ -218,25 +222,29 @@ print("start training layer 1");
 ## layer 1: conv1 -> relu1 -> pool1
 outc1, Houtc1, Woutc1 = conv2d.forward(X_batch, W1, b1, C, Hin, Win, Hf, Wf, stride, stride, pad, pad);
 print("conv1 done");
-outr1 = relu.forward(outc1);
+#outr1 = relu.forward(outc1);
+outr1 = relu(outc1);
 print("layer 1 relu done");
 outp1, Houtp1, Woutp1 = max_pool2d.forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0);
 print("train fwd layer 1 done");
 ## layer 2: conv2 -> relu2 -> pool2
 outc2, Houtc2, Woutc2 = conv2d.forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf, stride, stride, pad, pad);
-outr2 = relu.forward(outc2);
+#outr2 = relu.forward(outc2);
+outr2 = relu(outc2);
 outp2, Houtp2, Woutp2 = max_pool2d.forward(outr2, F2, Houtp1, Woutp1, 2, 2, 2, 2, 0, 0);
 print("train fwd layer 2 done");
 ## layer 3: affine3 -> relu3 -> dropout
 #outa3 = affine.forward(outp2, W3, b3);
 outa3 = affine(outp2, W3, b3);
-outr3 = relu.forward(outa3);
+#outr3 = relu.forward(outa3);
+outr3 = relu(outa3);
 outd3, maskd3 = dropout.forward(outr3, 0.5, -1);
 print("train fwd layer 3 done");
 ## layer 4: affine4 -> softmax
 #outa4 = affine.forward(outd3, W4, b4);
 outa4 = affine(outd3, W4, b4);
-probs = softmax.forward(outa4);
+#probs = softmax.forward(outa4);
+probs = softmax(outa4);
 print("train fwd layer 4 done");
 # Compute loss & accuracy for training & validation data every 100 iterations.
 if (i % 100 == 0)
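
As a usage note, not part of this commit: since the new builtins need CUDA, the script would presumably be launched with GPU support enabled. A sketch, assuming a DAPHNE build with CUDA and the daphne CLI's --cuda flag:

# hypothetical invocation; exact flags depend on the local build
bin/daphne --cuda scripts/nn/networks/lenet-train.daph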
