
Commit

better documentation, removed old code
Eduardo Leao authored and committed on Mar 12, 2024
1 parent 372669b commit fa7a5b5
Showing 3 changed files with 5 additions and 10 deletions.
neuralforge/nn/layers.py (2 changes: 0 additions & 2 deletions)
@@ -122,7 +122,6 @@ def forward(self, x):
         out = self.residual_proj(out) # (B, T, D) @ (D, D) -> (B, T, D)
         out = self.residual_dropout(out)

-        self.cache = (att, k, v, q)
         return out

 # Embedding Layers
@@ -142,7 +141,6 @@ def forward(self, idx):
         # Extracts embedding from row "idx":
         x = self.E[idx._data]

-        self.cache = (idx)
         return x


neuralforge/tensor_operations.py (6 changes: 3 additions & 3 deletions)
@@ -816,17 +816,17 @@ def forward(self, a, condition, value):
         # Add new Tensors to "children" and old Tensors to "parents":
         self.parents = (a,)
         a.children.append(z)
-        self.cache = (a)
+        self.cache = (a, condition)

         return z

     def backward(self, dz, z):
-        a = self.cache
+        a, condition = self.cache

         # Find gradients relative to "a", and pass it downstream:
         if a.requires_grad:
             # Because some activations are just set to a value, this operation is not differentiable.
-            da = dz
+            da = np.where(condition, dz, 0)

             a.backward(da, z)

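For context on the backward change above: caching the mask lets the backward pass drop the gradient at positions whose output came from the constant rather than from `a`. The sketch below is a standalone NumPy illustration with hypothetical helper names; the forward semantics (keep `a` where `condition` is true, write `value` elsewhere) are inferred from the new backward line, not copied from the repository.

    import numpy as np

    # Hypothetical stand-in for the masked-assignment op fixed above.
    # Assumed forward: keep `a` where `condition` is True, write `value` elsewhere.
    def masked_set(a, condition, value):
        return np.where(condition, a, value)

    def masked_set_grad(dz, condition):
        # Positions overwritten by the constant do not depend on `a`,
        # so the upstream gradient is blocked there instead of passed through.
        return np.where(condition, dz, 0)

    a = np.array([1.0, -2.0, 3.0])
    cond = a > 0
    z = masked_set(a, cond, 0.0)                 # [1., 0., 3.]
    da = masked_set_grad(np.ones_like(a), cond)  # [1., 0., 1.]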
tests/test_framework.py (7 changes: 2 additions & 5 deletions)
@@ -1,5 +1,3 @@
-import sys
-sys.path.append('..')
 import neuralforge as forge
 import neuralforge.nn as nn
 import unittest
@@ -173,7 +171,7 @@ def forward(self, x):
         hidden_size = 128
         batch_size = 4
         n_heads = 8
-        dropout_p = 0.1
+        dropout_p = 0.2

         # Get path to root of repository:
         PATH = '/'.join(os.getcwd().split('/')[:-1])
@@ -189,7 +187,7 @@ def forward(self, x):

         # Define loss function and optimizer:
         loss_func = nn.CrossEntropyLoss()
-        optimizer = nn.optim.Adam(model.parameters(), lr=0.005, reg=0)
+        optimizer = nn.optim.Adam(model.parameters(), lr=5e-3, reg=0)

         # Training Loop:
         for _ in range(n_iters):
@@ -208,7 +206,6 @@ def forward(self, x):

             # Reset the gradients to zero after each training step:
             optimizer.zero_grad()
-            print(loss)

         assert loss._data < 1, "Error: Loss is not converging to zero in autograd test."

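A note on the training-loop lines above: clearing the gradient buffer after every step is what keeps updates from accumulating stale gradients, which the final convergence assertion relies on. The toy below illustrates that pattern with plain NumPy and hypothetical variable names; it does not use neuralforge's API.

    import numpy as np

    # Toy gradient descent on loss = w**2, mirroring the step / zero-grad pattern.
    w = np.array(5.0)      # parameter
    grad = np.array(0.0)   # gradient buffer (what zero_grad() clears)
    lr = 5e-3

    for _ in range(2000):
        grad += 2 * w      # accumulate d(loss)/dw into the buffer
        w -= lr * grad     # take a step (the real test uses Adam)
        grad[...] = 0.0    # reset the buffer, like optimizer.zero_grad()

    assert abs(w) < 1.0    # loss shrinks toward zero, like the test's assertion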
