-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbare_torch_example.py
More file actions
48 lines (38 loc) · 1.34 KB
/
bare_torch_example.py
File metadata and controls
48 lines (38 loc) · 1.34 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
"""
Minimal bare PyTorch training loop with hotcb.
Run directory must be initialized first:
hotcb --dir runs/exp-001 init
"""
import torch
from hotcb.kernel import HotKernel
def main():
    """Run a tiny linear-regression training loop, invoking the hotcb
    kernel after every optimizer step so it can hot-reload loss weights
    from the run directory (see module docstring for `hotcb init`)."""
    model = torch.nn.Linear(16, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    # State the kernel is allowed to mutate in place between steps;
    # the training loop reads the current "mse" weight from it each step.
    mutable_state = {"weights": {"mse": 1.0}, "terms": {}, "ramps": {}}
    kernel = HotKernel(run_dir="runs/exp-001", debounce_steps=1)

    features = torch.randn(200, 16)
    targets = torch.randn(200, 1)
    loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(features, targets), batch_size=32
    )

    for epoch in range(5):
        for batch_idx, (inputs, labels) in enumerate(loader):
            optimizer.zero_grad()
            outputs = model(inputs)
            # Re-read the weight every step: hotcb may have changed it.
            mse_weight = mutable_state["weights"].get("mse", 1.0)
            loss = mse_weight * torch.nn.functional.mse_loss(outputs, labels)
            loss.backward()
            optimizer.step()

            # Hand the kernel a snapshot of the training context so any
            # callbacks registered for "train_step_end" can act on it.
            kernel.apply(
                {
                    "step": epoch * len(loader) + batch_idx,
                    "epoch": epoch,
                    "phase": "train",
                    "model": model,
                    "optimizer": optimizer,
                    "loss": loss,
                    "mutable_state": mutable_state,
                    "log": print,
                },
                events=["train_step_end"],
            )
# Allow running this example directly as a script.
if __name__ == "__main__":
    main()