optimizer, loss for tutorial cnn

This commit is contained in:
KeshavAnandCode
2026-03-18 18:02:21 -05:00
parent 255a9aa73c
commit 5cee36c80e

View File

@@ -336,6 +336,29 @@
"model = Net().to(device)\n",
"print(model)"
]
},
{
"cell_type": "markdown",
"id": "22e71032",
"metadata": {},
"source": [
"Loss fn and optimizer"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "54d11a04",
"metadata": {},
"outputs": [],
"source": [
"import torch.optim as optim\n",
"\n",
"criterion = nn.CrossEntropyLoss() # Applies softmax to convert scores --> probabilities, then penalizes confident wrong predictions \n",
"\n",
"# SGD is used in the tutorial, but the Adam optimizer is also a popular choice\n",
"optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Default learning rate from tutorial: compromise between overshooting and training infinitely long; momentum=0.9 --> 90% of each update comes from the prev. direction\n"
]
}
],
"metadata": {