optimizer, loss for tutorial cnn
This commit is contained in:
@@ -336,6 +336,29 @@
|
|||||||
"model = Net().to(device)\n",
|
"model = Net().to(device)\n",
|
||||||
"print(model)"
|
"print(model)"
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "22e71032",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Loss fn and optimizer"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 11,
|
||||||
|
"id": "54d11a04",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import torch.optim as optim\n",
|
||||||
|
"\n",
|
||||||
|
"criterion = nn.CrossEntropyLoss() # Applies softmax to convert raw scores into probabilities, then penalizes the model based on the log-probability of the correct class \n",
|
||||||
|
"\n",
|
||||||
|
"# SGD is used in the tutorial, but the Adam optimizer is also a popular choice\n",
|
||||||
|
"optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Default learning rate from the tutorial: a compromise between overshooting and training infinitely long; momentum=0.9 --> 90% of each update comes from the previous direction\n"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
|
|||||||
Reference in New Issue
Block a user