printf("ho_tari\n");
ep.32 Introduction to Deep Learning 6
2024.8.21
Softmax
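Softmax maps a vector of raw scores (logits) to a probability distribution: each entry is exponentiated and divided by the sum of exponentials, so all outputs are positive and sum to 1. As a quick sanity check before the lab (a minimal sketch, not part of the original lab code):

import torch

logits = torch.tensor([[1.0, 2.0, 3.0]])
probs = torch.softmax(logits, dim=1)   # exp(logits) / sum(exp(logits))
print(probs)                           # tensor([[0.0900, 0.2447, 0.6652]])
print(probs.sum())                     # tensor(1.0000) -- rows sum to 1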
# Lab 6 Softmax Classifier
import torch
from torch.autograd import Variable  # deprecated wrapper; kept for the original lab code

torch.manual_seed(777)  # for reproducibility

x_data = [[1, 2, 1, 1], [2, 1, 3, 2], [3, 1, 3, 4], [4, 1, 5, 5],
          [1, 7, 5, 5], [1, 2, 5, 6], [1, 6, 6, 6], [1, 7, 7, 7]]
y_data = [[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 1, 0],
          [0, 1, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]]
X = Variable(torch.Tensor(x_data))
Y = Variable(torch.Tensor(y_data))

nb_classes = 3

# torch.nn.Softmax computes softmax activations
# softmax = exp(logits) / reduce_sum(exp(logits), dim)
softmax = torch.nn.Softmax(dim=1)
linear = torch.nn.Linear(4, nb_classes, bias=True)  # 4: number of input features
model = torch.nn.Sequential(linear, softmax)  # softmax turns the 3 linear outputs into class probabilities
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # lr: learning rate

for step in range(2001):
    optimizer.zero_grad()
    hypothesis = model(X)
    # Cross entropy cost/loss: mean over samples of -sum(y * log(p))
    cost = -Y * torch.log(hypothesis)
    cost = torch.sum(cost, 1).mean()
    cost.backward()
    optimizer.step()
    if step % 200 == 0:
        print(step, cost.data.numpy())
# Testing & One-hot encoding
print('--------------')
a = model(Variable(torch.Tensor([[1, 11, 7, 9]])))
print(a.data.numpy(), torch.max(a, 1)[1].data.numpy())
print('--------------')
b = model(Variable(torch.Tensor([[1, 3, 4, 3]])))
print(b.data.numpy(), torch.max(b, 1)[1].data.numpy())
print('--------------')
c = model(Variable(torch.Tensor([[1, 1, 0, 1]])))
print(c.data.numpy(), torch.max(c, 1)[1].data.numpy())
print('--------------')
all_x = model(Variable(torch.Tensor([[1, 11, 7, 9], [1, 3, 4, 3], [1, 1, 0, 1]])))  # renamed from `all`, which shadows a Python builtin
print(all_x.data.numpy(), torch.max(all_x, 1)[1].data.numpy())
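The lab above computes the cross-entropy loss by hand from softmax outputs. As a cross-check (a minimal sketch, not from the original lab; tensor names are illustrative), the hand-written -Y * log(softmax(logits)) loss matches torch.nn.functional.cross_entropy, which applies log-softmax to raw logits internally and is the numerically safer route used in the next lab:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(8, 3)                       # fake raw scores: 8 samples, 3 classes
labels = torch.tensor([2, 2, 2, 1, 1, 1, 0, 0])  # class indices matching y_data above
y_onehot = F.one_hot(labels, num_classes=3).float()

manual = (-y_onehot * torch.log(F.softmax(logits, dim=1))).sum(1).mean()
builtin = F.cross_entropy(logits, labels)        # log-softmax + NLL in one call
print(torch.allclose(manual, builtin))           # True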
# Lab 6 Softmax Classifier
import torch
from torch.autograd import Variable
import numpy as np
torch.manual_seed(777) # for reproducibility
# Predicting animal type based on various features
xy = np.loadtxt('/content/drive/MyDrive/data-04-zoo.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
print(x_data.shape, y_data.shape)
nb_classes = 7 # 0 ~ 6
X = Variable(torch.from_numpy(x_data))
Y = Variable(torch.from_numpy(y_data))
# one-hot encoding via scatter_: write a 1 at each row's class-index column
# (shown for reference; CrossEntropyLoss below takes the raw class indices directly)
Y_one_hot = torch.zeros(Y.size()[0], nb_classes)
Y_one_hot.scatter_(1, Y.long().data, 1)
Y_one_hot = Variable(Y_one_hot)
print("one_hot", Y_one_hot.data)
softmax = torch.nn.Softmax(dim=1)
model = torch.nn.Linear(16, nb_classes, bias=True)  # 16 input features in the zoo data
# Cross entropy cost/loss
criterion = torch.nn.CrossEntropyLoss() # Softmax is internally computed.
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for step in range(2001):
    optimizer.zero_grad()
    hypothesis = model(X)
    # CrossEntropyLoss expects raw logits and a 1D LongTensor of class indices
    cost = criterion(hypothesis, Y.long().view(-1))
    cost.backward()
    optimizer.step()

    prediction = torch.max(softmax(hypothesis), 1)[1].float()
    correct_prediction = (prediction.data == Y.view(-1).data)  # flatten Y so shapes match
    accuracy = correct_prediction.float().mean()

    if step % 100 == 0:
        print("Step: {:5}\tLoss: {:.3f}\tAcc: {:.2%}".format(step, cost.item(), accuracy.item()))
# Let's see if we can predict
pred = torch.max(softmax(hypothesis), 1)[1].float()
for p, y in zip(pred, Y):
    print("[{}] Prediction: {} True Y: {}".format(bool(p.item() == y.item()), int(p.item()), int(y.item())))