PyTorch (CNN) - A Basic Example Using MNIST

장비 정 2021. 5. 24. 11:37
# import libraries
import torch

from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor

# data
training_data = datasets.MNIST(
    root = 'data',
    train = True,
    download = True,
    transform = ToTensor()
)

test_data = datasets.MNIST(
    root = 'data',
    train = False,
    download = True,
    transform = ToTensor()
)

train_load = DataLoader(
    training_data, batch_size = 64
)

test_load = DataLoader(
    test_data, batch_size = 64
)
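
Each batch the loader yields is an image tensor of shape (64, 1, 28, 28) plus a label tensor of shape (64,); a quick sanity check (the variable names below are illustrative):

# sanity check (illustrative): peek at one batch from the loader
x_batch, y_batch = next(iter(train_load))
print(x_batch.shape) # torch.Size([64, 1, 28, 28])
print(y_batch.shape) # torch.Size([64])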

# modeling
device = 'cuda' if torch.cuda.is_available() else 'cpu'

class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()

        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 32, 1, 1), # in channels 1, out channels 32, kernel 1, stride 1
            nn.ReLU(),
            nn.MaxPool2d(2, 2) # pooling (2, 2)
        ) # this layer takes the image from 1 x 28 x 28 to 32 x 14 x 14

        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, 1, 1), # layer1 output 32 channels, so the input channels here are 32
            nn.ReLU(),
            nn.MaxPool2d(2, 2)
        ) # this layer takes 32 x 14 x 14 to 64 x 7 x 7

        self.fclayer = nn.Sequential(
            nn.Linear(64 * 7 * 7, 10) # layer2 outputs 64 x 7 x 7 feature maps, so the fully
        )                             # connected layer flattens them and maps to the 10 labels

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1) # flatten to (batch, 64 * 7 * 7)
        out = self.fclayer(out)
        return out # stack the layers declared above and return the final layer's output
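
The shape comments above can be checked directly by tracing a dummy batch through each block (a minimal sketch; note PyTorch tensors are channel-first, (N, C, H, W)):

# shape check (illustrative): trace a dummy batch through the layers
net = NeuralNetwork()
dummy = torch.randn(1, 1, 28, 28)
out1 = net.layer1(dummy)
out2 = net.layer2(out1)
print(out1.shape)       # torch.Size([1, 32, 14, 14])
print(out2.shape)       # torch.Size([1, 64, 7, 7])
print(net(dummy).shape) # torch.Size([1, 10])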

# train, test
model = NeuralNetwork().to(device) # instantiate the model and move it to the device

loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)

def train(dataloader, model, loss_fn, optimizer):
    model.train() # switch to train mode (test() below switches to eval mode)
    for batch, (x, y) in enumerate(dataloader):
        x, y = x.to(device), y.to(device)
        
        optimizer.zero_grad()
        pred = model(x)
        loss = loss_fn(pred, y)
        
        loss.backward()
        optimizer.step()
        
        if batch % 100 == 0:
            loss = loss.item()
            print(f'loss : {loss:.4f}')
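
This print condition is why each epoch in the results below shows exactly ten loss lines: 60,000 training images at batch size 64 give 938 batches, so batch % 100 == 0 fires at batches 0, 100, ..., 900.

# illustrative: count the batches per epoch
print(len(train_load)) # 938 (= ceil(60000 / 64))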
            
def test(dataloader, model, loss_fn): # no training happens in test
    size = len(dataloader.dataset)
    model.eval()

    loss = 0
    correct = 0

    with torch.no_grad():
        for x, y in dataloader:
            x, y = x.to(device), y.to(device)

            pred = model(x)
            loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

        loss /= size
        correct /= size

        print(f'loss : {loss:.4f}, acc : {correct:.4f}')

# fit
epochs = 10
for epoch in range(epochs):
    print(f'epochs : {epoch + 1}')
    train(train_load, model, loss_fn, optimizer)
    test(test_load, model, loss_fn)

# results
'''
epochs : 1
loss : 2.2979
loss : 0.8552
loss : 0.4504
loss : 0.6528
loss : 0.5799
loss : 0.6271
loss : 0.5192
loss : 0.6456
loss : 0.5720
loss : 0.4313
loss : 0.0073, acc : 0.8310
epochs : 2
loss : 0.3200
loss : 0.4334
loss : 0.3016
loss : 0.5564
loss : 0.4419
loss : 0.5735
loss : 0.3947
loss : 0.5986
loss : 0.5506
loss : 0.3880
loss : 0.0067, acc : 0.8472
epochs : 3
loss : 0.2727
loss : 0.3916
loss : 0.2538
loss : 0.5271
loss : 0.3970
loss : 0.5702
loss : 0.3501
loss : 0.5780
loss : 0.5436
loss : 0.3697
loss : 0.0065, acc : 0.8507
epochs : 4
loss : 0.2567
loss : 0.3654
loss : 0.2323
loss : 0.5003
loss : 0.3759
loss : 0.5598
loss : 0.3208
loss : 0.5617
loss : 0.5376
loss : 0.3580
loss : 0.0064, acc : 0.8521
epochs : 5
loss : 0.2515
loss : 0.3430
loss : 0.2199
loss : 0.4779
loss : 0.3693
loss : 0.5455
loss : 0.3031
loss : 0.5483
loss : 0.5264
loss : 0.3530
loss : 0.0063, acc : 0.8555
epochs : 6
loss : 0.2458
loss : 0.3225
loss : 0.2096
loss : 0.4602
loss : 0.3650
loss : 0.5342
loss : 0.2900
loss : 0.5324
loss : 0.5134
loss : 0.3455
loss : 0.0062, acc : 0.8579
epochs : 7
loss : 0.2404
loss : 0.3042
loss : 0.2001
loss : 0.4442
loss : 0.3589
loss : 0.5230
loss : 0.2808
loss : 0.5194
loss : 0.5025
loss : 0.3391
loss : 0.0062, acc : 0.8604
epochs : 8
loss : 0.2354
loss : 0.2907
loss : 0.1910
loss : 0.4351
loss : 0.3523
loss : 0.5146
loss : 0.2747
loss : 0.5052
loss : 0.4966
loss : 0.3318
loss : 0.0061, acc : 0.8624
epochs : 9
loss : 0.2323
loss : 0.2825
loss : 0.1834
loss : 0.4271
loss : 0.3385
loss : 0.5106
loss : 0.2709
loss : 0.4968
loss : 0.4939
loss : 0.3254
loss : 0.0060, acc : 0.8660
epochs : 10
loss : 0.2281
loss : 0.2760
loss : 0.1788
loss : 0.4215
loss : 0.3296
loss : 0.5050
loss : 0.2666
loss : 0.4874
loss : 0.4935
loss : 0.3213
loss : 0.0059, acc : 0.8663
'''
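
Once training finishes, the model can be used on individual images; a minimal inference sketch (the names x0 and y0 are illustrative):

# inference sketch (illustrative): classify one test image
model.eval()
x0, y0 = test_data[0] # x0: (1, 28, 28) tensor, y0: integer label
with torch.no_grad():
    pred = model(x0.unsqueeze(0).to(device)) # add a batch dimension -> (1, 1, 28, 28)
print(pred.argmax(1).item(), y0) # predicted class vs. true label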
