[PyTorch & TensorFlow 2.x Basics] Checking basic model implementations in PyTorch and TensorFlow 2.x with a CNN model

2020. 9. 1. 21:44 · DL in Python

2-16. Comparing TensorFlow 2.0 and PyTorch [before]

TensorFlow 2.0

In [1]:
import tensorflow as tf
from tensorflow.keras import layers

from tensorflow.keras import datasets 

Hyperparameter Tuning

In [2]:
num_epochs = 1
batch_size = 64

learning_rate = 0.001

dropout_rate = 0.7  # fraction of units to drop; 0.7 is quite aggressive

input_shape = (28, 28, 1)
num_classes = 10

Preprocess

In [3]:
# load the MNIST dataset
(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()
In [5]:
# add a channel dimension: (N, 28, 28) -> (N, 28, 28, 1)
train_x = train_x[..., tf.newaxis] 
test_x = test_x[..., tf.newaxis]

# normalization
train_x = train_x / 255.
test_x = test_x / 255.
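As a quick sanity check (my addition, not part of the original notebook), printing the shapes and value range confirms the new channel axis and the normalization:

print(train_x.shape, test_x.shape)   # (60000, 28, 28, 1) (10000, 28, 28, 1)
print(train_x.min(), train_x.max())  # 0.0 1.0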

Build Model

In [6]:
inputs = layers.Input(input_shape)
net = layers.Conv2D(32, (3, 3), padding='SAME')(inputs)
net = layers.Activation('relu')(net)
net = layers.Conv2D(32, (3, 3), padding='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.MaxPooling2D(pool_size=(2, 2))(net)
net = layers.Dropout(dropout_rate)(net)

net = layers.Conv2D(64, (3, 3), padding='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.Conv2D(64, (3, 3), padding='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.MaxPooling2D(pool_size=(2, 2))(net)
net = layers.Dropout(dropout_rate)(net)

net = layers.Flatten()(net)
net = layers.Dense(512)(net)
net = layers.Activation('relu')(net)
net = layers.Dropout(dropout_rate)(net)
net = layers.Dense(num_classes)(net)
net = layers.Activation('softmax')(net)

model = tf.keras.Model(inputs=inputs, outputs=net, name='Basic_CNN')
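With 'SAME' padding the convolutions keep the spatial size, and each MaxPooling2D halves it (28 -> 14 -> 7), so the Flatten layer receives 7*7*64 = 3136 features. To verify the layer shapes yourself, a model.summary() call (my addition) prints the full architecture:

model.summary()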
In [7]:
# Model is the full model w/o custom layers
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),  # Optimization
              loss='sparse_categorical_crossentropy',  # Loss Function 
              metrics=['accuracy'])  # Metrics / Accuracy
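The 'sparse' loss is used because train_y holds integer class indices (0-9). As an aside of mine, not part of the original code, this is roughly what the one-hot alternative would look like:

# train_y_onehot = tf.keras.utils.to_categorical(train_y, num_classes)
# model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),
#               loss='categorical_crossentropy',
#               metrics=['accuracy'])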

Training

In [8]:
model.fit(train_x, train_y,
          batch_size=batch_size,
          epochs=num_epochs,
          shuffle=True)

model.evaluate(test_x, test_y, batch_size=batch_size)
938/938 [==============================] - 171s 183ms/step - loss: 2.3019 - accuracy: 0.1122
157/157 [==============================] - 6s 41ms/step - loss: 2.3010 - accuracy: 0.1135
Out[8]:
[2.3010494709014893, 0.11349999904632568]
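The accuracy stays near chance here, most likely because the model trains for only one epoch with a very high dropout rate. If you then want to run inference on a single test image, a minimal sketch (my addition) looks like this:

import numpy as np

probs = model.predict(test_x[:1])            # softmax probabilities, shape (1, 10)
print(np.argmax(probs, axis=1), test_y[:1])  # predicted class vs. ground-truth label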

PyTorch

In [10]:
import torch

# modules for building the model
import torch.nn as nn
import torch.nn.functional as F

# module for optimization
import torch.optim as optim

# modules for data loading and preprocessing
from torchvision import datasets, transforms

Hyperparameter Tuning

In [15]:
seed = 1

lr = 0.001
momentum = 0.5

batch_size = 64
test_batch_size = 64

epochs = 1

# set to True to force CPU even when a GPU is available
no_cuda = False

# PyTorch does not show a progress bar on its own, so set an interval for printing training logs manually
log_interval = 100

Model

In [12]:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)  # PyTorch's Conv2d needs both in_channels and out_channels; Keras Conv2D only takes the number of output filters
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)    # equivalent of a Keras Dense layer; 4*4*50 comes from two (5x5 conv, 2x2 pool) stages: 28 -> 24 -> 12 -> 8 -> 4
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
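A minimal shape check (my addition, reusing the imports above): pushing a dummy batch through the network confirms the 4*4*50 feature size that fc1 expects and the final (batch, 10) log-probability output.

net = Net()
dummy = torch.randn(2, 1, 28, 28)  # (batch, channels, height, width)
print(net(dummy).shape)            # torch.Size([2, 10])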

Preprocess

In [13]:
torch.manual_seed(seed)

# use the GPU if one is available
use_cuda = not no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")


kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

# data loader
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True, **kwargs)



test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=test_batch_size, shuffle=True, **kwargs)
Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to ../data\MNIST\raw\train-images-idx3-ubyte.gz
Extracting ../data\MNIST\raw\train-images-idx3-ubyte.gz to ../data\MNIST\raw
Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz to ../data\MNIST\raw\train-labels-idx1-ubyte.gz
Extracting ../data\MNIST\raw\train-labels-idx1-ubyte.gz to ../data\MNIST\raw
Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz to ../data\MNIST\raw\t10k-images-idx3-ubyte.gz

Extracting ../data\MNIST\raw\t10k-images-idx3-ubyte.gz to ../data\MNIST\raw
Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz to ../data\MNIST\raw\t10k-labels-idx1-ubyte.gz
Extracting ../data\MNIST\raw\t10k-labels-idx1-ubyte.gz to ../data\MNIST\raw
Processing...
Done!
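Optionally (my addition), pulling one batch from the loader shows the (N, C, H, W) layout PyTorch expects, in contrast to TensorFlow's channels-last (N, H, W, C):

images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])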

Optimization

In [14]:
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
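Note that the PyTorch side uses SGD with momentum while the TensorFlow side used Adam; for a closer apples-to-apples comparison, swapping in Adam is a one-liner (my sketch, not part of the original):

# optimizer = optim.Adam(model.parameters(), lr=lr)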

Training

In [16]:
for epoch in range(1, epochs + 1):
    # Train Mode
    model.train()

    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()  # reset the gradients to zero before backpropagation
        output = model(data)
        loss = F.nll_loss(output, target)  # https://pytorch.org/docs/stable/nn.html#nll-loss
        loss.backward()  # backpropagate: compute gradients of the loss w.r.t. the parameters
        optimizer.step()  # update the parameters using the computed gradients

        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    
    # Test mode
    model.eval()  # switch layers such as batch norm and dropout to evaluation mode
    test_loss = 0
    correct = 0
    with torch.no_grad():  # turn off the autograd engine (no gradient tracking) to reduce memory usage and speed things up
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()  # count predictions that match the target

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
Train Epoch: 1 [0/60000 (0%)]	Loss: 2.300039
Train Epoch: 1 [6400/60000 (11%)]	Loss: 2.239659
Train Epoch: 1 [12800/60000 (21%)]	Loss: 2.133954
Train Epoch: 1 [19200/60000 (32%)]	Loss: 2.007281
Train Epoch: 1 [25600/60000 (43%)]	Loss: 1.656332
Train Epoch: 1 [32000/60000 (53%)]	Loss: 1.400323
Train Epoch: 1 [38400/60000 (64%)]	Loss: 0.864234
Train Epoch: 1 [44800/60000 (75%)]	Loss: 0.613059
Train Epoch: 1 [51200/60000 (85%)]	Loss: 0.615209
Train Epoch: 1 [57600/60000 (96%)]	Loss: 0.427826

Test set: Average loss: 0.4822, Accuracy: 8650/10000 (86%)
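If you want to keep the trained weights for later use, a minimal sketch (my addition; the filename is just an example):

torch.save(model.state_dict(), 'mnist_cnn.pt')

# reloading later:
# model = Net()
# model.load_state_dict(torch.load('mnist_cnn.pt'))
# model.eval()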