# Copyright (c) 2017, PyTorch Team
# All rights reserved
# Licensed under BSD 3-Clause License.

# This example is based on the PyTorch MNIST example:
# https://github.com/pytorch/examples/blob/master/mnist/main.py

import warnings

import cloudpickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import torchvision
from torchvision import datasets, transforms

import mlflow
import mlflow.pytorch
from mlflow.utils.environment import _mlflow_conda_env


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        # Reshape flat scoring inputs (e.g. rows of 784 pixels) into image batches
        x = x.view(-1, 1, 28, 28)
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4 * 4 * 50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


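# A quick sanity check (added sketch, not part of the original example): thanks to
# the reshaping view in forward(), the network accepts both image-shaped batches
# and flattened 784-feature rows, which is how scoring requests typically arrive.
_check_net = Net()
assert _check_net(torch.zeros(2, 1, 28, 28)).shape == (2, 10)
assert _check_net(torch.zeros(2, 784)).shape == (2, 10)
del _check_net

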
def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            # Use MLflow logging
            mlflow.log_metric("epoch_loss", loss.item())


def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # sum up batch loss
            test_loss += F.nll_loss(output, target, reduction="sum").item()
            # get the index of the max log-probability
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print("\n")
    print("Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    # Use MLflow logging
    mlflow.log_metric("average_loss", test_loss)


class Args(object):
    pass


# Training settings
args = Args()
args.batch_size = 64
args.test_batch_size = 1000
args.epochs = 3  # Higher number for better convergence
args.lr = 0.01
args.momentum = 0.5
args.no_cuda = True
args.seed = 1
args.log_interval = 10
args.save_model = True

use_cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)

device = torch.device("cuda" if use_cuda else "cpu")

kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(
        '../data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))])),
    batch_size=args.test_batch_size, shuffle=True, **kwargs)


def driver():
    warnings.filterwarnings("ignore")
    # Dependencies for deploying the model
    pytorch_index = "https://download.pytorch.org/whl/"
    pytorch_version = "cpu/torch-1.1.0-cp36-cp36m-linux_x86_64.whl"
    deps = [
        "cloudpickle=={}".format(cloudpickle.__version__),
        pytorch_index + pytorch_version,
        "torchvision=={}".format(torchvision.__version__),
        "Pillow==6.0.0"
    ]
    with mlflow.start_run() as run:
        model = Net().to(device)
        optimizer = optim.SGD(
            model.parameters(),
            lr=args.lr,
            momentum=args.momentum)
        for epoch in range(1, args.epochs + 1):
            train(args, model, device, train_loader, optimizer, epoch)
            test(args, model, device, test_loader)
        # Log model to run history using MLflow
        if args.save_model:
            model_env = _mlflow_conda_env(additional_pip_deps=deps)
            mlflow.pytorch.log_model(model, "model", conda_env=model_env)
        return run


if __name__ == "__main__":
    run = driver()
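    # A minimal scoring sketch (added assumption, not part of the original example):
    # reload the model that was just logged from the run's artifact store and score
    # one batch from the test loader. Assumes an MLflow version that supports
    # "runs:/" model URIs; the names below (model_uri, loaded_model, ...) are ours.
    if args.save_model:
        model_uri = "runs:/{}/model".format(run.info.run_id)
        loaded_model = mlflow.pytorch.load_model(model_uri)
        loaded_model.eval()
        with torch.no_grad():
            sample, _ = next(iter(test_loader))
            predictions = loaded_model(sample.to(device)).argmax(dim=1)
        print("Predictions for one test batch:", predictions[:10].tolist())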