Mirror of https://github.com/saymrwulf/onnxruntime.git, synced 2026-05-17 21:10:43 +00:00.
This change adds a new pipeline for checking Python code. Currently, the pipeline runs only flake8. flake8 also runs as part of the CMake project builds, but we can switch over completely to the new pipeline later. The .flake8 config file was updated so it is easier to run standalone (flake8 --config ./.flake8), and some Python formatting issues were fixed in files that were not previously scanned.
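For context, a standalone-friendly flake8 config looks something like the sketch below. This is an illustrative example only, not the repository's actual .flake8 file; the values are made up, though the option names (max-line-length, extend-ignore, exclude) are standard flake8 options.

    [flake8]
    max-line-length = 120
    extend-ignore = E203
    exclude = .git,build,__pycache__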
133 lines · 5 KiB · Python
import argparse
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

# PyTorch model: a simple two-layer fully connected network.
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, input1):
        out = self.fc1(input1)
        out = self.relu(out)
        out = self.fc2(out)
        return out

def my_loss(x, target, is_train=True):
    if is_train:
        return F.nll_loss(F.log_softmax(x, dim=1), target)
    else:
        return F.nll_loss(F.log_softmax(x, dim=1), target, reduction='sum')

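# Note: the two branches of my_loss are equivalent to F.cross_entropy(x, target)
# and F.cross_entropy(x, target, reduction='sum'), which fuse the log_softmax
# and nll_loss calls into one. The 'sum' reduction lets the test loop accumulate
# a total loss and normalize by the full dataset size afterwards.
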
# Helpers
def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if batch_idx == args.train_steps:
            break
        data, target = data.to(device), target.to(device)
        data = data.reshape(data.shape[0], -1)
        optimizer.zero_grad()
        output = model(data)
        loss = my_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            data = data.reshape(data.shape[0], -1)
            output = model(data)
            # Stats
            test_loss += my_loss(output, target, False).item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

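# Note: in test() above, torch.no_grad() avoids building the autograd graph
# during evaluation, and model.eval() switches layers such as dropout and
# batchnorm to inference mode (effectively a no-op for this plain MLP, but
# good practice). Accuracy is top-1: argmax predictions compared to labels.
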
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--train-steps', type=int, default=-1, metavar='N',
                        help='number of steps to train. Set -1 to run through whole dataset (default: -1)')
    parser.add_argument('--batch-size', type=int, default=20, metavar='N',
                        help='input batch size for training (default: 20)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=1, metavar='N',
                        help='number of epochs to train (default: 1)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-path', type=str, default='',
                        help='path for saving the current model')

    # Basic setup
    args = parser.parse_args()
    if not args.no_cuda and torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    torch.manual_seed(args.seed)

    # Data loaders; (0.1307,) and (0.3081,) are the conventional MNIST mean/std.
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True)

    if args.test_batch_size > 0:
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('./data', train=False, transform=transforms.Compose([
                transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
            batch_size=args.test_batch_size, shuffle=True)

    # Modeling: 784 inputs (flattened 28x28 images), 500 hidden units, 10 classes.
    model = NeuralNet(784, 500, 10).to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)

    # Train loop
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        if args.test_batch_size > 0:
            test(model, device, test_loader)

    # Save model
    if args.save_path:
        torch.save(model.state_dict(), os.path.join(args.save_path, "mnist_cnn.pt"))

if __name__ == '__main__':
    main()
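For reference, a typical invocation looks like the line below. The script filename mnist_training.py is illustrative (the actual filename in the repo may differ), while the flags are exactly the ones defined in main() above:

    python mnist_training.py --epochs 1 --train-steps 200 --save-path ./output

Note that --save-path must point to an existing directory: the script joins it with the fixed filename mnist_cnn.pt, and torch.save does not create intermediate directories.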