test_pytorch_cuda.py

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
import matplotlib.pyplot as plt

# Check whether a CUDA-capable GPU is available; fall back to CPU otherwise
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
  8. # 定义数据预处理步骤
  9. transform = transforms.Compose([
  10. transforms.ToTensor(),
  11. transforms.Normalize((0.1307,), (0.3081,))
  12. ])
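# Note: 0.1307 and 0.3081 are the commonly quoted mean and standard deviation
# of the MNIST training set, so the normalization above feeds the network
# inputs that are roughly zero-mean and unit-variance.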
# Load the MNIST training dataset
train_dataset = datasets.MNIST(root='./data', train=True, transform=transform, download=True)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)

# Load the MNIST test dataset
test_dataset = datasets.MNIST(root='./data', train=False, transform=transform, download=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=False)
# Define the convolutional neural network model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = nn.functional.relu(nn.functional.max_pool2d(self.conv1(x), 2))
        x = nn.functional.relu(nn.functional.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 320)
        x = nn.functional.relu(self.fc1(x))
        x = self.fc2(x)
        return x
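# Note on the flatten size: a 28x28 MNIST image becomes 24x24 after conv1
# (kernel 5), 12x12 after the first 2x2 max-pool, 8x8 after conv2, and 4x4
# after the second pool, so the flattened feature vector is 20 * 4 * 4 = 320.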
# Create a model instance and move it to the selected device (GPU or CPU)
model = Net()
model.to(device)

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)
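# Plain SGD is used here; passing momentum=0.9 to optim.SGD is a common
# variation if training converges too slowly.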
# Train the model
def train_model():
    model.train()
    for epoch in range(20):
        running_loss = 0.0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if batch_idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch + 1, batch_idx * 64, len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), running_loss / (batch_idx + 1)))
        print('====> Epoch: {} Average Loss: {:.4f}'.format(epoch + 1, running_loss / len(train_loader)))
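# Note: the '[{}/{}]' progress counter above assumes the batch size of 64
# configured in train_loader; batch_idx * len(data) would also be exact for
# the final, possibly smaller batch.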
# Evaluate the model on the test set
def test_model():
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader)
    accuracy = correct / len(test_loader.dataset)
    print('Test set: Average loss: {:.4f}, Accuracy: {:.2f}%'.format(test_loss, 100 * accuracy))
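# Note: test_loss here is the mean of per-batch average losses
# (CrossEntropyLoss defaults to reduction='mean'), which is close to, but not
# exactly, the per-sample average when the last batch is smaller.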
if __name__ == "__main__":
    train_model()
    test_model()
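# To run this script (a minimal sketch, assuming PyTorch and torchvision are
# installed with CUDA support):
#
#   python test_pytorch_cuda.py
#
# A quick way to confirm a GPU build is present before training:
#
#   python -c "import torch; print(torch.cuda.is_available(), torch.version.cuda)"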