# test_pytorch.py — PyTorch linear-regression demo: synthetic data, SGD training, matplotlib plot.
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
  5. print(torch.__version__) # pytorch版本
  6. print(torch.version.cuda) # cuda版本
  7. print(torch.cuda.is_available()) # 查看cuda是否可用
  8. #
  9. # 使用GPU or CPU
  10. device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
  11. # 生成一些随机的训练数据
  12. np.random.seed(42)
  13. x = np.random.rand(1000, 1)
  14. y = 2 * x + 1 + 0.5 * np.random.randn(1000, 1)
  15. # 将数据转换为张量
  16. x_tensor = torch.from_numpy(x).float()
  17. y_tensor = torch.from_numpy(y).float()
  18. # 定义线性回归模型
  19. class LinearRegressionModel(nn.Module):
  20. def __init__(self):
  21. super(LinearRegressionModel, self).__init__()
  22. self.linear = nn.Linear(1, 1)
  23. def forward(self, x):
  24. return self.linear(x)
  25. # 创建模型实例
  26. model = LinearRegressionModel()
  27. # 定义损失函数和优化器
  28. criterion = nn.MSELoss()
  29. optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
  30. # 训练模型
  31. num_epochs = 1000
  32. for epoch in range(num_epochs):
  33. # 前向传播
  34. outputs = model(x_tensor)
  35. loss = criterion(outputs, y_tensor)
  36. # 反向传播和优化
  37. optimizer.zero_grad()
  38. loss.backward()
  39. optimizer.step()
  40. if (epoch + 1) % 10 == 0:
  41. print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item()}')
  42. # 进行预测
  43. with torch.no_grad():
  44. predicted = model(x_tensor)
  45. # 绘制原始数据和预测结果
  46. plt.scatter(x, y, label='Original Data')
  47. plt.plot(x, predicted.numpy(), color='red', label='Predicted Line')
  48. plt.xlabel('x')
  49. plt.ylabel('y')
  50. plt.legend()
  51. plt.show()