```python
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

print(torch.__version__)          # PyTorch version
print(torch.version.cuda)         # CUDA version this build was compiled against
print(torch.cuda.is_available())  # whether CUDA is usable

# Use the GPU if available, otherwise the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Generate some random training data: y = 2x + 1 plus Gaussian noise
np.random.seed(42)
x = np.random.rand(1000, 1)
y = 2 * x + 1 + 0.5 * np.random.randn(1000, 1)

# Convert the data to tensors and move them to the chosen device
x_tensor = torch.from_numpy(x).float().to(device)
y_tensor = torch.from_numpy(y).float().to(device)

# Define the linear regression model
class LinearRegressionModel(nn.Module):
    def __init__(self):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        return self.linear(x)

# Create the model instance
model = LinearRegressionModel().to(device)

# Define the loss function and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Train the model
num_epochs = 1000
for epoch in range(num_epochs):
    # Forward pass
    outputs = model(x_tensor)
    loss = criterion(outputs, y_tensor)

    # Backward pass and optimization
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item()}')

# Make predictions
with torch.no_grad():
    predicted = model(x_tensor).cpu()

# Plot the original data and the fitted line
plt.scatter(x, y, label='Original Data')
plt.plot(x, predicted.numpy(), color='red', label='Predicted Line')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
```
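Since the data was generated from y = 2x + 1 plus noise, the trained model's weight and bias should land close to 2 and 1. A minimal check, assuming the training code above has already run:

```python
# Inspect the learned parameters; they should be close to the true slope 2 and intercept 1
w = model.linear.weight.item()
b = model.linear.bias.item()
print(f'learned weight: {w:.3f} (true 2.0), learned bias: {b:.3f} (true 1.0)')
```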
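The loop above is full-batch gradient descent: every step uses all 1000 samples at once. For larger datasets one would usually switch to mini-batches via `torch.utils.data`. A rough sketch of that variant, reusing the `model`, `criterion`, and `optimizer` already defined above (the batch size and epoch count here are illustrative, not tuned):

```python
from torch.utils.data import TensorDataset, DataLoader

# Wrap the existing tensors in a dataset and iterate over shuffled mini-batches
dataset = TensorDataset(x_tensor, y_tensor)
loader = DataLoader(dataset, batch_size=64, shuffle=True)

for epoch in range(100):
    for xb, yb in loader:
        optimizer.zero_grad()
        loss = criterion(model(xb), yb)
        loss.backward()
        optimizer.step()
```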