LinearRegression
Implement a single-layer LinearRegression with PyTorch:
Prepare the data with sklearn
sklearn official documentation
import torch
import torch.nn as nn
import numpy as np # for converting array dtypes
from sklearn import datasets # for generating the dataset
import matplotlib.pyplot as plt # for plotting
# 0) Prepare the data
# Use sklearn's datasets module to generate a random regression dataset
# n_samples -- number of samples: 100
# n_features -- number of features: 1
# noise -- standard deviation of the Gaussian noise: 20 (controls how scattered the points are)
# random_state -- random seed: ensures the same dataset is generated on every run
# Returns two arrays (per the official docs):
# X: ndarray of shape (n_samples, n_features)
#     The input samples.
# y: ndarray of shape (n_samples,) or (n_samples, n_targets)
#     The output values.
X_numpy, y_numpy = datasets.make_regression(n_samples=100, n_features=1, noise=20, random_state=4)
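As a quick sanity check (a minimal sketch, not part of the original snippet), the returned arrays match the shapes documented above:
# Hypothetical check of the documented return shapes
print(X_numpy.shape, X_numpy.dtype)  # (100, 1) float64
print(y_numpy.shape, y_numpy.dtype)  # (100,) float64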
Convert numpy to tensor
# Convert to tensor data types
# from_numpy() creates a tensor that shares memory with the numpy array
# astype() casts the np.ndarray data to float32
X = torch.from_numpy(X_numpy.astype(np.float32))
y = torch.from_numpy(y_numpy.astype(np.float32))
# view(): reshape the tensor; y goes from 1-D, shape (100,), to 2-D, shape (100, 1)
y = y.view(y.shape[0], 1)
# Unpack the number of samples and features
n_samples, n_features = X.shape
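Note that astype() above returns a copy, so X no longer shares memory with X_numpy; the sharing applies between from_numpy() and the exact array it is given. A minimal sketch of the two behaviors noted in the comments, using a throwaway array a:
# Hypothetical demo: from_numpy() shares memory, view() only reshapes
a = np.zeros(4, dtype=np.float32)
t = torch.from_numpy(a)
a[0] = 5.0
print(t)  # tensor([5., 0., 0., 0.]) -- the change shows up in the tensor
print(t.view(4, 1).shape)  # torch.Size([4, 1])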
Build the model
nn.Linear official documentation
# 1) model
# Linear model: f = wx + b
input_size = n_features
output_size = 1
# Instantiate the model
model = nn.Linear(input_size, output_size)
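nn.Linear(1, 1) holds exactly the two parameters of f = wx + b. A minimal sketch to inspect them (the values are randomly initialized, so they differ between runs; x0 is a hypothetical input):
# Inspect the freshly initialized parameters
print(model.weight.shape)  # torch.Size([1, 1]) -- w
print(model.bias.shape)    # torch.Size([1])    -- b
# A forward pass is just x @ w.T + b
x0 = torch.tensor([[2.0]])
print(model(x0), x0 @ model.weight.T + model.bias)  # identical values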
# 2) Loss and optimizer
learning_rate = 0.01
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
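MSELoss is simply the mean of the squared errors. A minimal sketch of the equivalence, reusing criterion from above with hypothetical tensors pred and target:
# Check: nn.MSELoss() equals the mean of squared differences
pred = torch.tensor([1.0, 2.0, 3.0])
target = torch.tensor([1.0, 1.0, 1.0])
print(criterion(pred, target))        # tensor(1.6667)
print(((pred - target) ** 2).mean())  # tensor(1.6667)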
# 3) Training loop
num_epochs = 100
for epoch in range(num_epochs):
    # Forward pass and loss
    y_predicted = model(X)
    loss = criterion(y_predicted, y)
    # Backward pass and update
    loss.backward()
    optimizer.step()
    # zero the gradients before the next step (they would accumulate otherwise)
    optimizer.zero_grad()
    if (epoch + 1) % 10 == 0:
        print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')
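The zero_grad() call matters because PyTorch accumulates gradients into .grad across backward() calls. A minimal sketch with a hypothetical scalar w:
# Demo: gradients accumulate unless .grad is zeroed between steps
w = torch.tensor(1.0, requires_grad=True)
(w * 2).backward()
print(w.grad)  # tensor(2.)
(w * 2).backward()
print(w.grad)  # tensor(4.) -- the second gradient was added to the first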
# Plot
# detach() removes the tensor from the autograd graph, then .numpy() converts it back to an np.ndarray for plotting
predicted = model(X).detach().numpy()
plt.plot(X_numpy, y_numpy, 'ro')
plt.plot(X_numpy, predicted, 'b')
plt.show()
epoch: 10, loss = 1692.7443
epoch: 20, loss = 1303.2531
epoch: 30, loss = 1024.7643
epoch: 40, loss = 825.2753
epoch: 50, loss = 682.1303
epoch: 60, loss = 579.2510
epoch: 70, loss = 505.2017
epoch: 80, loss = 451.8304
epoch: 90, loss = 413.3145
epoch: 100, loss = 385.4872
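A closing note on the detach() call used for plotting: it returns a tensor cut off from the autograd graph, which is required before calling .numpy() on a tensor that requires grad. A minimal sketch:
# Demo: .numpy() refuses tensors that are still attached to the graph
out = model(X)
print(out.requires_grad)           # True -- calling out.numpy() here would raise a RuntimeError
print(out.detach().requires_grad)  # False -- safe to convert with .numpy()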