Logistic regression with multi-dimensional inputs
A multi-layer network
The model below takes 8 input features and narrows them layer by layer (8 → 6 → 4 → 1), applying a sigmoid after every linear layer so the final output can be read as a probability.
Complete code:
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
# 1. Prepare the data: read it from a csv file
xy = np.loadtxt('diabetes.csv.gz', delimiter=',', dtype=np.float32)
x_data = torch.from_numpy(xy[:, :-1])
y_data = torch.from_numpy(xy[:, [-1]])
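# Note (not in the original code): each row holds 8 feature columns followed by
# one 0/1 label column, which is why xy[:, :-1] yields the 8 inputs the first
# linear layer expects, and xy[:, [-1]] keeps y_data 2-D so its shape matches
# the model's (N, 1) output.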
# 2. Design the model as a class; inherit from nn.Module to use its machinery
class Model(torch.nn.Module):
    # Initialization
    def __init__(self):
        super(Model, self).__init__()
        # A 3-layer neural network
        self.linear1 = torch.nn.Linear(8, 6)  # Linear is a fully connected (linear) unit
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()  # Sigmoid has no parameters, so one instance can be reused

    # Forward pass
    def forward(self, x):
        # Each layer's output is the next layer's input
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        return x
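# An equivalent model could also be written with torch.nn.Sequential
# (a sketch, not part of the original code):
# model = torch.nn.Sequential(
#     torch.nn.Linear(8, 6), torch.nn.Sigmoid(),
#     torch.nn.Linear(6, 4), torch.nn.Sigmoid(),
#     torch.nn.Linear(4, 1), torch.nn.Sigmoid(),
# )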
model = Model()
# 3. Loss and optimizer
criterion = torch.nn.BCELoss(reduction='mean')  # size_average=True is deprecated; reduction='mean' is the equivalent
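# Binary cross-entropy: loss = -mean( y * log(y_hat) + (1 - y) * log(1 - y_hat) )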
# Optimizer: model.parameters() collects the parameters to optimize; lr is the learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
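# SGD updates every parameter p as p <- p - lr * grad(p); other optimizers
# (e.g. torch.optim.Adam) could be swapped in here.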
# 4. Training loop
for epoch in range(100):
    # Forward pass
    y_pred = model(x_data)
    # Compute the loss
    loss = criterion(y_pred, y_data)
    print("epoch={}, loss={}".format(epoch, loss.item()))
    # Zero the gradients (they accumulate by default)
    optimizer.zero_grad()
    # Backward pass
    loss.backward()
    # Update the parameters
    optimizer.step()
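After training, a quick sanity check (a minimal sketch, not part of the original code) is to threshold the predicted probabilities at 0.5 and measure accuracy; since no test split is loaded here, it reuses the training data:

with torch.no_grad():
    y_prob = model(x_data)            # predicted probabilities in (0, 1)
    y_label = (y_prob > 0.5).float()  # hard 0/1 predictions
    acc = (y_label == y_data).float().mean().item()
    print("training accuracy = {:.3f}".format(acc))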