Implementing Gradient Descent for Linear Regression in NumPy and PyTorch

醉东枫 03-01 15:00

NumPy implementation:

import matplotlib
matplotlib.use('TkAgg')  # select the TkAgg backend before importing pyplot
import matplotlib.pyplot as plt
import numpy as np

# Partial derivative of the loss J with respect to theta0
def gradient_theta0(x, y, theta0, theta1):
    h = theta0 + theta1*x
    return np.sum(h - y) / len(x)

# Partial derivative of the loss J with respect to theta1
def gradient_theta1(x, y, theta0, theta1):
    h = theta0 + theta1*x
    return np.sum((h - y)*x) / len(x)
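
For reference (a derivation added here, not in the original post): the two functions above compute the partial derivatives of the mean squared error with the conventional 1/2 factor,

J(\theta_0, \theta_1) = \frac{1}{2m} \sum_{i=1}^{m} (h_i - y_i)^2, \qquad h_i = \theta_0 + \theta_1 x_i

\frac{\partial J}{\partial \theta_0} = \frac{1}{m} \sum_{i=1}^{m} (h_i - y_i), \qquad \frac{\partial J}{\partial \theta_1} = \frac{1}{m} \sum_{i=1}^{m} (h_i - y_i)\, x_i

and the update rule applied in the loop below is \theta_j \leftarrow \theta_j - \alpha \frac{\partial J}{\partial \theta_j}. Note that the loss printed during training is mean((h - y)**2), which omits the 1/2 and therefore equals 2J; this only rescales the reported number, not the direction of the gradient steps.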

# Iteration loop for the gradient descent algorithm
# Takes the data points x and y, the learning rate alpha, and the number of iterations n

def gradient_descent(x, y, alpha, n):
    # Initialize theta0 and theta1 to 0
    theta0 = 0.0
    theta1 = 0.0
    for i in range(1, n+1):
        # First compute the partial derivatives of J with respect to theta0 and theta1
        g0 = gradient_theta0(x, y, theta0, theta1)
        g1 = gradient_theta1(x, y, theta0, theta1)
        # Gradient descent update of theta0 and theta1
        theta0 = theta0 - alpha*g0
        theta1 = theta1 - alpha*g1

        # In each iteration, print the epoch number, the parameters theta0 and theta1, and the loss
        loss = np.mean((theta0 + theta1*x - y) ** 2)
        print(f'Epoch {i}, theta0 = {theta0:.3f}, theta1 = {theta1:.3f}, Loss: {loss:.3f}')

    return theta0, theta1  # return the final parameters


if __name__ == '__main__':
    # Define three samples, (1,1), (2,2) and (3,3), stored in x and y
    x = np.array([1.0, 2.0, 3.0])
    y = np.array([1.0, 2.0, 3.0])
    alpha = 0.01  # learning rate
    n = 100  # number of iterations

    # Fit the line parameters theta0 and theta1 by gradient descent
    theta0, theta1 = gradient_descent(x, y, alpha, n)

    # Print the predicted values at x = 4, 5 and 6
    print('Prediction at x=4: {}'.format(theta0 + theta1*4))
    print('Prediction at x=5: {}'.format(theta0 + theta1*5))
    print('Prediction at x=6: {}'.format(theta0 + theta1*6))

    # Plot the three training samples
    plt.scatter(x, y, color='red', marker='+')
    # Plot the fitted line
    x = np.linspace(0, 10, 100)  # 100 evenly spaced points from 0 to 10
    h = theta1*x + theta0  # values of the fitted line
    plt.plot(x, h)  # draw the line
    plt.show()  # display the figure


'''
If you hit the error: AttributeError: module 'backend_interagg' has no attribute 'FigureCanvas'
add the following before importing pyplot:
import matplotlib
matplotlib.use('TkAgg')
'''
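
As a quick sanity check (a sketch added here, not part of the original post), the gradient-descent result can be compared against NumPy's closed-form least-squares fit via np.polyfit; for this data the exact answer is theta0 = 0, theta1 = 1:

import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 2.0, 3.0])
# np.polyfit returns coefficients from highest degree down: [slope, intercept] for deg=1
theta1_ls, theta0_ls = np.polyfit(x, y, deg=1)
print(f'Closed-form fit: theta0 = {theta0_ls:.3f}, theta1 = {theta1_ls:.3f}')
# Gradient descent with alpha=0.01 and n=100 only approximates these values;
# more iterations or a larger learning rate brings it closer.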

PyTorch implementation:

import torch


# Gradient descent implemented with PyTorch
def gradient_descent(x, y, alpha, n):
    # Convert the input x and y to tensors
    x = torch.tensor(x, dtype=torch.float32)
    y = torch.tensor(y, dtype=torch.float32)
    # Define the parameter tensors theta1 and theta0
    theta1 = torch.tensor(0.0, requires_grad=True)
    theta0 = torch.tensor(0.0, requires_grad=True)

    # Define an optimizer over the list of parameters
    optimizer = torch.optim.Adam([theta0, theta1], lr=alpha)

    # Enter the iteration loop
    for i in range(1, n+1):
        # Compute the mean squared error
        loss = torch.mean((theta0 + theta1*x - y) ** 2)
        optimizer.zero_grad()  # clear old gradients before the new backward pass
        loss.backward()        # compute the gradients of loss w.r.t. the parameters
        optimizer.step()       # update theta0 and theta1

        # Print debugging information
        print(f'Epoch {i}, theta0 = {theta0.item():.3f}, theta1 = {theta1.item():.3f}, Loss: {loss.item():.3f}')

    return theta0.item(), theta1.item()
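
A minimal usage sketch (mirroring the NumPy main block above; not part of the original post):

if __name__ == '__main__':
    x = [1.0, 2.0, 3.0]
    y = [1.0, 2.0, 3.0]
    # Note: Adam with lr=0.01 converges more slowly on this problem than
    # plain gradient descent; more iterations may be needed for a close fit.
    theta0, theta1 = gradient_descent(x, y, alpha=0.01, n=100)
    print(f'Fitted line: h = {theta0:.3f} + {theta1:.3f} * x')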
