Linear Regression with Gradient Descent: A C++ Implementation

Gascognya 2022-03-11

Linear regression model:

f(x)=w_{0}x_{0}+w_{1}x_{1}+...+w_{n}x_{n}=w^{T}X

Cost function:

J(w)=\frac{1}{2m} \sum_{i=1}^{m}(f(x_{i})-y_{i})^{2}

Gradient descent update:

w=w-\frac{\alpha}{m}(Xw^{T}-Y)^{T}X
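
This update follows from differentiating the cost: for each coefficient w_{j} the partial derivative is the average residual weighted by the j-th input (x_{ij} denotes the j-th input of the i-th sample), and stacking these over j gives the matrix form above:

\frac{\partial J}{\partial w_{j}}=\frac{1}{m}\sum_{i=1}^{m}(f(x_{i})-y_{i})x_{ij}, \qquad w_{j} \leftarrow w_{j}-\alpha\frac{\partial J}{\partial w_{j}}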

C++ implementation:

All matrices are stored as vector<vector<double>>: x holds one sample per row (a constant 1 is appended to each row so that the last coefficient acts as the intercept), y is an m×1 column of targets, and w is a 1×(n+1) row of coefficients.

#include<iostream>
#include<vector>
#include<cstdlib>
#include<ctime>
using namespace std;

vector<vector<double>> MatrixMult(const vector<vector<double>>& a, const vector<vector<double>>& b)	// returns res = a*b, matrix multiplication
{
	int n1=a.size();
	int n2=a[0].size();
	int n3=b.size();
	int n4=b[0].size();

	vector<vector<double>> res(n1,vector<double>(n4,0));

	if(n2!=n3)
	{
		cout<<"Error: matrix dimensions do not match"<<endl;
		return res;
	}
	else
	{
		for(int i=0; i<n1; i++)
		{
			for(int j=0; j<n4; j++)
			{
				double temp=0;
				for(int k=0; k<n2; k++)
				{
					temp+=a[i][k]*b[k][j];
				}
				res[i][j]=temp;
			}
		}
	}
	return res;
}

vector<vector<double>> transpose(const vector<vector<double>>& a)		// matrix transpose
{
	int n1=a.size();
	int n2=a[0].size();
	vector<vector<double>> temp(n2,vector<double>(n1,0));
	for(int i=0; i<n2; i++)
	{
		for(int j=0; j<n1; j++)
		{
			temp[i][j]=a[j][i];
		}
	}

	return temp;
}


double costfun(const vector<vector<double>>& x, const vector<vector<double>>& y, const vector<vector<double>>& w)	// cost function J(w)
{
	int num=y.size();		// number of samples
	double res=0;
	vector<vector<double>> temp;
	temp=MatrixMult(x,transpose(w));		// predictions x*w^T, an m x 1 column
	for(int i=0; i<temp.size(); i++)
	{
		for(int j=0; j<temp[i].size(); j++)
		{
			res+=(temp[i][j]-y[i][j])*(temp[i][j]-y[i][j]);		// squared error
		}
	}

	return res/(2.0*num);		// (1/2m) * sum of squared errors, matching J(w) above
}

// Gradient descent over the full training set: w = w - (alpha/m) * (x*w^T - y)^T * x
// w is passed by reference so the fitted coefficients are visible to the caller.
vector<double> GD(const vector<vector<double>>& x, const vector<vector<double>>& y, vector<vector<double>>& w, double alpha, int epoch)
{
	int num=y.size();		// number of samples
	vector<double> cost;

	for(int i=0; i<epoch; i++)
	{
		cout<<"epoch:"<<i+1<<endl;

		vector<vector<double>> temp;

		temp=MatrixMult(x,transpose(w));		// predictions x*w^T (m x 1)

		for(int row=0; row<temp.size(); row++)		// residuals x*w^T - y
		{
			for(int col=0; col<temp[row].size(); col++)
			{
				temp[row][col]-=y[row][col];
			}
		}

		temp=MatrixMult(transpose(temp),x);		// gradient term (x*w^T - y)^T * x, a 1 x (n+1) row

		for(int row=0; row<temp.size(); row++)		// update every coefficient
		{
			for(int col=0; col<temp[row].size(); col++)
			{
				w[row][col]-=(alpha/double(num))*temp[row][col];
				cout<<"w["<<row<<"]["<<col<<"]:"<<w[row][col]<<" ";
			}
			cout<<endl;
		}

		cost.push_back(costfun(x,y,w));
		cout<<"cost["<<i<<"]="<<cost[i]<<endl<<endl;
		if(cost[i]<0.00001)			// stop early once the cost is small enough
			break;
	}
	return cost;
}
vector<vector<double>> LinearRegression(vector<vector<double>> x, vector<vector<double>> y, double alpha, int epoch)
{
	for(int i=0;i<x.size();i++)			// append a constant 1 to every sample for the intercept term
	{
		x[i].push_back(1);
	}
	vector<vector<double>> w(1,vector<double>(x[0].size()));		// random initial coefficients in [0, 2)
	srand((unsigned int)(time(NULL)));
	for(int i=0;i<w[0].size();i++)
	{
		w[0][i]=double(rand() % 20)/10;
	}

	GD(x,y,w,alpha,epoch);		// gradient descent updates w in place

	cout<<endl<<"Y=";			// print the fitted formula
	for(int i=0;i<w[0].size()-1;i++)
	{
		cout<<w[0][i]<<"X"<<i+1<<"+";
	}
	cout<<w[0][w[0].size()-1]<<endl;
	return w;
}
int main()
{
	double alpha = 0.001;		// learning rate
	int epoch = 10000;			// maximum number of iterations
	// Training data: one sample per row of x; each target satisfies y = x1 + x2 + x3
	vector<vector<double>> x={{1,2,3},{2,3,4},{3,4,5},{4,5,6},{5,6,7},{6,7,8},{7,8,9}};
	vector<vector<double>> y={{6},{9},{12},{15},{18},{21},{24}};
	vector<vector<double>> w;
	w=LinearRegression(x,y,alpha,epoch);
	return 0;
}
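
The row returned by LinearRegression can then be used to score new samples. Below is a minimal sketch of such a helper; predict is not part of the original program and simply assumes the layout produced above, i.e. a 1×(n+1) weight row whose last entry is the constant term:

#include<vector>
using namespace std;

// Hypothetical helper (not in the original post): evaluates the fitted model
// f(x) = w[0][0]*x[0] + ... + w[0][n-1]*x[n-1] + w[0][n] (last entry of w is the constant term).
double predict(const vector<vector<double>>& w, const vector<double>& x_new)
{
	double y_hat = w[0].back();			// constant term appended by LinearRegression
	for (size_t i = 0; i + 1 < w[0].size(); i++)
		y_hat += w[0][i] * x_new[i];		// weighted sum of the features
	return y_hat;
}

With the training data in main, every row satisfies y = x1 + x2 + x3, so once the cost has been driven close to zero a call such as predict(w, {8,9,10}) should return a value close to 27.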

 

 
