1) Use 10-fold cross-validation to evaluate the ridge regression parameter (lambda);
2) In each fold, shuffle the data randomly and split it into 90% training and 10% test sets;
3) This yields a 10 (folds) x 30 (lambda values) error matrix; averaging down each column gives the 10-fold cross-validation mean error for each lambda, from which we pick the lambda with the smallest error.
4) Un-standardize the data (recover the model on the original scale), as derived below.
For the data standardization and de-standardization details, see the corresponding section of the blog post 回归分析及实际案例:预测鲍鱼年龄 (regression analysis and a practical case: predicting abalone age).
Ridge regression requires the data to be standardized before fitting. So once training is done, how do we predict on new data, and how should that new data be standardized using statistics from the training data?
The solution: take the regression coefficients learned on the standardized training data and transform them, so that the standardization is effectively built into the coefficients at prediction time.
The usual prediction process for new data:
Standardize: XT = (XTest - mean(XTrain)) / Var(XTrain)
Predict: Ytest = XT * Ws + mean(YTrain)
Substituting the first formula into the second:
Ytest = ((XTest - mean(XTrain)) / Var(XTrain)) * Ws + mean(YTrain)
Let UnReg = Ws / Var(XTrain)
and constantTerm = -mean(XTrain) * Ws / Var(XTrain) + mean(YTrain).
Then Ytest = XTest * UnReg + constantTerm, which is the new, transformed prediction process. (Note that, following the book's code, features are divided by the variance rather than the standard deviation.)
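As a quick sanity check, here is a minimal sketch (not from the original post; the data and weights are synthetic stand-ins) showing that the two prediction paths give identical results:

from numpy import *

random.seed(1)
XTrain = mat(random.randn(50, 3)); YTrain = random.randn(50)  # synthetic training data
XTest = mat(random.randn(5, 3))                               # synthetic new data
Ws = mat(random.randn(1, 3))            # stand-in for the learned ridge weights

meanTrain = mean(XTrain, 0); varTrain = var(XTrain, 0)
# path 1: standardize the new data with the training statistics, then predict
yHat1 = ((XTest - meanTrain) / varTrain) * Ws.T + mean(YTrain)
# path 2: fold the standardization into the coefficients
unReg = Ws / varTrain
constantTerm = -sum(multiply(meanTrain, unReg)) + mean(YTrain)
yHat2 = XTest * unReg.T + constantTerm
assert allclose(yHat1, yHat2)           # identical up to floating-point error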
## Cross-validation for ridge regression
ridgeWs,ridgeunReg,ridgeConstantTerm=crossValidation(lgX,lgY,10)# goal: find the best ridge regression coefficients
## Test mean squared error
#### Comparison with standard linear regression
## First way to compute it:
ridgeW=mat(ones((1,5)))# reassemble the coefficients: constant term first, then the weights
ridgeW[0,0]=ridgeConstantTerm
ridgeW[0,1:5]=ridgeunReg
ridgeyHat=lgX1*ridgeW.T# ridge prediction (lgX1 is lgX with a leading column of 1s)
rssError(lgY,ridgeyHat.T.A)# error computation
## Second way to compute it:
xMat=mat(lgX)
yMat=mat(lgY)
ridgeyHat=xMat*ridgeunReg.T+ridgeConstantTerm# ridge prediction on the raw features
rssError(yMat.A,ridgeyHat.T.A)# error computation
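rssError is not defined in this excerpt; in the book's code (Machine Learning in Action, ch. 8) it is the plain residual sum of squares:

def rssError(yArr,yHatArr):
    # residual sum of squares between actual and predicted values
    return ((yArr-yHatArr)**2).sum()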
from numpy import *

def crossValidation(xArr,yArr,numVal=10):
    m = len(yArr)                       # number of samples
    indexList = list(range(m))
    errorMat = zeros((numVal,30))       # error matrix: numVal rows x 30 columns (one per lambda)
    for i in range(numVal):             # cross-validation loop
        trainX=[]; trainY=[]
        testX=[]; testY=[]
        random.shuffle(indexList)       # randomly shuffle the sample indices
        # split into training and test sets: 90% training, 10% test
        for j in range(m):              # build the training set from the first 90% of indexList
            if j < m*0.9:
                trainX.append(xArr[indexList[j]])
                trainY.append(yArr[indexList[j]])
            else:
                testX.append(xArr[indexList[j]])
                testY.append(yArr[indexList[j]])
        # ridge regression (default number of lambda values)
        wMat = ridgeTest(trainX,trainY) # 30 x numFeatures: get 30 weight vectors from ridge
        # 30 sets of regression coefficients
        for k in range(30):             # loop over all of the ridge estimates
            matTestX = mat(testX); matTrainX = mat(trainX)
            meanTrain = mean(matTrainX,0)
            varTrain = var(matTrainX,0)
            matTestX = (matTestX-meanTrain)/varTrain  # standardize the test set with the training parameters
            yEst = matTestX * mat(wMat[k,:]).T + mean(trainY)  # evaluate this ridge model on the test set
            errorMat[i,k] = rssError(yEst.T.A,array(testY))
            #print(errorMat[i,k])
    # average all of these error values
    meanErrors = mean(errorMat,0)       # errorMat is numVal x 30 (10 folds x 30 lambdas); averaging
                                        # along axis 0 gives each lambda's 10-fold cross-validation mean
    minMean = float(min(meanErrors))    # smallest mean error across the 30 lambdas
    bestWeights = wMat[nonzero(meanErrors==minMean)]  # weight vector with the smallest error
                                                      # (wMat is from the last fold, as in the book's code)
    # can un-standardize to recover the model on the original scale
    # standardization was Xreg = (x-meanX)/var(x)
    # in terms of x rather than Xreg: x*w/var(x) - meanX*w/var(x) + meanY
    xMat = mat(xArr); yMat = mat(yArr).T
    meanX = mean(xMat,0); varX = var(xMat,0)
    unReg = bestWeights/varX
    print("the best model from Ridge Regression is:\n",unReg)
    # restore the constant term for the un-standardized data
    constantTerm = -1*sum(multiply(meanX,unReg)) + mean(yMat)
    print("with constant term: ",constantTerm)
    return bestWeights,unReg,constantTerm
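A quick smoke test on synthetic data (a minimal sketch, not from the original post; it assumes ridgeTest and rssError are defined as in this post):

from numpy import *

random.seed(2)
X = random.randn(200, 4)                                  # synthetic features
y = X @ array([1.5, -3.0, 0.0, 2.0]) + 0.5*random.randn(200)
bestWs, unReg, c = crossValidation(X.tolist(), y.tolist(), 10)
yHat = mat(X) * unReg.T + c             # predict on the original (unstandardized) scale
print("RSS:", rssError(y, yHat.T.A))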
def ridgeRegres(xMat,yMat,lam=0.2):
    xTx = xMat.T*xMat                       # n x n (n = number of features)
    denom = xTx + eye(shape(xMat)[1])*lam   # add lambda * identity, still n x n
    if linalg.det(denom) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    ws = denom.I * (xMat.T*yMat)            # closed-form ridge solution
    return ws
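For reference, the line ws = denom.I * (xMat.T*yMat) implements the standard closed-form ridge solution,

$$\hat{w} = (X^{\top}X + \lambda I)^{-1} X^{\top} y,$$

where for any lambda > 0 the matrix X^T X + lambda*I is positive definite and hence invertible, so the singularity check above only matters when lambda is zero or numerically tiny.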
"""
Function description: sweep the ridge regression parameter lambda
Parameters:
    xArr - feature matrix
    yArr - response values
Returns:
    wMat - a matrix of weight vectors w, one row per lambda (row length matches the number of features)
Author:
    heda3
Blog:
    https://blog.csdn.net/heda3
Modify:
    2020-01-10
"""
def ridgeTest(xArr,yArr):
    xMat = mat(xArr); yMat = mat(yArr).T
    # standardize the data
    yMean = mean(yMat,0)
    yMat = yMat - yMean                 # centering Y removes the need for an X0 (intercept) column
    # regularize (standardize) the X's
    xMeans = mean(xMat,0)               # compute the mean, then subtract it off
    xVar = var(xMat,0)                  # compute the variance of each Xi, then divide by it
    xMat = (xMat - xMeans)/xVar
    numTestPts = 30                     # number of lambda values to sweep
    wMat = zeros((numTestPts,shape(xMat)[1]))  # numTestPts x numFeatures matrix of weights
    for i in range(numTestPts):
        ws = ridgeRegres(xMat,yMat,exp(i-10))  # lambda = e^(i-10), swept on a log scale
        wMat[i,:] = ws.T
    return wMat
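To visualize how the 30 weight vectors evolve, a minimal matplotlib sketch (not from the original post; the data is synthetic) plots each coefficient against log(lambda) = i - 10:

from numpy import *
import matplotlib.pyplot as plt

random.seed(0)
X = random.randn(100, 4)                                  # synthetic features
y = (X @ array([3.0, -2.0, 0.5, 0.0]) + 0.1*random.randn(100)).tolist()
wMat = ridgeTest(X.tolist(), y)         # 30 x 4, one row per lambda = e^(i-10)

plt.plot(arange(30) - 10, wMat)         # ridge trace: coefficients shrink toward 0 as lambda grows
plt.xlabel("log(lambda)")
plt.ylabel("regression coefficients")
plt.show()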