
Machine Learning Foundations - Homework 3 - Code

Posted: 2019-05-04 10:27:09


Gradient descent and Newton's method iteration: gradient_and_newton.py

# -*- coding:utf-8 -*-
# Author: Evan Mi
import numpy as np
"""Homework 3: minimize E(u, v) with gradient descent and Newton's method."""


def update(u, v, eta):
    """One gradient-descent step with learning rate eta."""
    u_tem = u - eta * (np.exp(u) + v * np.exp(u * v) + 2 * u - 2 * v - 3)
    v_tem = v - eta * (2 * np.exp(2 * v) + u * np.exp(u * v) - 2 * u + 4 * v - 2)
    return u_tem, v_tem


def iter_update(u, v, times):
    """Run `times` gradient-descent steps and return E(u, v) at the result."""
    uo = u
    vo = v
    for i in range(times):
        uo, vo = update(uo, vo, 0.01)
    return np.exp(uo) + np.exp(2 * vo) + np.exp(uo * vo) + uo ** 2 - 2 * uo * vo + 2 * vo ** 2 - 3 * uo - 2 * vo


def update_newton(u, v):
    """One Newton step: (u, v) minus the inverse Hessian times the gradient."""
    gradient_tem = np.array([np.exp(u) + v * np.exp(u * v) + 2 * u - 2 * v - 3,
                             2 * np.exp(2 * v) + u * np.exp(u * v) - 2 * u + 4 * v - 2])
    laplace_tem = np.array([[np.exp(u) + (v ** 2) * np.exp(u * v) + 2,
                             u * v * np.exp(u * v) + np.exp(u * v) - 2],
                            [u * v * np.exp(u * v) + np.exp(u * v) - 2,
                             4 * np.exp(2 * v) + (u ** 2) * np.exp(u * v) + 4]])
    result = np.array([u, v]) - np.dot(np.linalg.pinv(laplace_tem), np.transpose(gradient_tem))
    return result


def iter_update_newton(u, v, times):
    """Run `times` Newton steps and return E(u, v) at the result."""
    uo = u
    vo = v
    for i in range(times):
        uo, vo = update_newton(uo, vo)
    return np.exp(uo) + np.exp(2 * vo) + np.exp(uo * vo) + uo ** 2 - 2 * uo * vo + 2 * vo ** 2 - 3 * uo - 2 * vo


print(iter_update(0, 0, 5))
print(iter_update_newton(0, 0, 5))
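For reference, the objective and the derivatives used by the two update rules can be read directly off the expressions in the code above:

\[
E(u,v) = e^{u} + e^{2v} + e^{uv} + u^{2} - 2uv + 2v^{2} - 3u - 2v
\]
\[
\nabla E = \begin{pmatrix} e^{u} + v\,e^{uv} + 2u - 2v - 3 \\ 2e^{2v} + u\,e^{uv} - 2u + 4v - 2 \end{pmatrix},
\qquad
\nabla^{2} E = \begin{pmatrix} e^{u} + v^{2} e^{uv} + 2 & e^{uv} + uv\,e^{uv} - 2 \\ e^{uv} + uv\,e^{uv} - 2 & 4e^{2v} + u^{2} e^{uv} + 4 \end{pmatrix}
\]

Gradient descent steps with \((u,v) \leftarrow (u,v) - \eta\,\nabla E\), while the Newton update is \((u,v) \leftarrow (u,v) - (\nabla^{2}E)^{-1}\nabla E\); these are exactly what update and update_newton implement.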

Linear regression code:

common.py

# -*- coding:utf-8 -*-
# Author: Evan Mi
import numpy as np


def data_generator(size):
    """Generate `size` points uniformly in [-1, 1] x [-1, 1], label them with
    target_function, prepend a bias column of ones, and flip roughly 10% of the
    labels as noise."""
    x_arr = np.concatenate((np.array([np.random.uniform(-1, 1, size)]).T,
                            np.array([np.random.uniform(-1, 1, size)]).T), axis=1)
    y_arr = target_function(x_arr)
    tem = np.ones((size, 1))
    x_arr = np.concatenate((tem, x_arr), axis=1)
    y_arr = np.where(np.random.uniform(0, 1, size) < 0.1, -y_arr, y_arr)
    return x_arr, y_arr


def sign_zero_as_neg(x):
    """Modified version of np.sign: when the input is 0 it returns -1 instead of 0,
    i.e. points exactly on the boundary are treated as negative examples.
    :param x:
    :return:
    """
    result = np.sign(x)
    result[result == 0] = -1
    return result


def target_function(x):
    """Target function f(x1, x2) = sign(x1^2 + x2^2 - 0.6)."""
    x_tem = (x * x).sum(axis=1) - 0.6
    result = sign_zero_as_neg(x_tem)
    return result
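As a quick sanity check, here is a minimal usage sketch. It assumes common.py sits in a package named linear_regression, matching the import used by linear_regression_al.py below.

from linear_regression import common  # assumed package layout, as in linear_regression_al.py

x, y = common.data_generator(5)
print(x.shape)  # (5, 3): a bias column of ones plus the two uniform features
print(y)        # labels in {-1, +1}; roughly 10% of them are flipped as noise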

linear_regression_al.py

# -*- coding:utf-8 -*-
# Author: Evan Mi
import numpy as np
from linear_regression import common


def e_in_counter(x_arr, y_arr):
    """Fit w_lin with the pseudo-inverse and return the 0/1 in-sample error and w_lin."""
    w_lin = np.dot(np.dot(np.linalg.pinv(np.dot(x_arr.T, x_arr)), x_arr.T), y_arr.T)
    y_in = common.sign_zero_as_neg(np.dot(x_arr, w_lin))
    errs = np.where(y_in == y_arr, 0, 1)
    return errs.sum() / errs.size, w_lin


def e_out_counter(x_arr, y_arr, w_lin):
    """0/1 error of a fixed w_lin on a fresh data set."""
    y_in = common.sign_zero_as_neg(np.dot(x_arr, w_lin))
    errs = np.where(y_in == y_arr, 0, 1)
    return errs.sum() / errs.size


def transform(x_arr):
    """Feature transform (1, x1, x2) -> (1, x1, x2, x1*x2, x1^2, x2^2)."""
    ones_tem = x_arr[:, 0]
    x1_tem = x_arr[:, 1]
    x2_tem = x_arr[:, 2]
    return np.concatenate((np.array([ones_tem]).T, np.array([x1_tem]).T, np.array([x2_tem]).T,
                           np.array([x1_tem * x2_tem]).T, np.array([x1_tem ** 2]).T,
                           np.array([x2_tem ** 2]).T), axis=1)


if __name__ == '__main__':
    avg = 0
    w_avg = 0
    avg_transform = 0
    w_transform = 0
    # 1000 experiments; keep running averages of E_in and of the fitted weights,
    # both with and without the feature transform.
    for i in range(1000):
        xo, yo = common.data_generator(1000)
        e_in, w_in = e_in_counter(xo, yo)
        avg = avg + (1.0 / (i + 1)) * (e_in - avg)
        w_avg = w_avg + (1.0 / (i + 1)) * (w_in - w_avg)
        x_trans = transform(xo)
        e_tran, w_trans = e_in_counter(x_trans, yo)
        avg_transform = avg_transform + (1.0 / (i + 1)) * (e_tran - avg_transform)
        w_transform = w_transform + (1.0 / (i + 1)) * (w_trans - w_transform)
    print("avg:", avg, "w_avg:", w_avg)
    print("avg_trans:", avg_transform, "w_trans", w_transform)
    # Estimate E_out of the averaged transformed weights on a fresh sample.
    xo, yo = common.data_generator(1000)
    x_trans = transform(xo)
    e_out = e_out_counter(x_trans, yo, w_transform)
    print("e_out:", e_out)
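e_in_counter solves linear regression in closed form. With data matrix \(X\) (one row per example, including the bias column) and label vector \(y\), the weights computed above are

\[
w_{\mathrm{lin}} = (X^{\top}X)^{\dagger} X^{\top} y,
\]

where the code uses the pseudo-inverse of \(X^{\top}X\) rather than a plain inverse. transform then maps each point to the quadratic features \((1, x_1, x_2, x_1 x_2, x_1^2, x_2^2)\) before refitting, which is what lets the linear model match the circular target \(\mathrm{sign}(x_1^2 + x_2^2 - 0.6)\).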

Logistic regression code: logistic_regression_al.py

# -*- coding:utf-8 -*-
# Author: Evan Mi
import numpy as np


def load_data(file_name):
    """Read whitespace-separated features with a trailing +1/-1 label per line,
    prepending the constant feature 1."""
    x = []
    y = []
    with open(file_name, 'r+') as f:
        for line in f:
            line = line.rstrip("\n").strip(' ')
            temp = line.split(" ")
            temp.insert(0, '1')
            x_temp = [float(val) for val in temp[:-1]]
            y_tem = [int(val) for val in temp[-1:]][0]
            x.append(x_temp)
            y.append(y_tem)
    nx = np.array(x)
    ny = np.array(y)
    return nx, ny


def gradient_decent_logistic_regression(x, y, eta, w, times):
    """Full-batch gradient descent on the logistic-regression error."""
    local_w = w
    for i in range(times):
        tem_w = np.dot((1.0 / (1 + np.exp(-((-y) * np.dot(x, local_w))))),
                       np.array([-y]).T * x) / np.size(y)
        local_w = local_w - eta * tem_w
    return local_w


def stochastic_gradient_decent_logistic_regression(x, y, eta, w, times):
    """Stochastic gradient descent, cycling through the examples in order."""
    local_w = w
    index = 0
    for i in range(times):
        x_tem = x[index, :]
        y_tem = y[index]
        tem_w = (1.0 / (1 + np.exp(-((-y_tem) * np.dot(local_w, x_tem))))) * (-y_tem) * x_tem
        local_w = local_w - eta * tem_w
        index = (index + 1) % np.size(y)
    return local_w


def e_out_counter(x, y, w):
    """0/1 error on (x, y). Note 1 / (1 + exp(x.w)) is sigma(-x.w), so the comparison
    below effectively flips the predicted label; counting matches with y therefore
    counts the misclassifications of the usual sign(x.w) predictor."""
    local_tem = 1.0 / (1 + np.exp(np.dot(x, w)))
    vec_result = np.where(local_tem > 0.5, 1, -1)
    result = np.where(vec_result == y, 1, 0)
    return sum(result) / np.size(result)


if __name__ == '__main__':
    x_train, y_train = load_data('data/train.dat')
    x_val, y_val = load_data('data/test.dat')
    w_one = gradient_decent_logistic_regression(x_train, y_train, 0.001, np.zeros(np.size(x_train, 1)), 2000)
    e_out_one = e_out_counter(x_val, y_val, w_one)
    print("e_out_one:", e_out_one)
    w_two = gradient_decent_logistic_regression(x_train, y_train, 0.01, np.zeros(np.size(x_train, 1)), 2000)
    e_out_two = e_out_counter(x_val, y_val, w_two)
    print("e_out_two:", e_out_two)
    w_s = stochastic_gradient_decent_logistic_regression(x_train, y_train, 0.001, np.zeros(np.size(x_train, 1)), 2000)
    e_out_s = e_out_counter(x_val, y_val, w_s)
    print("e_out_s:", e_out_s)
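Both trainers evaluate the logistic-regression gradient that appears in the code, with \(\theta(s) = 1/(1+e^{-s})\):

\[
\nabla E_{\mathrm{in}}(w) = \frac{1}{N}\sum_{n=1}^{N} \theta\!\left(-y_n w^{\top}x_n\right)\left(-y_n x_n\right),
\qquad w \leftarrow w - \eta\,\nabla E_{\mathrm{in}}(w).
\]

The stochastic version applies the same update with one example at a time, stepping through the training set in order (the index wraps around with a modulo) rather than sampling at random.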

For the complete project code and the data files it uses, see: gradient descent and Newton's method, linear regression, logistic regression.
