I am trying to implement logistic regression. I have mapped the features to polynomial terms of the form x1^2*x2^0 + x1^1*x2^1 + ... Now I want to plot the decision boundary for it. After going through this answer, I wrote the code below to use the contour function:

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


def map_features(x, degree):
    """Expand the two raw features (columns 1 and 2 of x) into all
    polynomial terms x1^(i-j) * x2^j up to the given degree."""
    x_old = x.copy()
    x = pd.DataFrame({"intercept": [1] * x.shape[0]})
    column_index = 1
    for i in range(1, degree + 1):
        for j in range(0, i + 1):
            x.insert(column_index,
                     str(x_old.columns[1]) + "^" + str(i - j) + str(x_old.columns[2]) + "^" + str(j),
                     np.multiply(x_old.iloc[:, 1] ** (i - j), x_old.iloc[:, 2] ** j))
            column_index += 1
    return x


def normalize_features(x):
    """Standardize every column except the intercept."""
    for column_name in x.columns[1:]:
        mean = x[column_name].mean()
        std = x[column_name].std()
        x[column_name] = (x[column_name] - mean) / std
    return x


def normalize_features2(x):
    """Standardize every column except the intercept and the last (label) column."""
    for column_name in x.columns[1:-1]:
        mean = x[column_name].mean()
        std = x[column_name].std()
        x[column_name] = (x[column_name] - mean) / std
    return x


def sigmoid(z):
    return 1 / (1 + np.exp(-z))


def predict(x):
    """Classify one example given as a row vector of mapped features."""
    global theta
    # .item() replaces np.asscalar, which was removed in NumPy 1.23
    probability = sigmoid(np.dot(x, theta)).item()
    return 1 if probability >= 0.5 else 0


def predict2(x):
    """Same as predict, but for an example given as a column vector."""
    global theta
    probability = sigmoid(np.dot(x.T, theta)).item()
    return 1 if probability >= 0.5 else 0


def cost(x, y, theta):
    """Cross-entropy cost: J = 1/m * sum(-y*log(h) - (1-y)*log(1-h))."""
    m = x.shape[0]
    h_theta = pd.DataFrame(sigmoid(np.dot(x, theta)))
    cost = 1 / m * ((-np.multiply(y, h_theta.apply(np.log))
                     - np.multiply(1 - y, (1 - h_theta).apply(np.log))).sum())
    return cost


def gradient_descent(x, y, theta):
    global cost_values
    m = x.shape[0]
    iterations = 1000
    alpha = 0.03
    cost_values = pd.DataFrame({'iteration': [0], 'cost': [cost(x, y, theta)]})
```

Below is the plot I got as output. I am not sure whether I am interpreting it correctly, but the boundary should be more of a curve separating the two classes, not a straight line. The dataset is here: ex2data1.csv
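The body of gradient_descent above only shows the setup; the loop itself is the standard batch update. Sketched out below (this assumes theta is an (n, 1) NumPy array and x, y are the DataFrames from above; the cost is recomputed on plain arrays inside the loop, so this is not my exact code):

```python
def gradient_descent(x, y, theta):
    """Batch gradient descent for the cross-entropy cost above."""
    global cost_values
    m = x.shape[0]
    iterations = 1000
    alpha = 0.03
    X, Y = x.values, y.values.reshape(-1, 1)    # work on plain arrays
    history = []
    for it in range(iterations):
        h = sigmoid(X @ theta)                                        # h_theta(x), shape (m, 1)
        j = float(np.mean(-Y * np.log(h) - (1 - Y) * np.log(1 - h)))  # J(theta)
        history.append((it, j))
        gradient = X.T @ (h - Y) / m             # vectorized gradient of J(theta)
        theta = theta - alpha * gradient         # simultaneous update of all parameters
    cost_values = pd.DataFrame(history, columns=['iteration', 'cost'])
    return theta
```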
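For completeness, the contour-based plotting I am attempting has roughly this shape: evaluate theta' * mapped(u, v) over a grid and draw the zero level set, where the sigmoid is exactly 0.5. This is a sketch, not my exact code; the column names in `point` are placeholders (map_features only reads columns 1 and 2), the grid resolution is arbitrary, and theta is assumed to come from training on features mapped with the same degree:

```python
def plot_boundary(x_raw, y, theta, degree):
    """Scatter both classes and overlay the curve theta' * mapped(u, v) = 0."""
    pos = np.ravel(y) == 1
    plt.scatter(x_raw.iloc[:, 1][pos], x_raw.iloc[:, 2][pos], marker='+', label='y = 1')
    plt.scatter(x_raw.iloc[:, 1][~pos], x_raw.iloc[:, 2][~pos], marker='o', label='y = 0')

    # Grid spanning the data range of the two raw features.
    u = np.linspace(x_raw.iloc[:, 1].min(), x_raw.iloc[:, 1].max(), 100)
    v = np.linspace(x_raw.iloc[:, 2].min(), x_raw.iloc[:, 2].max(), 100)
    z = np.zeros((len(v), len(u)))               # contour wants z[row=v, col=u]
    for i, ui in enumerate(u):
        for j, vj in enumerate(v):
            point = pd.DataFrame({'c0': [1.0], 'x1': [ui], 'x2': [vj]})
            mapped = map_features(point, degree)
            # NOTE: if the training features were normalized, the identical
            # means/stds must be applied to `mapped` here as well.
            z[j, i] = np.dot(mapped.values, theta).item()   # theta' * x
    plt.contour(u, v, z, levels=[0])             # decision boundary: theta' * x = 0
    plt.legend()
    plt.show()
```

One thing worth checking: if the mapped features were standardized before training, the grid evaluations need the identical means and stds, otherwise the drawn boundary will not line up with the scattered raw data.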