日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 编程资源 > 编程问答 >内容正文

编程问答

pytorch:Logistic回归

發布時間:2024/4/14 编程问答 43 豆豆
生活随笔 收集整理的這篇文章主要介紹了 pytorch:Logistic回归 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
"""Binary logistic regression on a 2-feature dataset ('data.txt') with PyTorch.

Each line of data.txt is "x1,x2,label" with label in {0, 1}. The script plots
the raw data, trains a 2-in/1-out sigmoid model with BCELoss + SGD, plots the
learned decision line, and plots the loss curve.
"""
import matplotlib.pyplot as plt
from torch import nn, optim
import numpy as np
import torch

# Use a font that can render CJK characters in the plot labels/titles.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False


def readText():
    """Parse 'data.txt' ("x1,x2,label" per line).

    Returns:
        data:   list of (x1, x2, label) tuples (label as int) — used for plotting.
        x_data: list of [x1, x2] feature pairs (floats).
        y_data: list of labels as floats.
    """
    with open('data.txt', 'r') as f:
        data_list = [line.split('\n')[0] for line in f.readlines()]
    rows = [line.split(',') for line in data_list]
    data = [(float(r[0]), float(r[1]), int(r[2])) for r in rows]
    x_data = [[float(r[0]), float(r[1])] for r in rows]
    y_data = [float(r[2]) for r in rows]
    return data, x_data, y_data


def visualize(data):
    """Scatter-plot the raw samples, colored by class (red=0, blue=1)."""
    x0 = list(filter(lambda x: x[-1] == 0.0, data))  # samples with label 0
    x1 = list(filter(lambda x: x[-1] == 1.0, data))  # samples with label 1
    plt.plot([p[0] for p in x0], [p[1] for p in x0], 'ro', label='類別 0')
    plt.plot([p[0] for p in x1], [p[1] for p in x1], 'bo', label='類別 1')
    plt.legend()
    plt.title(label='原始數據分布情況')
    plt.show()


def visualize_after(data):
    """Scatter-plot the samples plus the learned decision boundary.

    NOTE(review): reads the trained ``model`` from module scope — only valid
    after training in the __main__ block below.
    """
    x0 = list(filter(lambda x: x[-1] == 0.0, data))  # samples with label 0
    x1 = list(filter(lambda x: x[-1] == 1.0, data))  # samples with label 1
    plt.plot([p[0] for p in x0], [p[1] for p in x0], 'ro', label='類別 0')
    plt.plot([p[0] for p in x1], [p[1] for p in x1], 'bo', label='類別 1')
    # Decision boundary: w0*x + w1*y + b = 0  =>  y = (-w0*x - b) / w1
    w0, w1 = model.lr.weight[0]
    w0 = w0.data.item()
    w1 = w1.data.item()
    b = model.lr.bias.item()
    plot_x = np.arange(30, 100, 0.1)
    plot_y = (-w0 * plot_x - b) / w1
    plt.plot(plot_x, plot_y, 'yo', label='分類線')
    plt.legend()
    plt.title(label='分類線可視化')
    plt.show()


class LogisticRegression(nn.Module):
    """Logistic regression: a single 2->1 linear layer followed by sigmoid."""

    def __init__(self):
        super(LogisticRegression, self).__init__()
        self.lr = nn.Linear(2, 1)
        # Sigmoid squashes the linear output into (0, 1) — a probability.
        self.sm = nn.Sigmoid()

    def forward(self, x):
        x = self.lr(x)
        x = self.sm(x)
        return x


if __name__ == '__main__':
    data, x_data, y_data = readText()

    # Convert ONCE, before the loop (the original re-wrapped the tensors with
    # torch.tensor(...) on every one of the 50 000 iterations). BCELoss needs
    # float32 and a target shaped like the model output (N, 1); the original
    # compared (N, 1) against (N), which broadcasts (mask == y) to an (N, N)
    # matrix and inflates the accuracy count — unsqueeze fixes both.
    x = torch.from_numpy(np.array(x_data)).float()
    y = torch.from_numpy(np.array(y_data)).float().unsqueeze(1)

    visualize(data)  # show the raw data distribution first

    model = LogisticRegression()
    if torch.cuda.is_available():
        model.cuda()
        x = x.cuda()
        y = y.cuda()

    criterion = nn.BCELoss()  # binary cross-entropy for the 0/1 target
    optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)

    ctn = []  # epoch numbers, for the loss curve
    lo = []   # loss values, for the loss curve
    for epoch in range(50000):
        out = model(x)
        loss = criterion(out, y)
        print_loss = loss.item()

        mask = out.ge(0.5).float()          # predict 1 where p >= 0.5
        correct = (mask == y).sum()         # element-wise now that shapes match
        acc = correct.item() / x.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        ctn.append(epoch + 1)
        lo.append(print_loss)
        if (epoch + 1) % 1000 == 0:
            print('*' * 10)
            print('epoch {}'.format(epoch + 1))
            print('loss is {:.4f}'.format(print_loss))
            print('acc is {:.4f}'.format(acc))

    visualize_after(data)

    # Loss vs. training iteration.
    plt.plot(ctn, lo)
    plt.title(label='訓練次數與損失值關系')
    plt.xlabel('訓練次數')
    plt.ylabel('損失值')
    plt.show()

實驗數據:

34.62365962451697,78.0246928153624,0 30.2867107622687,43.89499752400101,0 35.84740876993872,72.90219802708364,0 60.18259938620976,86.3855209546826,1 79.0327360507101,75.3443764369103,1 45.08327747668339,56.3163717815305,0 61.10666453684766,96.51142588489624,1 75.02474556738889,46.55401354116538,1 76.09878670226257,87.42056971926803,1 84.43281996120035,43.53339331072109,1 95.86155507093572,38.22527805795094,0 75.01365838958247,30.60326323428011,0 82.30705337399482,76.48196330235604,1 69.36458875970939,97.71869196188608,1 39.53833914367223,76.03681085115882,0 53.9710521485623,89.20735013750265,1 69.07014406283025,52.74046973016765,1 67.9468554771161746,67.857410673128,0

實驗結果:

注意事項:

(1)數據太少,沒有找到完整數據。

(2)注意類型轉換,float64轉為float32,對應代碼為:

x = torch.tensor(x, dtype=torch.float32)
out = model(x)
y = torch.tensor(y, dtype=torch.float32)

?

超強干貨來襲 云風專訪:近40年碼齡,通宵達旦的技術人生

總結

以上是生活随笔為你收集整理的pytorch:Logistic回归的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。