
autoencoder

A few days ago I came across an introductory blog post on autoencoders that included a C implementation. Since I have been looking at Python and deep learning lately, I put together this Python version of the autoencoder. The material follows Andrew Ng's sparse autoencoder notes; the regularization (sparsity) penalty term is omitted in this exercise.
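For reference, the omitted term is the KL-divergence sparsity penalty beta * sum_j KL(rho || rho_hat_j) from Ng's notes, where rho_hat_j is the average activation of hidden unit j over the training set. Below is a minimal sketch of how it could be computed, kept separate from the exercise code; the function name and the rho/beta values are illustrative, not part of the original exercise.

import numpy as np

def sparsity_penalty(hidden_activations, rho=0.05, beta=3.0):
    # hidden_activations: (M, nHidden) array of hidden-layer activations over
    # all M training samples; rho is the target average activation and beta
    # weights the penalty (both values here are illustrative defaults).
    rho_hat = hidden_activations.mean(axis=0)  # average activation of each hidden unit
    kl = rho * np.log(rho / rho_hat) + (1 - rho) * np.log((1 - rho) / (1 - rho_hat))
    return beta * kl.sum()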
The exercise code is as follows, for reference:
import numpy as np

import matplotlib.pyplot as plt

class AutoEncoder():
    """Auto Encoder

    Index map between network layers and the parameter lists:
      layer  1  2  ...  L-1  L
      W      0  1  ...  L-2
      B      0  1  ...  L-2
      Z         0  1  ...    L-2
      A         0  1  ...    L-2
    """

    def __init__(self, X, Y, nNodes):
        # training samples
        self.X = X
        self.Y = Y
        # number of samples
        self.M = len(self.X)
        # number of layers in the network
        self.nLayers = len(nNodes)
        # number of nodes at each layer
        self.nNodes = nNodes
        # parameters of the network
        self.W = list()
        self.B = list()
        self.dW = list()
        self.dB = list()
        self.A = list()
        self.Z = list()
        self.delta = list()
        for iLayer in range(self.nLayers - 1):
            self.W.append(np.random.rand(nNodes[iLayer] * nNodes[iLayer + 1]).reshape(nNodes[iLayer], nNodes[iLayer + 1]))
            self.B.append(np.random.rand(nNodes[iLayer + 1]))
            self.dW.append(np.zeros([nNodes[iLayer], nNodes[iLayer + 1]]))
            self.dB.append(np.zeros(nNodes[iLayer + 1]))
            self.A.append(np.zeros(nNodes[iLayer + 1]))
            self.Z.append(np.zeros(nNodes[iLayer + 1]))
            self.delta.append(np.zeros(nNodes[iLayer + 1]))

        # value of the cost function
        self.Jw = 0.0
        # activation function (logistic function)
        self.sigmod = lambda z: 1.0 / (1.0 + np.exp(-z))
        # learning rate
        self.alpha = 1.2
        # number of iterations
        self.steps = 30000

    def BackPropAlgorithm(self):
        # clear accumulated values
        self.Jw = 0.0
        for iLayer in range(self.nLayers - 1):
            self.dW[iLayer].fill(0.0)
            self.dB[iLayer].fill(0.0)
        # propagation (iteration over the M samples)
        for i in range(self.M):
            # forward propagation
            for iLayer in range(self.nLayers - 1):
                if iLayer == 0:  # first layer
                    self.Z[iLayer] = np.dot(self.X[i], self.W[iLayer])
                else:
                    self.Z[iLayer] = np.dot(self.A[iLayer - 1], self.W[iLayer])
                self.A[iLayer] = self.sigmod(self.Z[iLayer] + self.B[iLayer])

            # back propagation (reverse layer order)
            for iLayer in range(self.nLayers - 1)[::-1]:
                if iLayer == self.nLayers - 2:  # output layer
                    self.delta[iLayer] = -(self.Y[i] - self.A[iLayer]) * (self.A[iLayer] * (1 - self.A[iLayer]))
                    self.Jw += np.dot(self.Y[i] - self.A[iLayer], self.Y[i] - self.A[iLayer]) / self.M
                else:
                    # propagate the error back through the weights of the following layer
                    self.delta[iLayer] = np.dot(self.W[iLayer + 1], self.delta[iLayer + 1]) * (self.A[iLayer] * (1 - self.A[iLayer]))
                # accumulate dW and dB (outer product of input activations and delta)
                if iLayer == 0:
                    self.dW[iLayer] += np.outer(self.X[i], self.delta[iLayer])
                else:
                    self.dW[iLayer] += np.outer(self.A[iLayer - 1], self.delta[iLayer])
                self.dB[iLayer] += self.delta[iLayer]
        # gradient-descent update
        for iLayer in range(self.nLayers - 1):
            self.W[iLayer] -= (self.alpha / self.M) * self.dW[iLayer]
            self.B[iLayer] -= (self.alpha / self.M) * self.dB[iLayer]

    def PlainAutoEncoder(self):
        for i in range(self.steps):
            self.BackPropAlgorithm()
            print("step:%d Jw=%f" % (i, self.Jw))

    def ValidateAutoEncoder(self):
        for i in range(self.M):
            print(self.X[i])
            for iLayer in range(self.nLayers - 1):
                if iLayer == 0:  # input layer
                    self.Z[iLayer] = np.dot(self.X[i], self.W[iLayer])
                else:
                    self.Z[iLayer] = np.dot(self.A[iLayer - 1], self.W[iLayer])
                self.A[iLayer] = self.sigmod(self.Z[iLayer] + self.B[iLayer])
                print("\t layer=%d" % iLayer, self.A[iLayer])


# example I
x = np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]])
nNodes = np.array([4, 2, 4])
ae1 = AutoEncoder(x, x, nNodes)
ae1.PlainAutoEncoder()
ae1.ValidateAutoEncoder()

# example II
xx = np.array([[0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0],
               [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0]])
nNodes = np.array([8, 3, 8])
ae2 = AutoEncoder(xx, xx, nNodes)
ae2.PlainAutoEncoder()
ae2.ValidateAutoEncoder()

Encoding results:

Experiment I
[0 0 0 1]
	 layer=0 [ 0.01302643  0.96719543]
	 layer=1 [ 1.57238820e-02  1.20588310e-06  1.08413750e-02  9.84219313e-01]
[0 0 1 0]
	 layer=0 [ 0.01144031  0.00744765]
	 layer=1 [ 1.44924868e-05  1.55748023e-02  9.81726880e-01  1.36618996e-02]
[0 1 0 0]
	 layer=0 [ 0.86627009  0.01495617]
	 layer=1 [ 1.62337811e-02  9.81973769e-01  1.25422146e-02  6.73994052e-06]
[1 0 0 0]
	 layer=0 [ 0.98847236  0.98440664]
	 layer=1 [ 9.81234922e-01  1.18846601e-02  7.39007595e-07  1.10367082e-02]
Experiment II
[0 0 0 0 0 0 0 1]
	 layer=0 [ 0.99508807  0.31220899  0.48997899]
	 layer=1 [ 1.33813277e-07  2.71499552e-07  2.28906156e-02  2.00197908e-02  2.14807450e-02  3.53961509e-06  2.08828173e-02  9.55541236e-01]
[0 0 0 0 0 0 1 0]
	 layer=0 [ 0.88991135  0.98941587  0.98913571]
	 layer=1 [ 6.43189673e-10  7.34862760e-07  4.19985633e-03  5.38528950e-03  2.76411244e-08  1.63956888e-02  9.67491870e-01  2.38585982e-02]
[0 0 0 0 0 1 0 0]
	 layer=0 [ 0.00891799  0.66022658  0.92714355]
	 layer=1 [ 2.27007658e-02  1.27462283e-02  8.81956627e-03  2.12609526e-06  6.43669115e-08  9.69024814e-01  2.51647129e-02  3.36635876e-07]
[0 0 0 0 1 0 0 0]
	 layer=0 [ 0.3733507   0.01641385  0.00811345]
	 layer=1 [ 3.33491811e-02  7.94141132e-03  7.47922495e-04  5.42704627e-03  9.59578391e-01  1.46288703e-05  6.53527899e-06  2.98059873e-02]
[0 0 0 1 0 0 0 0]
	 layer=0 [ 0.70704934  0.92868309  0.01000832]
	 layer=1 [ 6.75927422e-09  2.30173120e-02  1.78638660e-07  9.70029655e-01  1.01296153e-02  2.94668106e-05  1.96031261e-02  1.88561584e-02]
[0 0 1 0 0 0 0 0]
	 layer=0 [ 0.49186973  0.00968964  0.98189495]
	 layer=1 [ 1.86617070e-02  3.98505598e-07  9.66089969e-01  3.83731311e-07  1.05465090e-04  1.14700238e-02  4.25592385e-03  2.19703418e-02]
[0 1 0 0 0 0 0 0]
	 layer=0 [ 0.00866221  0.63403138  0.0202442 ]
	 layer=1 [ 1.02791239e-02  9.64187014e-01  8.42089296e-07  2.64467598e-02  1.25705154e-02  1.58432612e-02  8.49363937e-05  2.89638777e-06]
[1 0 0 0 0 0 0 0]
	 layer=0 [ 0.00534319  0.00686477  0.42395342]
	 layer=1 [ 9.52807477e-01  2.20162837e-02  2.87970157e-02  4.59472274e-06  2.79663804e-02  2.30125617e-02  1.24136757e-05  4.99659405e-05]
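For reference, a minimal sketch of how the learned hidden representation could be inspected once example II has run; it reuses the trained ae2 instance and its sigmod attribute from the code above. Note that, as the layer=0 values above show, some hidden activations stay near 0.5, so rounding does not necessarily give eight distinct 3-bit codes.

for i in range(ae2.M):
    # first-layer (hidden) activations for sample i
    z = np.dot(ae2.X[i], ae2.W[0])
    code = ae2.sigmod(z + ae2.B[0])
    # threshold at 0.5 to read the code as (approximately) binary
    print(ae2.X[i], "->", np.round(code).astype(int))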
