
Deep Learning Notes (6): BatchNorm (Batch Normalization)






Code implementation:

from utils import *
import numpy as np


class BatchNorm1D(LayerBase):
    """Batch normalization layer, built on the base layer class. Three gradients
    must be computed: the gradient w.r.t. X, plus the gradients of the two
    affine parameters (scaler and intercept)."""

    def __init__(self, momentum=0.9, epsilon=1e-6, optimizer=None):
        super().__init__(optimizer)  # the base class requires an optimizer
        self.momentum = momentum
        self.epsilon = epsilon
        self.n_in = None
        self.n_out = None  # input/output dimension; X shape: (batch_size, n_in), and n_in == n_out
        self.params = {
            "scaler": None,
            "intercept": None,      # the two affine parameters, learned via backprop
            "running_mean": None,
            "running_var": None,    # running statistics; tracked, not learned
        }
        self.is_initialized = False

    def _init_params(self, **kwargs):
        """Initialize the parameters."""
        scaler = np.random.rand(self.n_in)
        intercept = np.zeros(self.n_in)
        running_mean = np.zeros(self.n_in)
        running_var = np.ones(self.n_in)
        self.params = {
            "scaler": scaler,
            "intercept": intercept,
            "running_mean": running_mean,
            "running_var": running_var,
        }
        self.gradients = {
            "scaler": np.zeros_like(scaler),
            "intercept": np.zeros_like(intercept),
        }
        self.is_initialized = True

    def reset_running_stats(self):
        # reset the running statistics, e.g. once an epoch has finished
        self.params["running_mean"] = np.zeros(self.n_in)
        self.params["running_var"] = np.ones(self.n_in)

    @property
    def hyperparams(self):
        return {
            "layer": "BatchNorm1D",
            "acti_fn": None,
            "n_in": self.n_in,
            "n_out": self.n_out,
            "epsilon": self.epsilon,
            "momentum": self.momentum,
            "optimizer": {
                "cache": self.optimizer.cache,
                "hyperparams": self.optimizer.hyperparams,
            },
        }

    def forward(self, X, is_train=True, retain_derived=True):
        """Forward pass.

        [train]: Y = scaler * norm(X) + intercept,
                 where norm(X) = (X - mean(X)) / sqrt(var(X) + epsilon)
        [test]:  Y = scaler * running_norm(X) + intercept,
                 where running_norm(X) = (X - running_mean) / sqrt(running_var + epsilon)
        """
        if not self.is_initialized:   # lazy initialization on the first call
            self.n_in = X.shape[1]    # input dimension
            self._init_params()
        # fetch the hyperparameters and parameters used below
        momentum, epsilon = self.hyperparams["momentum"], self.hyperparams["epsilon"]
        rm, rv = self.params["running_mean"], self.params["running_var"]
        scaler, intercept = self.params["scaler"], self.params["intercept"]
        X_mean, X_var = self.params["running_mean"], self.params["running_var"]  # used directly at test time
        if is_train:
            X_mean, X_var = np.mean(X, axis=0), np.var(X, axis=0)  # statistics of the current batch
            # update the running mean and running variance
            self.params["running_mean"] = momentum * rm + (1 - momentum) * X_mean
            self.params["running_var"] = momentum * rv + (1 - momentum) * X_var
        if retain_derived:
            self.X.append(X)  # cache the input for the backward pass
        # forward pass:
        # Y = scaler * norm(X) + intercept, where
        # norm(X) = (X - mean(X)) / sqrt(var(X) + epsilon)
        X_hat = (X - X_mean) / np.sqrt(X_var + epsilon)
        y = scaler * X_hat + intercept
        return y

    def backward(self, dLda, retain_grads=True):
        """Backward pass: compute and store the gradients w.r.t. scaler,
        intercept and X."""
        if not isinstance(dLda, list):
            dLda = [dLda]  # dLda shape: (batch_size, n_classes)
        dX = []
        X = self.X
        # compute the gradients for each cached batch
        for da, x in zip(dLda, X):
            dx, dScaler, dIntercept = self._bwd(da, x)
            dX.append(dx)
            if retain_grads:
                self.gradients["scaler"] = dScaler
                self.gradients["intercept"] = dIntercept
        return dX[0] if len(dX) == 1 else dX  # mirror the list structure of dLda

    def _bwd(self, dLda, X):
        """Apply the BatchNorm gradient formulas."""
        scaler = self.params["scaler"]
        epsi = self.hyperparams["epsilon"]
        n_ex, n_in = X.shape
        X_mean, X_var = X.mean(axis=0), X.var(axis=0)
        X_hat = (X - X_mean) / np.sqrt(X_var + epsi)
        dIntercept = dLda.sum(axis=0)
        dScaler = np.sum(dLda * X_hat, axis=0)
        dX_hat = dLda * scaler
        dX = (n_ex * dX_hat - dX_hat.sum(axis=0) - X_hat * (dX_hat * X_hat).sum(axis=0)) / (
            n_ex * np.sqrt(X_var + epsi)
        )
        return dX, dScaler, dIntercept
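For reference, the backward pass in _bwd above implements the standard batch-norm gradients. Writing X_hat_i = (x_i - mu) / sqrt(sigma^2 + epsilon), gamma = scaler, beta = intercept, and N for the batch size, the three gradients are:

\[
\frac{\partial L}{\partial \beta} = \sum_{i=1}^{N} \frac{\partial L}{\partial y_i}, \qquad
\frac{\partial L}{\partial \gamma} = \sum_{i=1}^{N} \frac{\partial L}{\partial y_i}\,\hat{x}_i, \qquad
\frac{\partial L}{\partial \hat{x}_i} = \frac{\partial L}{\partial y_i}\,\gamma
\]

\[
\frac{\partial L}{\partial x_i} = \frac{1}{N\sqrt{\sigma^2 + \epsilon}}
\left( N\,\frac{\partial L}{\partial \hat{x}_i}
- \sum_{j=1}^{N} \frac{\partial L}{\partial \hat{x}_j}
- \hat{x}_i \sum_{j=1}^{N} \frac{\partial L}{\partial \hat{x}_j}\,\hat{x}_j \right)
\]

The last expression is exactly the dX line in _bwd: the batch mean and variance both depend on every x_i, which is where the two summation terms come from.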

Data:

(X_train, y_train), (X_test, y_test) = load_data()
y_train = np.eye(10)[y_train.astype(int)]
y_test = np.eye(10)[y_test.astype(int)]
X_train = X_train.reshape(-1, X_train.shape[1] * X_train.shape[2]).astype('float32')
X_test = X_test.reshape(-1, X_test.shape[1] * X_test.shape[2]).astype('float32')
print(X_train.shape, y_train.shape)
N = 20000  # take 20,000 samples for training
indices = np.random.permutation(range(X_train.shape[0]))[:N]
X_train, y_train = X_train[indices], y_train[indices]
print(X_train.shape, y_train.shape)
X_train /= 255
X_train = (X_train - 0.5) * 2  # rescale from [0, 1] to [-1, 1]
X_test /= 255
X_test = (X_test - 0.5) * 2
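The helpers load_data and minibatch come from the accompanying utils module, which is not shown here. A minimal, hypothetical stand-in that is consistent with how they are called in this post might look like the following (the file name and array keys are assumptions):

import numpy as np

def load_data(path="mnist.npz"):
    # Load an MNIST-style dataset of 28x28 grayscale digits from a local .npz file.
    with np.load(path) as f:
        return (f["x_train"], f["y_train"]), (f["x_test"], f["y_test"])

def minibatch(X, batch_size=64, shuffle=True):
    # Return (generator of index arrays, number of batches), matching the
    # "batch_generator, n_batch = minibatch(...)" call pattern used below.
    n = X.shape[0]
    idx = np.random.permutation(n) if shuffle else np.arange(n)
    n_batch = int(np.ceil(n / batch_size))
    def gen():
        for i in range(n_batch):
            yield idx[i * batch_size:(i + 1) * batch_size]
    return gen(), n_batch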

Define a two-layer fully connected network with a BatchNorm layer:

class DFN2(object):
    def __init__(self,
                 hidden_dims_1=None,
                 hidden_dims_2=None,
                 optimizer="sgd(lr=0.01)",
                 init_w="std_normal",
                 loss=CrossEntropy()):
        self.optimizer = optimizer
        self.init_w = init_w
        self.loss = loss
        self.hidden_dims_1 = hidden_dims_1
        self.hidden_dims_2 = hidden_dims_2
        self.is_initialized = False

    def _set_params(self):
        """Build the model:
        FC1 -> Sigmoid -> BN -> FC2 -> Softmax
        """
        self.layers = OrderedDict()
        self.layers["FC1"] = FullyConnected(n_out=self.hidden_dims_1,
                                            acti_fn="sigmoid",
                                            init_w=self.init_w,
                                            optimizer=self.optimizer)
        self.layers["BN"] = BatchNorm1D(optimizer=self.optimizer)
        self.layers["FC2"] = FullyConnected(n_out=self.hidden_dims_2,
                                            acti_fn="affine(slope=1,intercept=0)",
                                            init_w=self.init_w,
                                            optimizer=self.optimizer)
        self.layers["Softmax"] = Softmax(dim=-1, optimizer=self.optimizer)
        self.softmax = Softmax(dim=-1, optimizer=self.optimizer)
        self.is_initialized = True

    def forward(self, X_train):
        Xs = {}
        out = X_train
        for k, v in self.layers.items():
            Xs[k] = out  # record the input to each layer
            out = v.forward(out)
        return out, Xs

    def backward(self, grad):
        dXs = {}
        out = grad
        for k, v in reversed(list(self.layers.items())):
            dXs[k] = out
            out = v.backward(out)
        return out, dXs

    def update(self):
        """Apply the gradient updates."""
        for k, v in reversed(list(self.layers.items())):
            v.update()
        self.flush_gradients()  # reset the gradients after updating

    def flush_gradients(self, curr_loss=None):
        """Reset the gradients after an update."""
        for k, v in self.layers.items():
            v.flush_gradients()

    def fit(self, X_train, y_train, n_epochs=20, batch_size=64, verbose=False):
        """Parameters:
        X_train: training data
        y_train: training labels
        n_epochs: number of epochs
        batch_size: batch size within each epoch
        verbose: whether to print the loss for every batch
        """
        self.verbose = verbose
        self.n_epochs = n_epochs
        self.batch_size = batch_size
        if not self.is_initialized:
            self.n_features = X_train.shape[1]
            self._set_params()
        prev_loss = np.inf
        # softmax = self.softmax.forward
        for i in range(n_epochs):
            loss, epoch_start = 0.0, time.time()
            batch_generator, n_batch = minibatch(X_train, self.batch_size, shuffle=True)
            for j, batch_idx in enumerate(batch_generator):
                batch_len, batch_start = len(batch_idx), time.time()
                X_batch, y_batch = X_train[batch_idx], y_train[batch_idx]
                out, _ = self.forward(X_batch)
                # y_pred_batch = softmax(out)
                y_pred_batch = out
                batch_loss = self.loss(y_batch, y_pred_batch)
                grad = self.loss.grad(y_batch, y_pred_batch)  # gradient of the cross-entropy loss
                _, _ = self.backward(grad)   # backward pass: compute the gradients
                self.update()                # update the parameters, then reset the gradients
                loss += batch_loss           # accumulate the loss
                if self.verbose:
                    fstr = "\t[Batch {}/{}] Train loss: {:.3f} ({:.1f}s/batch)"
                    print(fstr.format(j + 1, n_batch, batch_loss, time.time() - batch_start))
            loss /= n_batch
            fstr = "[Epoch {}] Avg. loss: {:.3f} Delta: {:.3f} ({:.2f}m/epoch)"
            print(fstr.format(i + 1, loss, prev_loss - loss, (time.time() - epoch_start) / 60.0))
            prev_loss = loss

    def evaluate(self, X_test, y_test, batch_size=128):
        acc = 0.0
        batch_generator, n_batch = minibatch(X_test, batch_size, shuffle=True)
        for j, batch_idx in enumerate(batch_generator):
            batch_len, batch_start = len(batch_idx), time.time()
            X_batch, y_batch = X_test[batch_idx], y_test[batch_idx]
            y_pred_batch, _ = self.forward(X_batch)
            y_pred_batch = np.argmax(y_pred_batch, axis=1)
            y_batch = np.argmax(y_batch, axis=1)
            acc += np.sum(y_pred_batch == y_batch)
        return acc / X_test.shape[0]

    @property
    def hyperparams(self):
        return {
            "init_w": self.init_w,
            "loss": str(self.loss),
            "optimizer": self.optimizer,
            "hidden_dims_1": self.hidden_dims_1,
            "hidden_dims_2": self.hidden_dims_2,
            "components": {k: v.params for k, v in self.layers.items()},
        }

Training and testing:

""" 訓練""" # model = DFN(hidden_dims_1=200, hidden_dims_2=10) # model.fit(X_train, y_train, n_epochs=20) # print(model.evaluate(X_test,y_test)) #model = DFN2(hidden_dims_1=200, hidden_dims_2=10) model.fit(X_train, y_train, n_epochs=20, batch_size=64)print("accuracy:{}".format(model.evaluate(X_test, y_test)))

Results:

[Epoch 1] Avg. loss: 1.863 Delta: inf (0.03m/epoch)
[Epoch 2] Avg. loss: 1.164 Delta: 0.699 (0.03m/epoch)
[Epoch 3] Avg. loss: 0.830 Delta: 0.334 (0.03m/epoch)
[Epoch 4] Avg. loss: 0.654 Delta: 0.176 (0.03m/epoch)
[Epoch 5] Avg. loss: 0.558 Delta: 0.095 (0.03m/epoch)
[Epoch 6] Avg. loss: 0.504 Delta: 0.055 (0.03m/epoch)
[Epoch 7] Avg. loss: 0.466 Delta: 0.038 (0.03m/epoch)
[Epoch 8] Avg. loss: 0.442 Delta: 0.024 (0.03m/epoch)
[Epoch 9] Avg. loss: 0.422 Delta: 0.021 (0.03m/epoch)
[Epoch 10] Avg. loss: 0.407 Delta: 0.014 (0.03m/epoch)
[Epoch 11] Avg. loss: 0.397 Delta: 0.010 (0.03m/epoch)
[Epoch 12] Avg. loss: 0.384 Delta: 0.013 (0.03m/epoch)
[Epoch 13] Avg. loss: 0.376 Delta: 0.009 (0.03m/epoch)
[Epoch 14] Avg. loss: 0.370 Delta: 0.005 (0.03m/epoch)
[Epoch 15] Avg. loss: 0.363 Delta: 0.008 (0.03m/epoch)
[Epoch 16] Avg. loss: 0.356 Delta: 0.007 (0.03m/epoch)
[Epoch 17] Avg. loss: 0.350 Delta: 0.006 (0.03m/epoch)
[Epoch 18] Avg. loss: 0.345 Delta: 0.005 (0.03m/epoch)
[Epoch 19] Avg. loss: 0.339 Delta: 0.006 (0.03m/epoch)
[Epoch 20] Avg. loss: 0.339 Delta: 0.000 (0.03m/epoch)
accuracy:0.9131

Summary

That concludes these notes on BatchNorm batch normalization; hopefully the implementation above helps with any problems you run into.
