Dropout_layer.cpp (preventing overfitting)
The dropout layer exists to prevent overfitting during training. In conventional training, every iteration through a layer updates all of its nodes, so the whole network is trained at once. With a dropout layer, the nodes of the weight layer are instead randomly sampled at a retaining probability p, only the sampled nodes participate in the update, and that random sub-network becomes the target network for the current update. Because random nodes are switched off at every iteration, no feature can be useful only in one fixed combination with others; the network is deliberately pushed to learn broadly shared patterns rather than quirks of particular training samples, which makes the trained model more robust.
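To make the mechanics concrete before diving into the Caffe source, here is a minimal, self-contained sketch of inverted dropout on a plain float vector (an illustration only, not Caffe code): each activation is kept with probability 1 - p and the survivors are rescaled by 1/(1-p), so the expected output is E[y] = (1-p) · x/(1-p) + p · 0 = x and the test-time pass can be a plain identity.

```cpp
#include <cstddef>
#include <random>
#include <vector>

// Minimal inverted-dropout sketch (illustration only, not the Caffe code).
// Each activation is kept independently with probability (1 - p) and the
// kept ones are rescaled by 1 / (1 - p); at test time the input is returned
// unchanged, since the rescaling already preserves the expected activation.
std::vector<float> dropout_forward(const std::vector<float>& x, float p,
                                   bool train, std::mt19937& rng) {
  if (!train) return x;                        // test time: identity
  std::bernoulli_distribution keep(1.0 - p);   // P(keep) = 1 - p
  const float scale = 1.0f / (1.0f - p);
  std::vector<float> y(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    y[i] = keep(rng) ? x[i] * scale : 0.0f;    // keep & rescale, or drop
  }
  return y;
}
```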
Below are the notes I made while reading the dropout layer code; if you spot any mistakes, please point them out.
Dropout_layer.hpp:
```cpp
#ifndef CAFFE_DROPOUT_LAYER_HPP_
#define CAFFE_DROPOUT_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"

namespace caffe {

/**
 * @brief During training only, sets a random portion of @f$x@f$ to 0, adjusting
 *        the rest of the vector magnitude accordingly.
 *
 * @param bottom input Blob vector (length 1)
 *   -# @f$ (N \times C \times H \times W) @f$
 *      the inputs @f$ x @f$
 * @param top output Blob vector (length 1)
 *   -# @f$ (N \times C \times H \times W) @f$
 *      the computed outputs @f$ y = |x| @f$
 */
/* DropoutLayer inherits from NeuronLayer */
template <typename Dtype>
class DropoutLayer : public NeuronLayer<Dtype> {
 public:
  /**
   * @param param provides DropoutParameter dropout_param,
   *     with DropoutLayer options:
   *   - dropout_ratio (\b optional, default 0.5).
   *     Sets the probability @f$ p @f$ that any given unit is dropped.
   */
  /* constructor */
  explicit DropoutLayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}
  /* setup function */
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /* reshapes the input/output blobs and allocates memory */
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /* returns the type name of this layer */
  virtual inline const char* type() const { return "Dropout"; }

 protected:
  /**
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$
   * @param top output Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the computed outputs. At training time, we have @f$
   *      y_{\mbox{train}} = \left\{
   *        \begin{array}{ll}
   *          \frac{x}{1 - p} & \mbox{if } u > p \\
   *          0 & \mbox{otherwise}
   *        \end{array} \right.
   *      @f$, where @f$ u \sim U(0, 1)@f$ is generated independently for each
   *      input at each iteration. At test time, we simply have
   *      @f$ y_{\mbox{test}} = \mathbb{E}[y_{\mbox{train}}] = x @f$.
   */
  /* CPU forward pass */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /* GPU forward pass */
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /* CPU backward pass */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  /* GPU backward pass */
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  /// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$
  /* Blob holding the Bernoulli random mask */
  Blob<unsigned int> rand_vec_;
  /// the probability @f$ p @f$ of dropping any input
  /* probability that an input is dropped, i.e. left out of this iteration */
  Dtype threshold_;
  /// the scale for undropped inputs at train time @f$ 1 / (1 - p) @f$
  /* scale_ == 1 / (1 - threshold_) */
  Dtype scale_;
  /* not used in this CPU code; the GPU path thresholds uniform random
     unsigned ints against it (see LayerSetUp in the .cpp below) */
  unsigned int uint_thres_;
};

}  // namespace caffe

#endif  // CAFFE_DROPOUT_LAYER_HPP_
```
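For context, this is how the layer is typically enabled from a network definition; the dropout_ratio field below is what feeds threshold_ in LayerSetUp. The layer and blob names here are made up for illustration:

```
layer {
  name: "drop1"
  type: "Dropout"
  bottom: "fc1"
  top: "fc1"
  dropout_param {
    dropout_ratio: 0.5
  }
}
```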
Dropout_layer.cpp:
```cpp
// TODO (sergeyk): effect should not be dependent on phase. wasted memcpy.

#include <vector>

#include "caffe/layers/dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

/* set up the dropout layer; NeuronLayer::LayerSetUp does the common setup */
template <typename Dtype>
void DropoutLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  /* dropout probability from the prototxt: each input is dropped
     independently with probability threshold_ */
  threshold_ = this->layer_param_.dropout_param().dropout_ratio();
  DCHECK(threshold_ > 0.);
  DCHECK(threshold_ < 1.);
  /* (1. - threshold_) is the probability that an input is kept */
  scale_ = 1. / (1. - threshold_);
  uint_thres_ = static_cast<unsigned int>(UINT_MAX * threshold_); /* only the GPU path uses this */
}

/* reshape and memory allocation; NeuronLayer::Reshape already handles
   reshaping top to match bottom */
template <typename Dtype>
void DropoutLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::Reshape(bottom, top);
  // Set up the cache for random number generation
  // ReshapeLike does not work because rand_vec_ is of Dtype uint
  /* this layer allocates its own buffer for the Bernoulli random mask */
  rand_vec_.Reshape(bottom[0]->shape());
}

/* forward pass of the dropout layer */
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();  /* input from the previous layer */
  Dtype* top_data = top[0]->mutable_cpu_data();      /* output to the next layer */
  unsigned int* mask = rand_vec_.mutable_cpu_data(); /* Bernoulli random mask */
  const int count = bottom[0]->count();              /* number of elements in the input blob */
  if (this->phase_ == TRAIN) {                       /* training phase */
    // Create random numbers
    caffe_rng_bernoulli(count, 1. - threshold_, mask); /* draw the 0/1 keep-mask */
    for (int i = 0; i < count; ++i) {
      top_data[i] = bottom_data[i] * mask[i] * scale_; /* keep and rescale, or zero out */
    }
  } else {
    caffe_copy(bottom[0]->count(), bottom_data, top_data); /* at test time every input passes through */
  }
}

/* backward pass of the dropout layer */
template <typename Dtype>
void DropoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, /* records whether each bottom blob needs gradients */
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) { /* if gradients should be propagated to this bottom */
    const Dtype* top_diff = top[0]->cpu_diff();         /* gradient coming from the next layer */
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); /* gradient going to the previous layer */
    if (this->phase_ == TRAIN) { /* training phase */
      const unsigned int* mask = rand_vec_.cpu_data();  /* the same mask drawn in the forward pass */
      const int count = bottom[0]->count();             /* number of elements */
      for (int i = 0; i < count; ++i) {
        bottom_diff[i] = top_diff[i] * mask[i] * scale_; /* gradient flows only through kept units */
      }
    } else {
      caffe_copy(top[0]->count(), top_diff, bottom_diff); /* outside training just copy the gradient */
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(DropoutLayer);
#endif

INSTANTIATE_CLASS(DropoutLayer);
REGISTER_LAYER_CLASS(Dropout);

}  // namespace caffe
```
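A note on uint_thres_, which the header comments above puzzled over: the CPU code never reads it, but the GPU implementation in dropout_layer.cu (not shown in this post) fills the mask with uniform random unsigned ints and compares them against uint_thres_ instead of calling a Bernoulli generator. A rough sketch of that idea as plain C++ (an illustration, not the actual kernel):

```cpp
#include <climits>  // UINT_MAX

// Sketch: thresholding a uniform unsigned int reproduces a Bernoulli draw.
// If u is uniform on [0, UINT_MAX], then (u > uint_thres) holds with
// probability roughly 1 - p when uint_thres == UINT_MAX * p, which is the
// same keep-mask the CPU path gets from caffe_rng_bernoulli.
inline float dropout_element(float x, unsigned int u,
                             unsigned int uint_thres, float scale) {
  return x * (u > uint_thres) * scale;  // keep & rescale, or zero out
}
```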