caffe模型weightsfeatureMap 可视化(c++)
版權聲明:本文為博主原創文章,未經博主允許不得轉載。 https://blog.csdn.net/qq_14845119/article/details/74931602
caffe模型在訓練完成后,會生成一個*.caffemodel的文件,在運行的時候,直接調用caffe就可以讀取其中的相應權值參數。但是如果用一個第三方軟件打開這個,卻是不可以可視化的二值亂碼。
?
將模型中的參數導出,可編輯化后能有哪些好處呢,
(1)方便進行fpga平臺的移植
(2)可以基于別人訓練好的模型,0數據訓練自己的模型,使用自己的模型擬合別人模型的權值分布,達到用模型訓模型的目的。
(3)可以對網絡進行剪枝,加速等操作。
?
?
將模型中的特征圖和權值可視化有哪些好處呢,
(1)方便對卷積網絡的特征有所了解,訓練好的特征總是有規則的特征圖,可以側面輔助訓練過程。?
這里分析lenet5這樣的網絡結構,所有其他網絡都通用。
?
核心程序:
(1)只導出weights,不進行顯示
// Dump every layer's weights (and biases, if present) from a binary
// *.caffemodel protobuf into a human-readable text file.
//   caffemodel: path to the trained *.caffemodel file to parse.
//   outtxt:     path of the output file that receives the dump.
// Prints a short summary per layer to stdout as it goes.
void parse_caffemodel(string caffemodel, string outtxt)
{
printf("%s\n", caffemodel.c_str());
NetParameter net;
bool success = loadCaffemodel(caffemodel.c_str(), &net);
if (!success){
printf("讀取錯誤啦:%s\n", caffemodel.c_str());
return;
}
// BUG FIX: the original never checked fopen(); a failed open led to
// fprintf/fclose on a NULL FILE* (undefined behavior).
FILE* fmodel = fopen(outtxt.c_str(), "wb");
if (!fmodel){
printf("failed to open output file: %s\n", outtxt.c_str());
return;
}
for (int i = 0; i < net.layer_size(); ++i){
LayerParameter& param = *net.mutable_layer(i);
int n = param.mutable_blobs()->size();
if (n){
// blob(0) holds the layer weights.
const BlobProto& blob = param.blobs(0);
printf("layer: %s weight(%d)", param.name().c_str(), blob.data_size());
fprintf(fmodel, "\nlayer: %s weight(%d)\n", param.name().c_str(), blob.data_size());
writeData(fmodel, blob.data().data(), blob.data_size());
if (n > 1){
// blob(1), when present, holds the bias vector.
// FIX: label spelled "bais" in the original output; corrected to "bias".
const BlobProto& bias = param.blobs(1);
printf(" bias(%d)", bias.data_size());
fprintf(fmodel, "\nlayer: %s bias(%d)\n", param.name().c_str(), bias.data_size());
writeData(fmodel, bias.data().data(), bias.data_size());
}
printf("\n");
}
}
fclose(fmodel);
}
?
(2)weights可視化
// Visualize the weights of one layer of a trained Caffe net as a tiled image.
// Each filter (num axis) is drawn as one width x height tile; up to the first
// 3 input channels map onto the B/G/R channels of the output image.
//   prototxt:          network definition (deploy) file.
//   caffemodel:        trained weights file.
//   weights_layer_num: index into net.params() selecting which blob to draw.
// Returns a CV_8UC3 image with all filters tiled on a black background.
cv::Mat visualize_weights(string prototxt, string caffemodel, int weights_layer_num)
{
::google::InitGoogleLogging("0");
#ifdef CPU_ONLY
Caffe::set_mode(Caffe::CPU);
#else
Caffe::set_mode(Caffe::GPU);
#endif
Net<float> net(prototxt, TEST);
net.CopyTrainedLayersFrom(caffemodel);
vector<shared_ptr<Blob<float> > > params = net.params();
std::cout << "各層參數的維度信息為:\n";
for (int i = 0; i < params.size(); ++i)
std::cout << params[i]->shape_string() << std::endl;
// Blob layout is (num, channel, height, width).
int width = params[weights_layer_num]->shape(3);
int height = params[weights_layer_num]->shape(2);
int channel = params[weights_layer_num]->shape(1);
int num = params[weights_layer_num]->shape(0);
// Grid side length: ceil-ish sqrt so all `num` filters fit in the mosaic.
int imgHeight = (int)(1 + sqrt(num)) * height;
int imgWidth = (int)(1 + sqrt(num)) * width;
Mat img(imgHeight, imgWidth, CV_8UC3, Scalar(0, 0, 0));
// Find the global min/max for normalization to [0, 255].
float maxValue = -1000, minValue = 10000;
const float* tmpValue = params[weights_layer_num]->cpu_data();
for (int i = 0; i < params[weights_layer_num]->count(); i++){
maxValue = std::max(maxValue, tmpValue[i]);
minValue = std::min(minValue, tmpValue[i]);
}
// BUG FIX: guard against division by zero when all weights are identical.
float range = maxValue - minValue;
if (range == 0) range = 1;
// BUG FIX: a Vec3b pixel has exactly 3 channels; the original wrote
// roi.at<Vec3b>(i, j)[k] for k up to `channel`, which is out of bounds
// whenever the layer has more than 3 input channels.
int drawChannels = std::min(channel, 3);
int kk = 0;
for (int y = 0; y < imgHeight && kk < num; y += height){
for (int x = 0; x < imgWidth && kk < num; x += width){
Mat roi = img(Rect(x, y, width, height));
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){
for (int k = 0; k < drawChannels; k++){
float value = params[weights_layer_num]->data_at(kk, k, i, j);
roi.at<Vec3b>(i, j)[k] = (value - minValue) / range * 255;
}
}
}
++kk;
}
}
return img;
}
(3)featuremap可視化
// Run one forward pass on `img` and visualize the response maps of the blob
// named `layer_name` as a tiled grayscale image (one tile per channel,
// normalized to [0, 255] over the whole blob).
//   img:        input image; preprocessed via the classifier's usual pipeline.
//   layer_name: name of the blob to visualize (must exist in the net).
// Returns a CV_8UC1 mosaic of all channels of the requested feature map.
cv::Mat Classifier::visualize_featuremap(const cv::Mat& img, string layer_name)
{
Blob<float>* input_layer = net_->input_blobs()[0];
input_layer->Reshape(1, num_channels_, input_geometry_.height, input_geometry_.width);
net_->Reshape();
std::vector<cv::Mat> input_channels;
WrapInputLayer(&input_channels);
Preprocess(img, &input_channels);
net_->Forward();
std::cout << "網絡中的Blobs名稱為:\n";
vector<shared_ptr<Blob<float> > > blobs = net_->blobs();
vector<string> blob_names = net_->blob_names();
std::cout << blobs.size() << " " << blob_names.size() << std::endl;
for (int i = 0; i < blobs.size(); i++){
std::cout << blob_names[i] << " " << blobs[i]->shape_string() << std::endl;
}
std::cout << std::endl;
assert(net_->has_blob(layer_name));
shared_ptr<Blob<float> > conv1Blob = net_->blob_by_name(layer_name);
std::cout << "測試圖片的特征響應圖的形狀信息為:" << conv1Blob->shape_string() << std::endl;
// Global min/max over the blob for normalization to [0, 255].
float maxValue = -10000000, minValue = 10000000;
const float* tmpValue = conv1Blob->cpu_data();
for (int i = 0; i < conv1Blob->count(); i++){
maxValue = std::max(maxValue, tmpValue[i]);
minValue = std::min(minValue, tmpValue[i]);
}
// BUG FIX: avoid division by zero when the feature map is constant.
float range = maxValue - minValue;
if (range == 0) range = 1;
// Blob layout is (num, channel, height, width).
// FIX: the original comments had width/height swapped.
int width = conv1Blob->shape(3); // feature-map width
int height = conv1Blob->shape(2); // feature-map height
int channel = conv1Blob->shape(1); // number of channels
int num = conv1Blob->shape(0); // batch size
// Grid side length: enough tiles to fit every channel.
int imgHeight = (int)(1 + sqrt(channel)) * height;
int imgWidth = (int)(1 + sqrt(channel)) * width;
// Renamed from `img` to avoid shadowing the input parameter.
cv::Mat vis(imgHeight, imgWidth, CV_8UC1, cv::Scalar(0));
int kk = 0;
for (int x = 0; x < imgHeight && kk < channel; x += height){
for (int y = 0; y < imgWidth && kk < channel; y += width){
cv::Mat roi = vis(cv::Rect(y, x, width, height));
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){
float value = conv1Blob->data_at(0, kk, i, j);
roi.at<uchar>(i, j) = (value - minValue) / range * 255;
}
}
kk++;
}
}
return vis;
}
?
運行結果:
(1)
string caffemodel = "lenet_iter_10000.caffemodel";
string outtxt = "lenet.txt";
parse_caffemodel(caffemodel, outtxt);
?
?
?
(2)
string prototxt = "lenet.prototxt";
string caffemodel = "lenet_iter_10000.caffemodel";
int weights_layer_num = 0;
Mat image=visualize_weights(prototxt, caffemodel, weights_layer_num);
imshow("weights", image);
waitKey(0);
?
?
(3)
::google::InitGoogleLogging(argv[0]);
string model_file = "lenet.prototxt";
string trained_file = "lenet_iter_10000.caffemodel";
Classifier classifier(model_file, trained_file);
string file = "5.jpg";
cv::Mat img = cv::imread(file, -1);
CHECK(!img.empty()) << "Unable to decode image " << file;
cv::Mat feature_map = classifier.visualize_featuremap(img,"conv2");
imshow("feature_map", feature_map);
cv::waitKey(0);
?
?
?
?
將權值導入matlab中,可以看到權值基本都是服從均值為0,方差很小的分布。
?
?
?
完整程序下載鏈接:http://download.csdn.net/detail/qq_14845119/9895412
總結
以上是生活随笔為你收集整理的caffe模型weightsfeatureMap 可视化(c++)的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: 协方差代表的意义是什么?
- 下一篇: 简单的加密/解密算法_/c++