程序師世界是廣大編程愛好者互助、分享、學習的平台,程序師世界有你更精彩!
首頁
編程語言
C語言|JAVA編程
Python編程
網頁編程
ASP編程|PHP編程
JSP編程
數據庫知識
MYSQL數據庫|SqlServer數據庫
Oracle數據庫|DB2數據庫
 程式師世界 >> 編程語言 >> C語言 >> C++ >> 關於C++ >> caffe的c++接口

caffe的c++接口

編輯:關於C++

caffe的c++接口:caffe中的C++接口怎麼調用呢?有什麼方法能夠快速調用呢?接下來紅黑小編就來介紹一下caffe的c++接口的調用方法,希望對大家有所幫助。
接口可以完全按照官網的分類cpp文件調用學習。

classification.cpp

#include <caffe/caffe.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <algorithm>
#include <fstream>
#include <iomanip>
#include <iosfwd>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

using namespace caffe;  // NOLINT(build/namespaces)
using std::string;

/* Pair (label, confidence) representing a prediction.
 * The template arguments were stripped by the HTML scrape; restored here. */
typedef std::pair<string, float> Prediction;

class Classifier {
 public:
  Classifier(const string& model_file,
             const string& trained_file,
             const string& mean_file,
             const string& label_file);

  std::vector Classify(const cv::Mat& img, int N = 5);

 private:
  void SetMean(const string& mean_file);

  std::vector Predict(const cv::Mat& img);

  void WrapInputLayer(std::vector* input_channels);

  void Preprocess(const cv::Mat& img,
                  std::vector* input_channels);

 private:
  shared_ptr > net_;
  cv::Size input_geometry_;
  int num_channels_;
  cv::Mat mean_;
  std::vector labels_;
};

/* Build a classifier: load the net definition and weights, read the
 * mean image and the label list, and validate that they all agree. */
Classifier::Classifier(const string& model_file,
                       const string& trained_file,
                       const string& mean_file,
                       const string& label_file) {
#ifdef CPU_ONLY
  Caffe::set_mode(Caffe::CPU);
#else
  Caffe::set_mode(Caffe::GPU);
#endif

  /* Load the network. */
  net_.reset(new Net<float>(model_file, TEST));
  net_->CopyTrainedLayersFrom(trained_file);

  CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input.";
  CHECK_EQ(net_->num_outputs(), 1) << "Network should have exactly one output.";

  Blob<float>* input_layer = net_->input_blobs()[0];
  num_channels_ = input_layer->channels();
  CHECK(num_channels_ == 3 || num_channels_ == 1)
    << "Input layer should have 1 or 3 channels.";
  input_geometry_ = cv::Size(input_layer->width(), input_layer->height());

  /* Load the binaryproto mean file. */
  SetMean(mean_file);

  /* Load labels, one per line. */
  std::ifstream labels(label_file.c_str());
  CHECK(labels) << "Unable to open labels file " << label_file;
  string line;
  while (std::getline(labels, line))
    labels_.push_back(string(line));

  Blob<float>* output_layer = net_->output_blobs()[0];
  CHECK_EQ(labels_.size(), output_layer->channels())
    << "Number of labels is different from the output layer dimension.";
}

/* Order (score, index) pairs by descending score. */
static bool PairCompare(const std::pair<float, int>& lhs,
                        const std::pair<float, int>& rhs) {
  return lhs.first > rhs.first;
}

/* Return the indices of the top N values of vector v, best first.
 * N is clamped to v.size(): the original ran partial_sort past the
 * end of the range when N exceeded the number of elements. */
static std::vector<int> Argmax(const std::vector<float>& v, int N) {
  std::vector<std::pair<float, int> > pairs;
  for (size_t i = 0; i < v.size(); ++i)
    pairs.push_back(std::make_pair(v[i], static_cast<int>(i)));
  if (N > static_cast<int>(pairs.size()))
    N = static_cast<int>(pairs.size());
  std::partial_sort(pairs.begin(), pairs.begin() + N, pairs.end(), PairCompare);

  std::vector<int> result;
  for (int i = 0; i < N; ++i)
    result.push_back(pairs[i].second);
  return result;
}

/* Return the top N predictions. */
std::vector Classifier::Classify(const cv::Mat& img, int N) {
  std::vector output = Predict(img);

  std::vector maxN = Argmax(output, N);
  std::vector predictions;
  for (int i = 0; i < N; ++i) {
    int idx = maxN[i];
    predictions.push_back(std::make_pair(labels_[idx], output[idx]));
  }

  return predictions;
}

/* Load the mean file in binaryproto format. */
void Classifier::SetMean(const string& mean_file) {
  BlobProto blob_proto;
  ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);

  /* Convert from BlobProto to Blob */
  Blob mean_blob;
  mean_blob.FromProto(blob_proto);
  CHECK_EQ(mean_blob.channels(), num_channels_)
    << "Number of channels of mean file doesn't match input layer.";

  /* The format of the mean file is planar 32-bit float BGR or grayscale. */
  std::vector channels;
  float* data = mean_blob.mutable_cpu_data();
  for (int i = 0; i < num_channels_; ++i) {
    /* Extract an individual channel. */
    cv::Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data);
    channels.push_back(channel);
    data += mean_blob.height() * mean_blob.width();
  }

  /* Merge the separate channels into a single image. */
  cv::Mat mean;
  cv::merge(channels, mean);

  /* Compute the global mean pixel value and create a mean image
   * filled with this value. */
  cv::Scalar channel_mean = cv::mean(mean);
  mean_ = cv::Mat(input_geometry_, mean.type(), channel_mean);
}

/* Run one forward pass on |img| and return the output-layer scores. */
std::vector<float> Classifier::Predict(const cv::Mat& img) {
  Blob<float>* input_layer = net_->input_blobs()[0];
  input_layer->Reshape(1, num_channels_,
                       input_geometry_.height, input_geometry_.width);
  /* Forward dimension change to all layers. */
  net_->Reshape();

  std::vector<cv::Mat> input_channels;
  WrapInputLayer(&input_channels);

  Preprocess(img, &input_channels);

  net_->ForwardPrefilled();

  /* Copy the output layer to a std::vector. */
  Blob<float>* output_layer = net_->output_blobs()[0];
  const float* begin = output_layer->cpu_data();
  const float* end = begin + output_layer->channels();
  return std::vector<float>(begin, end);
}

/* Wrap the input layer of the network in separate cv::Mat objects
 * (one per channel). This way we save one memcpy operation and we
 * don't need to rely on cudaMemcpy2D. The last preprocessing
 * operation will write the separate channels directly to the input
 * layer. */
void Classifier::WrapInputLayer(std::vector* input_channels) {
  Blob* input_layer = net_->input_blobs()[0];

  int width = input_layer->width();
  int height = input_layer->height();
  float* input_data = input_layer->mutable_cpu_data();
  for (int i = 0; i < input_layer->channels(); ++i) {
    cv::Mat channel(height, width, CV_32FC1, input_data);
    input_channels->push_back(channel);
    input_data += width * height;
  }
}

/* Convert |img| to the network's input format (channel count, size,
 * float depth), subtract the mean, and split the planes straight into
 * the blob-backed |input_channels|. */
void Classifier::Preprocess(const cv::Mat& img,
                            std::vector<cv::Mat>* input_channels) {
  /* Convert the input image to the input image format of the network. */
  cv::Mat sample;
  if (img.channels() == 3 && num_channels_ == 1)
    cv::cvtColor(img, sample, CV_BGR2GRAY);
  else if (img.channels() == 4 && num_channels_ == 1)
    cv::cvtColor(img, sample, CV_BGRA2GRAY);
  else if (img.channels() == 4 && num_channels_ == 3)
    cv::cvtColor(img, sample, CV_BGRA2BGR);
  else if (img.channels() == 1 && num_channels_ == 3)
    cv::cvtColor(img, sample, CV_GRAY2BGR);
  else
    sample = img;

  cv::Mat sample_resized;
  if (sample.size() != input_geometry_)
    cv::resize(sample, sample_resized, input_geometry_);
  else
    sample_resized = sample;

  cv::Mat sample_float;
  if (num_channels_ == 3)
    sample_resized.convertTo(sample_float, CV_32FC3);
  else
    sample_resized.convertTo(sample_float, CV_32FC1);

  cv::Mat sample_normalized;
  cv::subtract(sample_float, mean_, sample_normalized);

  /* This operation will write the separate BGR planes directly to the
   * input layer of the network because it is wrapped by the cv::Mat
   * objects in input_channels. */
  cv::split(sample_normalized, *input_channels);

  CHECK(reinterpret_cast<float*>(input_channels->at(0).data)
        == net_->input_blobs()[0]->cpu_data())
    << "Input channels are not wrapping the input layer of the network.";
}

int main(int argc, char** argv) {
  if (argc != 6) {
    std::cerr << "Usage: " << argv[0]
              << " deploy.prototxt network.caffemodel"
              << " mean.binaryproto labels.txt img.jpg" << std::endl;
    return 1;
  }

  ::google::InitGoogleLogging(argv[0]);

  string model_file   = argv[1];
  string trained_file = argv[2];
  string mean_file    = argv[3];
  string label_file   = argv[4];
  Classifier classifier(model_file, trained_file, mean_file, label_file);

  string file = argv[5];

  std::cout << "---------- Prediction for "
            << file << " ----------" << std::endl;

  cv::Mat img = cv::imread(file, -1);
  CHECK(!img.empty()) << "Unable to decode image " << file;
  std::vector predictions = classifier.Classify(img);

  /* Print the top N predictions. */
  for (size_t i = 0; i < predictions.size(); ++i) {
    Prediction p = predictions[i];
    std::cout << std::fixed << std::setprecision(4) << p.second << " - \""
              << p.first << "\"" << std::endl;
  }
}

海康的星空下的巫師一個腳本:

 

/************************ Initialize the network **************************/
#include "caffe/caffe.hpp"
#include <string>
#include <vector>
using namespace caffe;

const char *proto = "H:\\Models\\Caffe\\deploy.prototxt"; /* CaffeNet deploy configuration */
Phase phase = TEST; /* or TRAIN */
Caffe::set_mode(Caffe::CPU);
// Caffe::set_mode(Caffe::GPU);
// Caffe::SetDevice(0);

//! Note: every "net" referred to below is this net
boost::shared_ptr< Net<float> > net(new caffe::Net<float>(proto, phase));
/************************ Load the pre-trained model **************************/
const char *model = "H:\\Models\\Caffe\\bvlc_reference_caffenet.caffemodel";
net->CopyTrainedLayersFrom(model);
/************ Read each layer's structure parameters from a model ************/

/* NOTE: reuses the |model| path declared in the snippet above; the original
 * redeclared it here, which would not compile in a single translation unit.
 * Also fixes the scrape artifact "¶m" back to "&param". */
NetParameter param;
ReadNetParamsFromBinaryFileOrDie(model, &param);
int num_layers = param.layer_size();
for (int i = 0; i < num_layers; ++i)
{
    // structure parameters: name, type, kernel size, pad, stride, etc.
    LOG(ERROR) << "Layer " << i << ":" << param.layer(i).name() << "\t" << param.layer(i).type();
    if (param.layer(i).type() == "Convolution")
    {
        ConvolutionParameter conv_param = param.layer(i).convolution_param();
        LOG(ERROR) << "\t\tkernel size: " << conv_param.kernel_size()
            << ", pad: " << conv_param.pad()
            << ", stride: " << conv_param.stride();
    }
}
/************************ Read the image mean **************************/

const char *mean_file = "H:\\Models\\Caffe\\imagenet_mean.binaryproto";
Blob<float> image_mean;
BlobProto blob_proto;
const float *mean_ptr;
unsigned int num_pixel;

bool succeed = ReadProtoFromBinaryFile(mean_file, &blob_proto);
if (succeed)
{
    image_mean.FromProto(blob_proto);
    num_pixel = image_mean.count(); /* NCHW = 1x3x256x256 = 196608 */
    mean_ptr = (const float *) image_mean.cpu_data();
}
/************ Forward the network on caller-prepared input data ************/

//! Note: data_ptr points to data that has already been prepared
//! (mean-subtracted, matching the network's input size and batch size).
void caffe_forward(boost::shared_ptr< Net<float> > & net, float *data_ptr)
{
    Blob<float>* input_blobs = net->input_blobs()[0];
    switch (Caffe::mode())
    {
    case Caffe::CPU:
        memcpy(input_blobs->mutable_cpu_data(), data_ptr,
            sizeof(float) * input_blobs->count());
        break;
    case Caffe::GPU:
        cudaMemcpy(input_blobs->mutable_gpu_data(), data_ptr,
            sizeof(float) * input_blobs->count(), cudaMemcpyHostToDevice);
        break;
    default:
        LOG(FATAL) << "Unknown Caffe mode.";
    }
    net->ForwardPrefilled();
}
/*********************根據Feature層的名字獲取其在網絡中的Index******************/

//! Note: Net的Blob是指,每個層的輸出數據,即Feature Maps
// char *query_blob_name = "conv1";
unsigned int get_blob_index(boost::shared_ptr< Net > & net, char *query_blob_name)
{
    std::string str_query(query_blob_name);    
    vector< string > const & blob_names = net->blob_names();
    for( unsigned int i = 0; i != blob_names.size(); ++i ) 
    { 
        if( str_query == blob_names[i] ) 
        { 
            return i;
        } 
    }
    LOG(FATAL) << "Unknown blob name: " << str_query;
}
/************ Read the data of a named feature blob ************/

//! Note: per CaffeNet's deploy.prototxt this net has 15 blobs, from "data" to "prob".
char *query_blob_name = "conv1"; /* data, conv1, pool1, norm1, fc6, prob, etc */
unsigned int blob_id = get_blob_index(net, query_blob_name);

boost::shared_ptr<Blob<float> > blob = net->blobs()[blob_id];
unsigned int num_data = blob->count(); /* NCHW = 10x96x55x55 */
const float *blob_ptr = (const float *) blob->cpu_data();
/*********************根據文件列表,獲取特征,並存為二進制文件*****************/

/*
主要包括三個步驟

生成文件列表,格式與訓練用的類似,每行一個圖像 包括文件全路徑、空格、標簽(沒有的話,可以置0)
根據train_val或者deploy的prototxt,改寫生成feat.prototxt 主要是將輸入層改為image_data層,最後加上prob和argmax(為了輸出概率和Top1/5預測標簽)
根據指定參數,運行程序後會生成若干個二進制文件,可以用MATLAB讀取數據,進行分析
根據Layer的名字獲取其在網絡中的Index
*/
//! Note: Layer包括神經網絡所有層,比如,CaffeNet共有23層
// char *query_layer_name = "conv1";
unsigned int get_layer_index(boost::shared_ptr< Net > & net, char *query_layer_name)
{
    std::string str_query(query_layer_name);    
    vector< string > const & layer_names = net->layer_names();
    for( unsigned int i = 0; i != layer_names.size(); ++i ) 
    { 
        if( str_query == layer_names[i] ) 
        { 
            return i;
        } 
    }
    LOG(FATAL) << "Unknown layer name: " << str_query;
}
/*********************讀取指定Layer的權重數據*****************/

//! Note: 不同於Net的Blob是Feature Maps,Layer的Blob是指Conv和FC等層的Weight和Bias
char *query_layer_name = "conv1";
const float *weight_ptr, *bias_ptr;
unsigned int layer_id = get_layer_index(net, query_layer_name);
boost::shared_ptr > layer = net->layers()[layer_id];
std::vector  >> blobs = layer->blobs();
if (blobs.size() > 0)
{
    weight_ptr = (const float *) blobs[0]->cpu_data();
    bias_ptr = (const float *) blobs[1]->cpu_data();
}

//! Note: 訓練模式下,讀取指定Layer的梯度數據,與此相似,唯一的區別是將cpu_data改為cpu_diff
/************ Modify a layer's weight data ************/

const float* data_ptr;          /* source: data to be written */
float* weight_ptr = NULL;       /* destination: a layer's weights inside the net */
unsigned int data_size;         /* number of values to write */
char *layer_name = "conv1";     /* name of the layer to modify */

/* BUG FIX: the original looked up query_layer_name (from another snippet)
 * instead of the layer_name declared just above. */
unsigned int layer_id = get_layer_index(net, layer_name);
boost::shared_ptr<Blob<float> > blob = net->layers()[layer_id]->blobs()[0];

CHECK(data_size == blob->count());
switch (Caffe::mode())
{
case Caffe::CPU:
    weight_ptr = blob->mutable_cpu_data();
    break;
case Caffe::GPU:
    weight_ptr = blob->mutable_gpu_data();
    break;
default:
    LOG(FATAL) << "Unknown Caffe mode";
}
caffe_copy(blob->count(), data_ptr, weight_ptr);

//! Note: in training mode, modify a layer's gradients the same way:
// replace mutable_cpu_data with mutable_cpu_diff, mutable_gpu_data with mutable_gpu_diff
/************ Save the (possibly modified) model to a new file ************/

char* weights_file = "bvlc_reference_caffenet_new.caffemodel";
NetParameter net_param;
// Serialize the net into a NetParameter proto; second argument is
// presumably write_diff=false, i.e. skip gradient buffers -- confirm
// against caffe::Net::ToProto before relying on it.
net->ToProto(&net_param, false);
WriteProtoToBinaryFile(net_param, weights_file);
  1. 上一頁:
  2. 下一頁:
Copyright © 程式師世界 All Rights Reserved