Caffe
python_layer.hpp
#ifndef CAFFE_PYTHON_LAYER_HPP_
#define CAFFE_PYTHON_LAYER_HPP_

#include <boost/python.hpp>
#include <vector>

#include "caffe/layer.hpp"

namespace bp = boost::python;

namespace caffe {

template <typename Dtype>
class PythonLayer : public Layer<Dtype> {
 public:
  PythonLayer(PyObject* self, const LayerParameter& param)
      : Layer<Dtype>(param), self_(bp::handle<>(bp::borrowed(self))) { }

  // Does layer-specific setup: passes param_str and phase through to the
  // wrapped Python object, then delegates to its setup() method.
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    // Disallow PythonLayer in Multi-GPU training, due to GIL issues
    // Details: https://github.com/BVLC/caffe/issues/2936
    if (this->phase_ == TRAIN && Caffe::solver_count() > 1
        && !ShareInParallel()) {
      LOG(FATAL) << "PythonLayer is not implemented in Multi-GPU training";
    }
    self_.attr("param_str") = bp::str(
        this->layer_param_.python_param().param_str());
    self_.attr("phase") = static_cast<int>(this->phase_);
    self_.attr("setup")(bottom, top);
  }

  // Adjust the shapes of top blobs and internal buffers to accommodate
  // the shapes of the bottom blobs.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    self_.attr("reshape")(bottom, top);
  }

  // Whether a layer should be shared by multiple nets during data
  // parallelism. By default, all layers except for data layers should
  // not be shared.
  virtual inline bool ShareInParallel() const {
    return this->layer_param_.python_param().share_in_parallel();
  }

  // Returns the layer type.
  virtual inline const char* type() const { return "Python"; }

 protected:
  // Using the CPU device, compute the layer output.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    self_.attr("forward")(bottom, top);
  }

  // Using the CPU device, compute the gradients for any parameters and
  // for the bottom blobs if propagate_down is true.
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    self_.attr("backward")(top, propagate_down, bottom);
  }

 private:
  bp::object self_;
};

}  // namespace caffe

#endif
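
The header above is only the C++ side of the bridge: each virtual method hands control to the corresponding method on the wrapped Python object, and LayerSetUp sets the param_str and phase attributes on it first. A minimal sketch of the Python side is shown below, assuming pycaffe is importable; ScaleLayer and its scaling logic are hypothetical, chosen simply to exercise all four hooks.

import caffe  # assumes pycaffe is on PYTHONPATH


class ScaleLayer(caffe.Layer):
    """Hypothetical layer that multiplies its input by a constant."""

    def setup(self, bottom, top):
        # self.param_str and self.phase were set by LayerSetUp above
        self.scale = float(self.param_str) if self.param_str else 1.0
        if len(bottom) != 1:
            raise Exception("ScaleLayer expects a single bottom blob")

    def reshape(self, bottom, top):
        # Top blob mirrors the bottom blob's shape
        top[0].reshape(*bottom[0].data.shape)

    def forward(self, bottom, top):
        top[0].data[...] = self.scale * bottom[0].data

    def backward(self, top, propagate_down, bottom):
        # Chain rule: d(scale * x)/dx = scale
        if propagate_down[0]:
            bottom[0].diff[...] = self.scale * top[0].diff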
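
To wire such a layer into a net, the prototxt declares a layer of type "Python" (matching the type() string above) and names the module and class via python_param; param_str is surfaced to the layer as self.param_str. The snippet below assumes the class above is saved as scale_layer.py somewhere on PYTHONPATH.

layer {
  name: "scale"
  type: "Python"
  bottom: "data"
  top: "scaled"
  python_param {
    module: "scale_layer"  # the .py file, importable on PYTHONPATH
    layer: "ScaleLayer"    # the class defined in that module
    param_str: "2.0"       # passed through as self.param_str
  }
}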