Changes from all commits
3 changes: 3 additions & 0 deletions include/caffe/vision_layers.hpp
@@ -270,6 +270,7 @@ class MemoryDataLayer : public Layer<Dtype> {
int datum_channels() { return datum_channels_; }
int datum_height() { return datum_height_; }
int datum_width() { return datum_width_; }
+  int datum_length() { return datum_length_; }
int batch_size() { return batch_size_; }

protected:
@@ -285,6 +286,7 @@ class MemoryDataLayer : public Layer<Dtype> {
int datum_channels_;
int datum_height_;
int datum_width_;
+  int datum_length_;
int datum_size_;
int batch_size_;
int n_;
@@ -315,6 +317,7 @@ class PoolingLayer : public Layer<Dtype> {
int channels_;
int height_;
int width_;
+  int length_;
int pooled_height_;
int pooled_width_;
Blob<Dtype> rand_idx_;
27 changes: 18 additions & 9 deletions python/caffe/_caffe.cpp
@@ -13,6 +13,7 @@
#include <string> // NOLINT(build/include_order)
#include <vector> // NOLINT(build/include_order)
#include <fstream> // NOLINT
+#include <iostream>

#include "caffe/caffe.hpp"

@@ -23,7 +24,7 @@
#define PyArray_SetBaseObject(arr, x) (PyArray_BASE(arr) = (x))
#endif


+using namespace std;
using namespace caffe; // NOLINT(build/namespaces)
using boost::python::extract;
using boost::python::len;
@@ -56,14 +57,14 @@ class CaffeBlob {
int num() const { return blob_->num(); }
int channels() const { return blob_->channels(); }
int height() const { return blob_->height(); }
+  int length() const { return blob_->length(); }
int width() const { return blob_->width(); }
int count() const { return blob_->count(); }

// this is here only to satisfy boost's vector_indexing_suite
bool operator == (const CaffeBlob &other) {
return this->blob_ == other.blob_;
}

protected:
shared_ptr<Blob<float> > blob_;
string name_;
@@ -79,9 +80,9 @@ class CaffeBlobWrap : public CaffeBlob {
: CaffeBlob(blob), self_(p) {}

object get_data() {
-    npy_intp dims[] = {num(), channels(), height(), width()};
+    npy_intp dims[] = {num(), channels(), height(), width(), length()};

-    PyObject *obj = PyArray_SimpleNewFromData(4, dims, NPY_FLOAT32,
+    PyObject *obj = PyArray_SimpleNewFromData(5, dims, NPY_FLOAT32,
blob_->mutable_cpu_data());
PyArray_SetBaseObject(reinterpret_cast<PyArrayObject *>(obj), self_);
Py_INCREF(self_);
@@ -91,9 +92,9 @@ class CaffeBlobWrap : public CaffeBlob {
}

object get_diff() {
-    npy_intp dims[] = {num(), channels(), height(), width()};
+    npy_intp dims[] = {num(), channels(), height(), width(), length()};

-    PyObject *obj = PyArray_SimpleNewFromData(4, dims, NPY_FLOAT32,
+    PyObject *obj = PyArray_SimpleNewFromData(5, dims, NPY_FLOAT32,
blob_->mutable_cpu_diff());
PyArray_SetBaseObject(reinterpret_cast<PyArrayObject *>(obj), self_);
Py_INCREF(self_);
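These two accessors hand NumPy a zero-copy 5-D view of the blob. A minimal sketch of what a caller now sees, with hypothetical model file names (note the wrapper's axis order is (num, channels, height, width, length), while Blob::Reshape in blob.cpp orders the axes (num, channels, length, height, width), so consumers of these arrays must follow the wrapper's order):

    import caffe  # the pycaffe module built from this branch

    # Hypothetical 5-D model definition and weights.
    net = caffe.Net('video_deploy.prototxt', 'video.caffemodel')
    blob = net.blobs['data']
    print blob.data.shape   # now 5-D, e.g. (10, 3, 227, 227, 16)
    print blob.data.dtype   # float32, a view over the blob's CPU memory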
@@ -142,6 +143,7 @@ struct CaffeNet {
}

CaffeNet(string param_file, string pretrained_param_file) {
+    //cout << " >>>>>>>>>>>>>>>>>> _caffe.cpp 145 CaffeNet" << param_file << " " << pretrained_param_file << endl;
Init(param_file);
CheckFile(pretrained_param_file);
net_->CopyTrainedLayersFrom(pretrained_param_file);
@@ -151,6 +153,7 @@
: net_(net) {}

void Init(string param_file) {
+    //cout << " >>>>>>>>>>>>>>>>>> _caffe.cpp 156 Init " << param_file << endl;
CheckFile(param_file);
net_.reset(new Net<float>(param_file));
}
@@ -160,7 +163,7 @@

// Generate Python exceptions for badly shaped or discontiguous arrays.
inline void check_contiguous_array(PyArrayObject* arr, string name,
-      int channels, int height, int width) {
+      int channels, int height, int width, int length) {
if (!(PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS)) {
throw std::runtime_error(name + " must be C contiguous");
}
@@ -179,6 +182,10 @@ struct CaffeNet {
if (PyArray_DIMS(arr)[3] != width) {
throw std::runtime_error(name + " has wrong width");
}
+    if (PyArray_DIMS(arr)[4] != length) {
+      throw std::runtime_error(name + " has wrong length");
+    }
}
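For reference, a sketch of arrays that would satisfy all five checks when handed to a memory-data network via set_input_arrays below (the sizes are made up; data must be C-contiguous float32, and labels need four trailing singleton axes):

    import numpy as np

    N, C, H, W, L = 32, 3, 227, 227, 16   # hypothetical batch and datum sizes
    data = np.ascontiguousarray(
        np.random.rand(N, C, H, W, L).astype(np.float32))
    labels = np.ascontiguousarray(
        np.arange(N, dtype=np.float32).reshape(N, 1, 1, 1, 1))
    net.set_input_arrays(data, labels)    # net as constructed in the sketch above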

void Forward() {
@@ -204,8 +211,8 @@ struct CaffeNet {
PyArrayObject* labels_arr =
reinterpret_cast<PyArrayObject*>(labels_obj.ptr());
check_contiguous_array(data_arr, "data array", md_layer->datum_channels(),
-                         md_layer->datum_height(), md_layer->datum_width());
-  check_contiguous_array(labels_arr, "labels array", 1, 1, 1);
+                         md_layer->datum_height(), md_layer->datum_width(), md_layer->datum_length());
+  check_contiguous_array(labels_arr, "labels array", 1, 1, 1, 1);
if (PyArray_DIMS(data_arr)[0] != PyArray_DIMS(labels_arr)[0]) {
throw std::runtime_error("data and labels must have the same first"
" dimension");
@@ -331,7 +338,9 @@ BOOST_PYTHON_MODULE(_caffe) {
.add_property("num", &CaffeBlob::num)
.add_property("channels", &CaffeBlob::channels)
.add_property("height", &CaffeBlob::height)
+      .add_property("length", &CaffeBlob::length)
.add_property("width", &CaffeBlob::width)
.add_property("count", &CaffeBlob::count)
.add_property("data", &CaffeBlobWrap::get_data)
.add_property("diff", &CaffeBlobWrap::get_diff);
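With length registered alongside the other dimension properties, all five axes are visible from Python. A quick interpreter sketch, assuming a loaded net:

    b = net.blobs.values()[0]
    print b.num, b.channels, b.length, b.height, b.width
    assert b.count == b.num * b.channels * b.length * b.height * b.width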
2 changes: 2 additions & 0 deletions python/caffe/classifier.py
@@ -22,6 +22,7 @@ def __init__(self, model_file, pretrained_file, image_dims=None,
gpu, mean_file, input_scale, channel_swap: convenience params for
setting mode, mean, input scale, and channel order.
"""
+        #print ">>>>>>>>>>>>>>>> classifier __init__ "
caffe.Net.__init__(self, model_file, pretrained_file)
self.set_phase_test()

@@ -56,6 +57,7 @@ def predict(self, inputs, oversample=True):
predictions: (N x C) ndarray of class probabilities
for N images and C classes.
"""
+        #print ">>>>>>>>>>>>>>>> classifier predict "
# Scale to standardize input dimensions.
inputs = np.asarray([caffe.io.resize_image(im, self.image_dims)
for im in inputs])
3 changes: 3 additions & 0 deletions python/caffe/detector.py
@@ -35,6 +35,7 @@ def __init__(self, model_file, pretrained_file, gpu=False, mean_file=None,
gpu, mean_file, input_scale, channel_swap: convenience params for
setting mode, mean, input scale, and channel order.
"""
+        print ">>>>>>>>>>>>>>>> Detector __init__ "
caffe.Net.__init__(self, model_file, pretrained_file)
self.set_phase_test()

@@ -63,6 +64,7 @@ def detect_windows(self, images_windows):
detections: list of {filename: image filename, window: crop coordinates,
predictions: prediction vector} dicts.
"""
+        print ">>>>>>>>>>>>>>>> Detector detect_windows"
# Extract windows.
window_inputs = []
for image_fname, windows in images_windows:
@@ -103,6 +105,7 @@ def detect_selective_search(self, image_fnames):
detections: list of {filename: image filename, window: crop coordinates,
predictions: prediction vector} dicts.
"""
+        print ">>>>>>>>>>>>>>>> Detector detect_selective_search"
import selective_search_ijcv_with_python as selective_search
# Make absolute paths so MATLAB can find the files.
image_fnames = [os.path.abspath(f) for f in image_fnames]
Binary file added python/caffe/detector.pyc
20 changes: 18 additions & 2 deletions python/caffe/pycaffe.py
@@ -31,6 +31,7 @@ def _Net_params(self):
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
"""
+    #print ">>>>>>>>>>> _Net_params";
return OrderedDict([(lr.name, lr.blobs) for lr in self.layers
if len(lr.blobs) > 0])

@@ -48,6 +49,7 @@ def _Net_forward(self, blobs=None, **kwargs):
Give
outs: {blob name: blob ndarray} dict.
"""
+    #print ">>>>>>>>>>> _Net_forward";
if blobs is None:
blobs = []

@@ -59,10 +61,14 @@ def _Net_forward(self, blobs=None, **kwargs):
for in_, blob in kwargs.iteritems():
if blob.shape[0] != self.blobs[in_].num:
raise Exception('Input is not batch sized')
-            if blob.ndim != 4:
-                raise Exception('{} blob is not 4-d'.format(in_))
+            if blob.ndim != 5:
+                raise Exception('{} blob is not 5-d'.format(in_))
+            print "input blob.ndim = ", blob.ndim
+            print "nnz input blob = ", np.count_nonzero(np.array(blob))
self.blobs[in_].data[...] = blob

+    #print "blobs shape = ", self.blobs[in_].data.shape
+    print "network blobs shape = ", self.blobs['data'].data.shape
+    print "network blobs nnz = ", np.count_nonzero(np.array(self.blobs['data'].data))
self._forward()

# Unpack blobs to extract
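Because the check above now insists on 5-D inputs, a conventional 4-D image batch needs an explicit length axis before being passed to forward. A sketch, assuming the net's data blob has the hypothetical shape (10, 3, 227, 227, 1):

    import numpy as np

    batch = np.random.rand(10, 3, 227, 227).astype(np.float32)  # N x C x H x W
    batch = batch[:, :, :, :, np.newaxis]   # append a singleton length axis
    out = net.forward(data=batch)           # passes the blob.ndim != 5 check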
@@ -82,6 +88,7 @@ def _Net_backward(self, diffs=None, **kwargs):
Give
outs: {blob name: diff ndarray} dict.
"""
+    print ">>>>>>>>>>> _Net_backward";
if diffs is None:
diffs = []

@@ -116,6 +123,7 @@ def _Net_forward_all(self, blobs=None, **kwargs):
Give
all_outs: {blob name: list of blobs} dict.
"""
+    #print ">>>>>>>>>>> _Net_forward_all";
# Collect outputs from batches
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
for batch in self._batch(kwargs):
@@ -148,6 +156,7 @@ def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict.
"""
+    #print ">>>>>>>>>>> _Net_forward_backward_all";
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
@@ -186,6 +195,7 @@ def _Net_set_mean(self, input_, mean_f, mode='elementwise'):
mode: elementwise = use the whole mean (and check dimensions)
channel = channel constant (e.g. mean pixel instead of mean image)
"""
+    #print ">>>>>>>>>>> _Net_set_mean";
if not hasattr(self, 'mean'):
self.mean = {}
if input_ not in self.inputs:
@@ -215,6 +225,7 @@ def _Net_set_input_scale(self, input_, scale):
input_: which input to assign this scale factor
scale: scale coefficient
"""
+    #print ">>>>>>>>>>> _Net_set_input_scale";
if not hasattr(self, 'input_scale'):
self.input_scale = {}
if input_ not in self.inputs:
@@ -232,6 +243,7 @@ def _Net_set_channel_swap(self, input_, order):
order: the order to take the channels.
(2,1,0) maps RGB to BGR for example.
"""
+    #print ">>>>>>>>>>> _Net_set_channel_swap";
if not hasattr(self, 'channel_swap'):
self.channel_swap = {}
if input_ not in self.inputs:
@@ -256,6 +268,7 @@ def _Net_preprocess(self, input_name, input_):
Give
caffe_inputs: (K x H x W) ndarray
"""
+    #print ">>>>>>>>>>> _Net_preprocess";
caffe_in = input_.astype(np.float32)
input_scale = self.input_scale.get(input_name)
channel_order = self.channel_swap.get(input_name)
@@ -277,6 +290,7 @@ def _Net_deprocess(self, input_name, input_):
"""
Invert Caffe formatting; see Net.preprocess().
"""
+    #print ">>>>>>>>>>> _Net_deprocess";
decaf_in = input_.copy().squeeze()
input_scale = self.input_scale.get(input_name)
channel_order = self.channel_swap.get(input_name)
@@ -298,6 +312,7 @@ def _Net_set_input_arrays(self, data, labels):
Set input arrays of the in-memory MemoryDataLayer.
(Note: this is only for networks declared with the memory data layer.)
"""
+    #print ">>>>>>>>>>> _Net_set_input_arrays";
-    if labels.ndim == 1:
-        labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis,
-                                             np.newaxis])
+    if labels.ndim == 1:
+        # Pad labels to (N, 1, 1, 1, 1): check_contiguous_array now reads five dims.
+        labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis,
+                                             np.newaxis, np.newaxis])
@@ -315,6 +330,7 @@ def _Net_batch(self, blobs):
Give (yield)
batch: {blob name: list of blobs} dict for a single batch.
"""
+    #print ">>>>>>>>>>> _Net_batch";
num = len(blobs.itervalues().next())
batch_size = self.blobs.itervalues().next().num
remainder = num % batch_size
10 changes: 9 additions & 1 deletion src/caffe/blob.cpp
@@ -7,7 +7,8 @@
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"

+#include <iostream>
+using namespace std;
namespace caffe {

template <typename Dtype>
Expand All @@ -24,6 +25,13 @@ void Blob<Dtype>::Reshape(const int num, const int channels, const int length, c
height_ = height;
width_ = width;
count_ = num_ * channels_ * length_ * height_ * width_;
+  /* cout << ">>>>>>>>>>>> Testing Count = " << num_ << endl;
+  cout << ">>>>>>>>>>>> Testing Count = " << channels_ << endl;
+  cout << ">>>>>>>>>>>> Testing Count = " << length_ << endl;
+  cout << ">>>>>>>>>>>> Testing Count = " << height_ << endl;
+  cout << ">>>>>>>>>>>> Testing Count = " << width_ << endl;
+  cout << ">>>>>>>>>>>> Testing Count = " << count_ << endl; */

if (count_) {
data_.reset(new SyncedMemory(count_ * sizeof(Dtype)));
diff_.reset(new SyncedMemory(count_ * sizeof(Dtype)));
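Since count_ now folds in the extra axis, the data and diff allocations each grow linearly with length. A back-of-the-envelope sketch for hypothetical video-sized dimensions, assuming 4-byte floats:

    num, channels, length, height, width = 10, 3, 16, 227, 227
    count = num * channels * length * height * width   # 24733920 elements
    print count * 4 / (1024.0 ** 2)   # ~94.3 MB for data_, and again for diff_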
3 changes: 3 additions & 0 deletions src/caffe/layers/inner_product_layer.cpp
@@ -32,13 +32,16 @@ void InnerProductLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
} else {
this->blobs_.resize(1);
}

// Initialize the weight
this->blobs_[0].reset(new Blob<Dtype>(1, 1, 1, N_, K_));
+  //LOG(INFO) << "HERE ";
// fill the weights
shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
this->layer_param_.inner_product_param().weight_filler()));
weight_filler->Fill(this->blobs_[0].get());
// If necessary, initialize and fill the bias term
+  //LOG(INFO) << "HERE111 ";
if (bias_term_) {
this->blobs_[1].reset(new Blob<Dtype>(1, 1, 1, 1, N_));
shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
10 changes: 5 additions & 5 deletions src/caffe/layers/memory_data_layer.cpp
@@ -16,10 +16,11 @@ void MemoryDataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
datum_channels_ = this->layer_param_.memory_data_param().channels();
datum_height_ = this->layer_param_.memory_data_param().height();
datum_width_ = this->layer_param_.memory_data_param().width();
+  datum_length_ = this->layer_param_.memory_data_param().width();  // FIXME: memory_data_param has no length field yet; width() is a stand-in.
-  datum_size_ = datum_channels_ * datum_height_ * datum_width_;
+  datum_size_ = datum_channels_ * datum_height_ * datum_width_ * datum_length_;
-  CHECK_GT(batch_size_ * datum_size_, 0) << "batch_size, channels, height,"
+  CHECK_GT(batch_size_ * datum_size_, 0) << "batch_size, channels, height, length,"
      " and width must be specified and positive in memory_data_param";
-  (*top)[0]->Reshape(batch_size_, datum_channels_, 1, datum_height_, datum_width_);
+  (*top)[0]->Reshape(batch_size_, datum_channels_, datum_length_, datum_height_, datum_width_);
(*top)[1]->Reshape(batch_size_, 1, 1, 1, 1);
data_ = NULL;
labels_ = NULL;
@@ -41,8 +42,7 @@ Dtype MemoryDataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
CHECK(data_) << "MemoryDataLayer needs to be initalized by calling Reset";
(*top)[0]->set_cpu_data(data_ + pos_ * datum_size_);
  (*top)[1]->set_cpu_data(labels_ + pos_);
  pos_ = (pos_ + batch_size_) % n_;
return Dtype(0.);
}
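Forward_cpu just slides a cursor through the preloaded arrays and wraps at n_; nothing is copied. A sketch of the equivalent indexing in NumPy terms, with hypothetical sizes and names mirroring the members above:

    import numpy as np

    batch_size, n = 4, 12                      # 12 datums, batches of 4
    datum_size = 3 * 16 * 32 * 32              # channels * length * height * width
    data = np.zeros(n * datum_size, dtype=np.float32)
    labels = np.zeros(n, dtype=np.float32)

    pos = 0
    for _ in range(5):
        top_data = data[pos * datum_size:(pos + batch_size) * datum_size]
        top_labels = labels[pos:pos + batch_size]
        pos = (pos + batch_size) % n           # pos_ = (pos_ + batch_size_) % n_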
