diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp
index 111d56e7269..2c23b456535 100644
--- a/include/caffe/vision_layers.hpp
+++ b/include/caffe/vision_layers.hpp
@@ -106,7 +106,7 @@ class FlattenLayer : public Layer<Dtype> {
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
   virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-  int channels_out_;
+  int count_;
 };
diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp
index 9ffe4d24a07..f2467444809 100644
--- a/src/caffe/layers/flatten_layer.cpp
+++ b/src/caffe/layers/flatten_layer.cpp
@@ -13,18 +13,20 @@ void FlattenLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
   CHECK_EQ(bottom.size(), 1) << "Flatten Layer takes a single blob as input.";
   CHECK_EQ(top->size(), 1) << "Flatten Layer takes a single blob as output.";
-  channels_out_ = bottom[0]->channels() * bottom[0]->height()
+  int channels_out = bottom[0]->channels() * bottom[0]->height()
       * bottom[0]->width();
-  (*top)[0]->Reshape(bottom[0]->num(), channels_out_, 1, 1);
+  (*top)[0]->Reshape(bottom[0]->num(), channels_out, 1, 1);
+  count_ = bottom[0]->num() * channels_out;
+  CHECK_EQ(count_, bottom[0]->count());
+  CHECK_EQ(count_, (*top)[0]->count());
 };
 
 template <typename Dtype>
 void FlattenLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
   const Dtype* bottom_data = bottom[0]->cpu_data();
   Dtype* top_data = (*top)[0]->mutable_cpu_data();
-  caffe_copy(channels_out_, bottom_data, top_data);
+  caffe_copy(count_, bottom_data, top_data);
 }
 
 template <typename Dtype>
@@ -32,7 +34,7 @@ void FlattenLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
-  caffe_gpu_copy(channels_out_, bottom_data, top_data);
+  caffe_gpu_copy(count_, bottom_data, top_data);
 }
 
 template <typename Dtype>
@@ -40,7 +42,7 @@ Dtype FlattenLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
   const Dtype* top_diff = top[0]->cpu_diff();
   Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
-  caffe_copy(channels_out_, top_diff, bottom_diff);
+  caffe_copy(count_, top_diff, bottom_diff);
 }
 
 template <typename Dtype>
@@ -49,7 +51,7 @@ Dtype FlattenLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
   const Dtype* top_diff = top[0]->gpu_diff();
   Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
-  caffe_gpu_copy(channels_out_, top_diff, bottom_diff);
+  caffe_gpu_copy(count_, top_diff, bottom_diff);
 }
 
 INSTANTIATE_CLASS(FlattenLayer);
diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp
index 23bce9d9675..b97e56aa995 100644
--- a/src/caffe/test/test_flatten_layer.cpp
+++ b/src/caffe/test/test_flatten_layer.cpp
@@ -58,6 +58,8 @@ TYPED_TEST(FlattenLayerTest, TestCPU) {
   for (int c = 0; c < 3 * 6 * 5; ++c) {
     EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0),
         this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5));
+    EXPECT_EQ(this->blob_top_->data_at(1, c, 0, 0),
+        this->blob_bottom_->data_at(1, c / (6 * 5), (c / 5) % 6, c % 5));
   }
 }