Apollo  6.0
Open-source self-driving car software
blob.h
Go to the documentation of this file.
1 /******************************************************************************
2 COPYRIGHT
3 
4 All contributions by the University of California:
5 Copyright (c) 2014-2017 The Regents of the University of California (Regents)
6 All rights reserved.
7 
8 All other contributions:
9 Copyright (c) 2014-2017, the respective contributors
10 All rights reserved.
11 
12 Caffe uses a shared copyright model: each contributor holds copyright over
13 their contributions to Caffe. The project versioning records all such
14 contribution and copyright details. If a contributor wants to further mark
15 their specific copyright on a particular contribution, they should indicate
16 their copyright solely in the commit message of the change when it is
17 committed.
18 
19 LICENSE
20 
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions are met:
23 
24 1. Redistributions of source code must retain the above copyright notice, this
25  list of conditions and the following disclaimer.
26 2. Redistributions in binary form must reproduce the above copyright notice,
27  this list of conditions and the following disclaimer in the documentation
28  and/or other materials provided with the distribution.
29 
30 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
31 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
32 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
33 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
34 ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
35 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
36 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
37 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
39 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 
41 CONTRIBUTION AGREEMENT
42 
43 By contributing to the BVLC/caffe repository through pull-request, comment,
44 or otherwise, the contributor releases their content to the
45 license and copyright terms herein.
46  *****************************************************************************/
47 
48 /******************************************************************************
49  * Copyright 2018 The Apollo Authors. All Rights Reserved.
50  *
51  * Licensed under the Apache License, Version 2.0 (the "License");
52  * you may not use this file except in compliance with the License.
53  * You may obtain a copy of the License at
54  *
55  * http://www.apache.org/licenses/LICENSE-2.0
56  *
57  * Unless required by applicable law or agreed to in writing, software
58  * distributed under the License is distributed on an "AS IS" BASIS,
59  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
60  * See the License for the specific language governing permissions and
61  * limitations under the License.
62  *****************************************************************************/
63 #pragma once
64 
#include <memory>
#include <string>
#include <vector>

#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"

#include "cyber/common/log.h"
#include "modules/perception/base/syncedmem.h"
75 namespace apollo {
76 namespace perception {
77 namespace base {
78 
/// Upper bound on the number of axes a Blob's shape may have
/// (presumably enforced by Reshape() — confirm in blob.cc).
constexpr size_t kMaxBlobAxes = 32;
80 
87 template <typename Dtype>
88 class Blob {
89  public:
90  Blob() : data_(), count_(0), capacity_(0), use_cuda_host_malloc_(false) {}
91  explicit Blob(bool use_cuda_host_malloc)
92  : data_(),
93  count_(0),
94  capacity_(0),
95  use_cuda_host_malloc_(use_cuda_host_malloc) {}
96 
98  Blob(const int num, const int channels, const int height, const int width,
99  const bool use_cuda_host_malloc = false);
100  explicit Blob(const std::vector<int>& shape,
101  const bool use_cuda_host_malloc = false);
102 
103  Blob(const Blob&) = delete;
104  void operator=(const Blob&) = delete;
105 
107  void Reshape(const int num, const int channels, const int height,
108  const int width);
123  void Reshape(const std::vector<int>& shape);
124  void ReshapeLike(const Blob& other);
125  inline std::string shape_string() const {
126  return shape_.empty()
127  ? absl::StrCat("(", count_, ")")
128  : absl::StrCat(absl::StrJoin(shape_, " "), " (", count_, ")");
129  }
130  inline const std::vector<int>& shape() const { return shape_; }
139  inline int shape(int index) const {
140  return shape_[CanonicalAxisIndex(index)];
141  }
142  inline int num_axes() const { return static_cast<int>(shape_.size()); }
143  inline int count() const { return count_; }
144 
153  inline int count(int start_axis, int end_axis) const {
154  CHECK_LE(start_axis, end_axis);
155  CHECK_GE(start_axis, 0);
156  CHECK_GE(end_axis, 0);
157  CHECK_LE(start_axis, num_axes());
158  CHECK_LE(end_axis, num_axes());
159  int count = 1;
160  for (int i = start_axis; i < end_axis; ++i) {
161  count *= shape(i);
162  }
163  return count;
164  }
171  inline int count(int start_axis) const {
172  return count(start_axis, num_axes());
173  }
174 
181  // const Blob<Dtype> operator ()(const std::vector<int> &roi_start,
182  // const std::vector<int> &roi_end) const {
183  // }
184 
196  inline int CanonicalAxisIndex(int axis_index) const {
197  CHECK_GE(axis_index, -num_axes())
198  << "axis " << axis_index << " out of range for " << num_axes()
199  << "-D Blob with shape " << shape_string();
200  CHECK_LT(axis_index, num_axes())
201  << "axis " << axis_index << " out of range for " << num_axes()
202  << "-D Blob with shape " << shape_string();
203  if (axis_index < 0) {
204  return axis_index + num_axes();
205  }
206  return axis_index;
207  }
208 
210  inline int num() const { return LegacyShape(0); }
212  inline int channels() const { return LegacyShape(1); }
214  inline int height() const { return LegacyShape(2); }
216  inline int width() const { return LegacyShape(3); }
217  inline int LegacyShape(int index) const {
218  CHECK_LE(num_axes(), 4)
219  << "Cannot use legacy accessors on Blobs with > 4 axes.";
220  CHECK_LT(index, 4);
221  CHECK_GE(index, -4);
222  if (index >= num_axes() || index < -num_axes()) {
223  // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse
224  // indexing) -- this special case simulates the one-padding used to fill
225  // extraneous axes of legacy blobs.
226  return 1;
227  }
228  return shape(index);
229  }
230 
231  inline int offset(const int n, const int c = 0, const int h = 0,
232  const int w = 0) const {
233  CHECK_GE(n, 0);
234  CHECK_LE(n, num());
235  CHECK_GE(channels(), 0);
236  CHECK_LE(c, channels());
237  CHECK_GE(height(), 0);
238  CHECK_LE(h, height());
239  CHECK_GE(width(), 0);
240  CHECK_LE(w, width());
241  return ((n * channels() + c) * height() + h) * width() + w;
242  }
243 
244  inline int offset(const std::vector<int>& indices) const {
245  CHECK_LE(indices.size(), static_cast<size_t>(num_axes()));
246  int offset = 0;
247  for (int i = 0; i < num_axes(); ++i) {
248  offset *= shape(i);
249  if (static_cast<int>(indices.size()) > i) {
250  CHECK_GE(indices[i], 0);
251  CHECK_LT(indices[i], shape(i));
252  offset += indices[i];
253  }
254  }
255  return offset;
256  }
265  void CopyFrom(const Blob<Dtype>& source, bool reshape = false);
266 
267  inline Dtype data_at(const int n, const int c, const int h,
268  const int w) const {
269  return cpu_data()[offset(n, c, h, w)];
270  }
271 
272  inline Dtype data_at(const std::vector<int>& index) const {
273  return cpu_data()[offset(index)];
274  }
275 
276  inline const std::shared_ptr<SyncedMemory>& data() const {
277  ACHECK(data_);
278  return data_;
279  }
280 
281  const Dtype* cpu_data() const;
282  void set_cpu_data(Dtype* data);
283  const int* gpu_shape() const;
284  const Dtype* gpu_data() const;
285  void set_gpu_data(Dtype* data);
286  Dtype* mutable_cpu_data();
287  Dtype* mutable_gpu_data();
288  void set_head_gpu() { data_->set_head_gpu(); }
289  void set_head_cpu() { data_->set_head_cpu(); }
290  SyncedMemory::SyncedHead head() const { return data_->head(); }
291 
301  void ShareData(const Blob& other);
302 
303  protected:
304  std::shared_ptr<SyncedMemory> data_;
305  std::shared_ptr<SyncedMemory> shape_data_;
306  std::vector<int> shape_;
307  int count_;
310 }; // class Blob
311 
/// Shared-ownership handle to a mutable Blob.
template <typename Dtype>
using BlobPtr = std::shared_ptr<Blob<Dtype>>;
/// Shared-ownership handle to a read-only Blob.
template <typename Dtype>
using BlobConstPtr = std::shared_ptr<const Blob<Dtype>>;
316 
317 } // namespace base
318 } // namespace perception
319 } // namespace apollo
Dtype data_at(const std::vector< int > &index) const
Definition: blob.h:272
int channels() const
Deprecated legacy shape accessor channels: use shape(1) instead.
Definition: blob.h:212
int shape(int index) const
Returns the dimension of the index-th axis (or the negative index-th axis from the end...
Definition: blob.h:139
Dtype data_at(const int n, const int c, const int h, const int w) const
Definition: blob.h:267
#define ACHECK(cond)
Definition: log.h:80
void set_head_gpu()
Definition: blob.h:288
PlanningContext is the runtime context in planning. It is persistent across multiple frames...
Definition: atomic_hash_map.h:25
int offset(const int n, const int c=0, const int h=0, const int w=0) const
Definition: blob.h:231
SyncedHead
Definition: syncedmem.h:101
void ReshapeLike(const Blob &other)
int count() const
Definition: blob.h:143
int num() const
Deprecated legacy shape accessor num: use shape(0) instead.
Definition: blob.h:210
int height() const
Deprecated legacy shape accessor height: use shape(2) instead.
Definition: blob.h:214
void set_cpu_data(Dtype *data)
int CanonicalAxisIndex(int axis_index) const
Returns the 'canonical' version of a (usually) user-specified axis, allowing for negative indexing.
Definition: blob.h:196
std::string shape_string() const
Definition: blob.h:125
A wrapper around SyncedMemory holders serving as the basic computational unit for images...
Definition: blob.h:88
void Reshape(const int num, const int channels, const int height, const int width)
Deprecated; use Reshape(const std::vector<int>& shape).
Blob(bool use_cuda_host_malloc)
Definition: blob.h:91
std::vector< int > shape_
Definition: blob.h:306
void set_gpu_data(Dtype *data)
int count(int start_axis, int end_axis) const
Compute the volume of a slice; i.e., the product of dimensions among a range of axes.
Definition: blob.h:153
const Dtype * gpu_data() const
std::shared_ptr< const Blob< Dtype > > BlobConstPtr
Definition: blob.h:315
int count_
Definition: blob.h:307
int count(int start_axis) const
Compute the volume of a slice spanning from a particular first axis to the final axis.
Definition: blob.h:171
int LegacyShape(int index) const
Definition: blob.h:217
const Dtype * cpu_data() const
void CopyFrom(const Blob< Dtype > &source, bool reshape=false)
Copy from a source Blob.
std::shared_ptr< SyncedMemory > shape_data_
Definition: blob.h:305
std::shared_ptr< SyncedMemory > data_
Definition: blob.h:304
std::shared_ptr< Blob< Dtype > > BlobPtr
Definition: blob.h:313
constexpr size_t kMaxBlobAxes
Definition: blob.h:79
int offset(const std::vector< int > &indices) const
Definition: blob.h:244
const std::shared_ptr< SyncedMemory > & data() const
Definition: blob.h:276
void operator=(const Blob &)=delete
int width() const
Deprecated legacy shape accessor width: use shape(3) instead.
Definition: blob.h:216
void ShareData(const Blob &other)
Set the data_ std::shared_ptr to point to the SyncedMemory holding the data_ of Blob other – useful ...
void set_head_cpu()
Definition: blob.h:289
const int * gpu_shape() const
SyncedMemory::SyncedHead head() const
Definition: blob.h:290
const std::vector< int > & shape() const
Definition: blob.h:130
int num_axes() const
Definition: blob.h:142
int capacity_
Definition: blob.h:308
Blob()
Definition: blob.h:90
bool use_cuda_host_malloc_
Definition: blob.h:309