#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include "NvInfer.h"
#include "NvOnnxParser.h"
#include "torch/script.h"
#include "torch/torch.h"

namespace perception {
// TensorRT logger that forwards library messages to stderr, filtered by a
// configurable severity threshold.
class Logger : public nvinfer1::ILogger {
 public:
  explicit Logger(Severity severity = Severity::kWARNING)
      : reportable_severity(severity) {}

  void log(Severity severity, const char* msg) override {
    // Suppress messages below the reportable severity threshold.
    if (severity > reportable_severity) {
      return;
    }
    switch (severity) {
      case Severity::kINTERNAL_ERROR:
        std::cerr << "INTERNAL_ERROR: ";
        break;
      case Severity::kERROR:
        std::cerr << "ERROR: ";
        break;
      case Severity::kWARNING:
        std::cerr << "WARNING: ";
        break;
      case Severity::kINFO:
        std::cerr << "INFO: ";
        break;
      default:
        std::cerr << "UNKNOWN: ";
        break;
    }
    std::cerr << msg << std::endl;
  }

  Severity reportable_severity;
};
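// A minimal usage sketch (not part of the original header): TensorRT entry
// points take an nvinfer1::ILogger reference, so an instance of the Logger
// above can be handed to the builder/runtime factories. The variable names
// below are illustrative only.
//
//   Logger g_logger(nvinfer1::ILogger::Severity::kWARNING);
//   nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(g_logger);
//   nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(g_logger);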
// PointPillars 3D detector: pillar feature extraction (PFE) followed by an
// RPN head, with CUDA preprocessing, anchor masking, and postprocessing.
class PointPillars {
 private:
  friend class TestClass;
  // Pillar/grid geometry and point-cloud range constants.
  static const float kPillarXSize;
  static const float kPillarYSize;
  static const float kPillarZSize;
  static const float kMinXRange;
  static const float kMinYRange;
  static const float kMinZRange;
  static const float kMaxXRange;
  static const float kMaxYRange;
  static const float kMaxZRange;

  // Capacity and size constants for pillars, anchors, and network outputs.
  static const int kNumClass;
  static const int kMaxNumPillars;
  static const int kMaxNumPointsPerPillar;
  static const int kNumPointFeature;
  static const int kGridXSize;
  static const int kGridYSize;
  static const int kGridZSize;
  static const int kRpnInputSize;
  static const int kNumAnchor;
  static const int kNumOutputBoxFeature;
  static const int kRpnBoxOutputSize;
  static const int kRpnClsOutputSize;
  static const int kRpnDirOutputSize;
  static const int kBatchSize;
  static const int kNumIndsForScan;
  static const int kNumThreads;
  static const int kNumBoxCorners;

  // Anchor generation parameters.
  static const std::vector<int> kAnchorStrides;
  static const std::vector<int> kAnchorRanges;
  static const std::vector<int> kNumAnchorSets;
  static const std::vector<std::vector<float>> kAnchorDxSizes;
  static const std::vector<std::vector<float>> kAnchorDySizes;
  static const std::vector<std::vector<float>> kAnchorDzSizes;
  static const std::vector<std::vector<float>> kAnchorZCoors;
  static const std::vector<std::vector<int>> kNumAnchorRo;
  static const std::vector<std::vector<float>> kAnchorRo;

  // Configuration captured at construction time.
  const bool reproduce_result_mode_;
  const float score_threshold_;
  const float nms_overlap_threshold_;
  const std::string pfe_onnx_file_;
  const std::string rpn_onnx_file_;
  const std::string pfe_torch_file_;
  const std::string scattered_torch_file_;
  const std::string backbone_torch_file_;
  const std::string fpn_torch_file_;
  const std::string bbox_head_torch_file_;
  // Host-side buffers.
  int host_pillar_count_[1];

  float* box_anchors_min_x_;
  float* box_anchors_min_y_;
  float* box_anchors_max_x_;
  float* box_anchors_max_y_;

  // Device (GPU) buffers for preprocessing and the anchor mask.
  float* dev_num_points_per_pillar_;
  int* dev_sparse_pillar_map_;
  int* dev_cumsum_along_x_;
  int* dev_cumsum_along_y_;

  float* dev_pillar_point_feature_;
  float* dev_pillar_coors_;

  float* dev_box_anchors_min_x_;
  float* dev_box_anchors_min_y_;
  float* dev_box_anchors_max_x_;
  float* dev_box_anchors_max_y_;
  int* dev_anchor_mask_;

  // TensorRT binding buffers for the PFE and RPN engines.
  void* pfe_buffers_[3];
  void* rpn_buffers_[4];

  float* dev_scattered_feature_;

  // Device buffers for anchors and filtered detection output.
  float* dev_anchors_px_;
  float* dev_anchors_py_;
  float* dev_anchors_pz_;
  float* dev_anchors_dx_;
  float* dev_anchors_dy_;
  float* dev_anchors_dz_;
  float* dev_anchors_ro_;
  float* dev_filtered_box_;
  float* dev_filtered_score_;
  int* dev_filtered_label_;
  int* dev_filtered_dir_;
  float* dev_box_for_nms_;
  int* dev_filter_count_;
  // Helper modules: CPU preprocessing, GPU preprocessing, anchor-mask
  // generation for filtering output, and postprocessing of the network output.
  std::unique_ptr<PreprocessPoints> preprocess_points_ptr_;
  std::unique_ptr<PreprocessPointsCuda> preprocess_points_cuda_ptr_;
  std::unique_ptr<AnchorMaskCuda> anchor_mask_cuda_ptr_;
  std::unique_ptr<PostprocessCuda> postprocess_cuda_ptr_;
  // TensorRT engines and execution contexts (ONNX model path).
  nvinfer1::ICudaEngine* pfe_engine_;
  nvinfer1::ICudaEngine* rpn_engine_;
  nvinfer1::IExecutionContext* pfe_context_;
  nvinfer1::IExecutionContext* rpn_context_;
  // TorchScript modules (libtorch model path).
  torch::DeviceType device_type_;
  torch::jit::script::Module pfe_net_;
  torch::jit::script::Module scattered_net_;
  torch::jit::script::Module backbone_net_;
  torch::jit::script::Module fpn_net_;
  torch::jit::script::Module bbox_head_net_;
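  // Sketch (an assumption, not taken from the original header) of how
  // TorchScript modules like the ones above are typically loaded and run with
  // libtorch; file names and tensor contents are placeholders.
  //
  //   torch::jit::script::Module pfe_net = torch::jit::load(pfe_torch_file);
  //   pfe_net.to(torch::kCUDA);
  //   std::vector<torch::jit::IValue> inputs{input_tensor};
  //   at::Tensor out = pfe_net.forward(inputs).toTensor();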
  void DeviceMemoryMalloc();
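  // Sketch (an assumption) of the allocation pattern DeviceMemoryMalloc()
  // implies for the dev_* pointers above; the buffer sizes are derived from
  // the grid/anchor constants and are represented here by the placeholder
  // `num_anchors`.
  //
  //   cudaMalloc(reinterpret_cast<void**>(&dev_anchor_mask_),
  //              num_anchors * sizeof(int));
  //   ...
  //   cudaFree(dev_anchor_mask_);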
  void GenerateAnchors(float* anchors_px_, float* anchors_py_,
                       float* anchors_pz_, float* anchors_dx_,
                       float* anchors_dy_, float* anchors_dz_,
                       float* anchors_ro_);
  void OnnxToTRTModel(const std::string& model_file,
                      nvinfer1::ICudaEngine** engine_ptr);
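  // Sketch (an assumption; the exact calls depend on the TensorRT version) of
  // the usual ONNX-to-engine flow behind OnnxToTRTModel(): parse the ONNX
  // model into a network definition, then build it into an ICudaEngine.
  // `g_logger` is an illustrative Logger instance.
  //
  //   auto builder = nvinfer1::createInferBuilder(g_logger);
  //   auto network = builder->createNetworkV2(
  //       1U << static_cast<uint32_t>(
  //           nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
  //   auto parser = nvonnxparser::createParser(*network, g_logger);
  //   parser->parseFromFile(
  //       model_file.c_str(),
  //       static_cast<int>(nvinfer1::ILogger::Severity::kWARNING));
  //   auto config = builder->createBuilderConfig();
  //   *engine_ptr = builder->buildEngineWithConfig(*network, *config);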
  void Preprocess(const float* in_points_array, const int in_num_points);
  void PreprocessCPU(const float* in_points_array, const int in_num_points);
  void PreprocessGPU(const float* in_points_array, const int in_num_points);
  void ConvertAnchors2BoxAnchors(float* anchors_px_, float* anchors_py_,
                                 float* box_anchors_min_x_,
                                 float* box_anchors_min_y_,
                                 float* box_anchors_max_x_,
                                 float* box_anchors_max_y_);
  void PutAnchorsInDeviceMemory();
 public:
  PointPillars(const bool reproduce_result_mode, const float score_threshold,
               const float nms_overlap_threshold,
               const std::string& pfe_torch_file,
               const std::string& scattered_torch_file,
               const std::string& backbone_torch_file,
               const std::string& fpn_torch_file,
               const std::string& bbox_head_torch_file);
  void DoInference(const float* in_points_array, const int in_num_points,
                   std::vector<float>* out_detections,
                   std::vector<int>* out_labels);
};

}  // namespace perception
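A minimal usage sketch (not from the original source), assuming each point is flattened into kNumPointFeature consecutive floats and that DoInference packs kNumOutputBoxFeature floats per detected box; the threshold values, model file names, and the point-cloud loader below are placeholders.

#include <vector>

#include "point_pillars.h"  // assumed name of this header

// Hypothetical helper that fills kNumPointFeature floats per point and
// reports the point count.
void LoadPointCloud(std::vector<float>* points, int* num_points);

int main() {
  perception::PointPillars detector(
      /*reproduce_result_mode=*/false,
      /*score_threshold=*/0.5f,        // illustrative value
      /*nms_overlap_threshold=*/0.5f,  // illustrative value
      "pfe.pt", "scattered.pt", "backbone.pt", "fpn.pt", "bbox_head.pt");

  std::vector<float> points;
  int num_points = 0;
  LoadPointCloud(&points, &num_points);

  std::vector<float> detections;  // packed box attributes, one row per box
  std::vector<int> labels;        // one class id per box
  detector.DoInference(points.data(), num_points, &detections, &labels);
  return 0;
}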