#include "modules/planning/proto/learning_data.pb.h"
#include "modules/planning/proto/planning_semantic_map_config.pb.h"
#include "opencv2/opencv.hpp"

bool Init(const PlanningSemanticMapConfig& config);
bool RenderMultiChannelEnv(const LearningDataFrame& learning_data_frame,
                           cv::Mat* img_feature);

bool RenderBGREnv(const LearningDataFrame& learning_data_frame,
                  cv::Mat* img_feature);

bool RenderCurrentEgoStatus(const LearningDataFrame& learning_data_frame,
                            cv::Mat* img_feature);

bool RenderCurrentEgoPoint(const LearningDataFrame& learning_data_frame,
                           cv::Mat* img_feature);

bool RenderCurrentEgoBox(const LearningDataFrame& learning_data_frame,
                         cv::Mat* img_feature);
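Init() plus the five Render* entry points above form the public feature-generation API. A minimal usage sketch, assuming the renderer is reached through a DECLARE_SINGLETON-style Instance() accessor (as the macro reference in the member notes below suggests); the header path and the way the protos are obtained are illustrative, not verified.

#include <iostream>

#include "opencv2/opencv.hpp"

// Illustrative include; the actual path inside the planning module may differ.
#include "birdview_img_feature_renderer.h"

void RenderFrameFeatures(
    const apollo::planning::LearningDataFrame& frame,
    const apollo::planning::PlanningSemanticMapConfig& config) {
  auto* renderer = apollo::planning::BirdviewImgFeatureRenderer::Instance();
  if (!renderer->Init(config)) {
    std::cerr << "BirdviewImgFeatureRenderer::Init failed" << std::endl;
    return;
  }

  cv::Mat multi_channel;  // stacked single-channel feature planes
  cv::Mat bgr;            // 3-channel variant
  if (renderer->RenderMultiChannelEnv(frame, &multi_channel) &&
      renderer->RenderBGREnv(frame, &bgr)) {
    std::cout << "feature size: " << multi_channel.rows << " x "
              << multi_channel.cols << " x " << multi_channel.channels()
              << std::endl;
  }
}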
bool LoadRoadMap(const std::string& map_file);

bool LoadSpeedlimitMap(const std::string& map_file);
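The two Load* helpers populate the base road-map and speed-limit images (base_roadmap_img_ and base_speedlimit_img_ below). A minimal sketch of such a loader, assuming the base maps are stored as ordinary image files; LoadBaseMapImage is a hypothetical stand-in, not the module's actual implementation.

#include <string>

#include "opencv2/opencv.hpp"

// Hypothetical stand-in for LoadRoadMap / LoadSpeedlimitMap: decode an image
// file from disk and report failure if it cannot be read.
bool LoadBaseMapImage(const std::string& map_file, cv::Mat* base_img) {
  if (base_img == nullptr) {
    return false;
  }
  *base_img = cv::imread(map_file, cv::IMREAD_UNCHANGED);
  return !base_img->empty();
}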
bool RenderLocalRoadMap(const double ego_current_x,
                        const double ego_current_y,
                        const double ego_current_heading,
                        cv::Mat* img_feature);

bool RenderLocalSpeedlimitMap(const double ego_current_x,
                              const double ego_current_y,
                              const double ego_current_heading,
                              cv::Mat* img_feature);
bool RenderEgoCurrentPoint(cv::Mat* img_feature,
                           const cv::Scalar& gray_scale = cv::Scalar(255),
                           const cv::Scalar& bgr_color = cv::Scalar(255, 255, 255));

bool RenderEgoCurrentBox(cv::Mat* img_feature,
                         const cv::Scalar& gray_scale = cv::Scalar(255),
                         const cv::Scalar& bgr_color = cv::Scalar(255, 255, 255));
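Each ego-drawing routine takes both a gray_scale and a bgr_color default so the same call can target single-channel and BGR images. A hedged sketch of that pattern with OpenCV primitives; DrawEgoMarkers, the marker radius, and the coordinates are made up for illustration.

#include <vector>

#include "opencv2/opencv.hpp"

// Hypothetical helper: pick the gray or BGR color based on the target image's
// channel count, then draw the ego point and box with OpenCV primitives.
void DrawEgoMarkers(cv::Mat* img, const cv::Point2i& ego_idx,
                    const std::vector<cv::Point2i>& box_corners,
                    const cv::Scalar& gray_scale = cv::Scalar(255),
                    const cv::Scalar& bgr_color = cv::Scalar(255, 255, 255)) {
  const cv::Scalar& color = (img->channels() == 1) ? gray_scale : bgr_color;
  cv::circle(*img, ego_idx, /*radius=*/2, color, /*thickness=*/-1);
  std::vector<std::vector<cv::Point2i>> polys{box_corners};
  cv::fillPoly(*img, polys, color);
}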
bool RenderEgoPastPoint(const LearningDataFrame& learning_data_frame,
                        const double current_time_sec,
                        const double ego_current_x,
                        const double ego_current_y,
                        const double ego_current_heading,
                        cv::Mat* img_feature,
                        const cv::Scalar& gray_scale = cv::Scalar(255),
                        const cv::Scalar& bgr_color = cv::Scalar(255, 255, 255));
bool RenderObsPastBox(const LearningDataFrame& learning_data_frame,
                      const double current_time_sec,
                      cv::Mat* img_feature,
                      const cv::Scalar& gray_scale = cv::Scalar(255),
                      const cv::Scalar& bgr_color = cv::Scalar(0, 255, 0));

bool RenderObsFutureBox(const LearningDataFrame& learning_data_frame,
                        const double current_time_sec,
                        cv::Mat* img_feature,
                        const cv::Scalar& gray_scale = cv::Scalar(255),
                        const cv::Scalar& bgr_color = cv::Scalar(0, 0, 255));
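Past obstacle boxes default to green and predicted future boxes to red in the BGR variant, while the single gray_scale default leaves room for encoding time in the gray images. One plausible (assumed, not confirmed) encoding is to scale the drawn intensity by how close each box's timestamp is to current_time_sec, as sketched below; DrawTimedBoxes and the 3-second window are illustrative.

#include <algorithm>
#include <utility>
#include <vector>

#include "opencv2/opencv.hpp"

// Hypothetical helper: draw each timestamped polygon with an intensity that
// fades the further its timestamp is from the current time. The linear fade
// and history window are illustrative choices, not the module's actual rule.
void DrawTimedBoxes(
    cv::Mat* img,
    const std::vector<std::pair<double, std::vector<cv::Point2i>>>& boxes,
    const double current_time_sec, const double history_sec = 3.0) {
  for (const auto& timed_box : boxes) {
    const double age = current_time_sec - timed_box.first;
    const double weight = std::max(0.0, 1.0 - age / history_sec);
    std::vector<std::vector<cv::Point2i>> polys{timed_box.second};
    cv::fillPoly(*img, polys, cv::Scalar(255.0 * weight));
  }
}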
bool RenderTrafficLight(const LearningDataFrame& learning_data_frame,
                        const double ego_current_x,
                        const double ego_current_y,
                        const double ego_current_heading,
                        cv::Mat* img_feature,
                        const cv::Scalar& gray_scale = cv::Scalar(255),
                        const cv::Scalar& bgr_color = cv::Scalar(255, 255, 255));

bool RenderRouting(const LearningDataFrame& learning_data_frame,
                   const double ego_current_x,
                   const double ego_current_y,
                   const double ego_current_heading,
                   cv::Mat* img_feature,
                   const cv::Scalar& gray_scale = cv::Scalar(255),
                   const cv::Scalar& bgr_color = cv::Scalar(255, 255, 255));
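RenderTrafficLight and RenderRouting both take the ego pose so that map-frame geometry (stop lines, routing lane centerlines) can be drawn in the ego-centered image. A hedged sketch of the routing half, assuming the route is already available as a polyline converted to image indices; DrawRoutingPolyline and the line thickness are hypothetical.

#include <vector>

#include "opencv2/opencv.hpp"

// Hypothetical helper: rasterize a routing polyline into the feature image.
void DrawRoutingPolyline(cv::Mat* img,
                         const std::vector<cv::Point2i>& routing_img_points,
                         const cv::Scalar& color = cv::Scalar(255, 255, 255)) {
  if (routing_img_points.size() < 2) {
    return;
  }
  cv::polylines(*img,
                std::vector<std::vector<cv::Point2i>>{routing_img_points},
                /*isClosed=*/false, color, /*thickness=*/4);
}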
bool CropByPose(const double ego_x,
                const double ego_y,
                const double ego_heading,
                const cv::Mat& base_map,
                cv::Mat* img_feature);
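CropByPose is presumably what RenderLocalRoadMap and RenderLocalSpeedlimitMap build on to cut an ego-centered, heading-aligned window out of a base map. A minimal sketch of one way to do that with OpenCV; the fixed 200 x 200 output window, the heading-to-angle convention, and the CropAroundEgo name are assumptions, not taken from the header.

#include "opencv2/opencv.hpp"

// Hypothetical ego-centered crop: rotate the base map about the ego pixel so
// the heading is aligned with the image, then cut a fixed-size window.
// ego_px/ego_py are the ego's pixel coordinates inside base_map.
cv::Mat CropAroundEgo(const cv::Mat& base_map, const double ego_px,
                      const double ego_py, const double ego_heading_rad) {
  const cv::Point2f center(static_cast<float>(ego_px),
                           static_cast<float>(ego_py));
  // getRotationMatrix2D expects degrees; the -90 offset is an assumed
  // convention for making the ego heading point toward the top of the image.
  const double angle_deg = ego_heading_rad * 180.0 / CV_PI - 90.0;
  const cv::Mat rot = cv::getRotationMatrix2D(center, angle_deg, /*scale=*/1.0);
  cv::Mat rotated;
  cv::warpAffine(base_map, rotated, rot, base_map.size());

  const int half = 100;
  const cv::Rect roi(static_cast<int>(ego_px) - half,
                     static_cast<int>(ego_py) - half, 2 * half, 2 * half);
  return rotated(roi & cv::Rect(0, 0, rotated.cols, rotated.rows)).clone();
}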
cv::Point2i GetPointImgIdx(const double local_point_x,
                           const double local_point_y,
                           const int center_point_idx_x,
                           const int center_point_idx_y);

cv::Point2i GetAffinedPointImgIdx(const double point_x,
                                  const double point_y,
                                  const double center_x,
                                  const double center_y,
                                  const double theta);

std::vector<cv::Point2i> GetAffinedBoxImgIdx(
    const double box_center_x,
    const double box_center_y,
    const double box_theta,
    const std::vector<std::pair<double, double>>& box_corner_points,
    const double center_x,
    const double center_y,
    const double theta);
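The GetAffined* helpers map world-frame points into image indices by first expressing them relative to a center pose (center_x, center_y, theta) and then converting meters to pixels around a center pixel index. A sketch of that transform; the axis and sign conventions, the resolution value, and the WorldPointToImgIdx name are assumptions rather than facts from the header.

#include <cmath>

#include "opencv2/opencv.hpp"

// Hypothetical world-to-image transform: rotate (point - center) into the
// center's frame, then scale meters to pixels around a given center index.
cv::Point2i WorldPointToImgIdx(const double point_x, const double point_y,
                               const double center_x, const double center_y,
                               const double theta, const int center_idx_x,
                               const int center_idx_y,
                               const double resolution_m_per_px = 0.1) {
  const double dx = point_x - center_x;
  const double dy = point_y - center_y;
  const double local_x = std::cos(theta) * dx + std::sin(theta) * dy;
  const double local_y = -std::sin(theta) * dx + std::cos(theta) * dy;
  // Image rows grow downward, so the longitudinal offset flips sign on rows;
  // this mapping is an assumed convention.
  const int col = center_idx_x + static_cast<int>(local_y / resolution_m_per_px);
  const int row = center_idx_y - static_cast<int>(local_x / resolution_m_per_px);
  return cv::Point2i(col, row);
}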
PlanningSemanticMapConfig config_;
common::VehicleConfig ego_vehicle_config_;
cv::Mat base_roadmap_img_;
cv::Mat base_speedlimit_img_;
double map_bottom_left_point_x_ = 0.0;
double map_bottom_left_point_y_ = 0.0;
cv::Mat ego_cur_point_img_;
cv::Mat ego_cur_box_img_;
cv::Mat stacked_ego_cur_status_img_;
bool Init(const PlanningSemanticMapConfig& config)
Initializes the renderer with the given semantic map configuration.
bool RenderBGREnv(const LearningDataFrame& learning_data_frame, cv::Mat* img_feature)
Generates a BGR image as the input feature for a given model.
#define DECLARE_SINGLETON(classname)
Definition: macros.h:52
virtual ~BirdviewImgFeatureRenderer()=default
Destructor.
bool RenderCurrentEgoPoint(const LearningDataFrame& learning_data_frame, cv::Mat* img_feature)
Generates a single-channel image of the current ego point.
bool RenderCurrentEgoStatus(const LearningDataFrame& learning_data_frame, cv::Mat* img_feature)
Generates a two-channel image: one channel for the current ego box, one for the current ego point.
bool RenderMultiChannelEnv(const LearningDataFrame& learning_data_frame, cv::Mat* img_feature)
Generates a multi-channel image as the input feature for a given model.
Definition: birdview_img_feature_renderer.h:37
bool RenderCurrentEgoBox(const LearningDataFrame& learning_data_frame, cv::Mat* img_feature)
Generates a single-channel image of the current ego box.