Alpha_scaling_iron (#433)
Serafadam authored Oct 13, 2023
1 parent 5cf5dbe commit 10bc22a
Showing 12 changed files with 85 additions and 21 deletions.
7 changes: 4 additions & 3 deletions depthai_bridge/CMakeLists.txt
@@ -13,11 +13,11 @@ if(POLICY CMP0057)
endif()

set(opencv_version 4)
find_package(OpenCV ${opencv_version} QUIET COMPONENTS imgproc highgui)
find_package(OpenCV ${opencv_version} QUIET COMPONENTS imgproc highgui calib3d)
if(NOT OpenCV_FOUND)
message(STATUS "----------------Did not find OpenCV 4, trying OpenCV 3--------------")
set(opencv_version 3)
find_package(OpenCV ${opencv_version} REQUIRED COMPONENTS imgproc highgui)
find_package(OpenCV ${opencv_version} REQUIRED COMPONENTS imgproc highgui calib3d)
endif()


@@ -93,7 +93,8 @@ endif()
target_link_libraries(${PROJECT_NAME}
depthai::core
opencv_imgproc
opencv_highgui)
opencv_highgui
opencv_calib3d)

ament_export_targets(depthai_bridgeTargets HAS_LIBRARY_TARGET)

8 changes: 8 additions & 0 deletions depthai_bridge/include/depthai_bridge/ImageConverter.hpp
@@ -74,6 +74,12 @@ class ImageConverter {
*/
void reverseStereoSocketOrder();

/**
* @brief Sets the alpha scaling factor for the image.
* @param alphaScalingFactor: The alpha scaling factor to be used.
*/
void setAlphaScaling(double alphaScalingFactor = 0.0);

void toRosMsg(std::shared_ptr<dai::ImgFrame> inData, std::deque<ImageMsgs::Image>& outImageMsgs);
ImageMsgs::Image toRosMsgRawPtr(std::shared_ptr<dai::ImgFrame> inData, const sensor_msgs::msg::CameraInfo& info = sensor_msgs::msg::CameraInfo());
ImagePtr toRosMsgPtr(std::shared_ptr<dai::ImgFrame> inData);
@@ -114,9 +120,11 @@ class ImageConverter {
bool _fromBitstream = false;
bool _convertDispToDepth = false;
bool _addExpOffset = false;
bool _alphaScalingEnabled = false;
dai::CameraExposureOffset _expOffset;
bool _reverseStereoSocketOrder = false;
double _baseline;
double _alphaScalingFactor = 0.0;
};

} // namespace ros
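A minimal sketch of how the new setter could be used by code that already constructs a bridge converter. The constructor arguments (frame name, interleaved flag) are assumed from the existing depthai_bridge API and the frame name is a placeholder, not part of this commit:

#include <memory>

#include "depthai_bridge/ImageConverter.hpp"

// Hypothetical helper: build a converter and opt into alpha scaling.
// Frame name and interleaved flag are example values.
std::shared_ptr<dai::ros::ImageConverter> makeStereoConverter() {
    auto conv = std::make_shared<dai::ros::ImageConverter>("oak_right_camera_optical_frame", false);
    // alpha = 0.0 keeps only valid pixels after undistortion/rectification;
    // alpha = 1.0 keeps the full sensor field of view (black borders remain).
    conv->setAlphaScaling(1.0);
    return conv;
}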
26 changes: 26 additions & 0 deletions depthai_bridge/src/ImageConverter.cpp
@@ -2,6 +2,7 @@
#include "depthai_bridge/ImageConverter.hpp"

#include "depthai_bridge/depthaiUtility.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"

namespace dai {
@@ -59,6 +60,11 @@ void ImageConverter::reverseStereoSocketOrder() {
_reverseStereoSocketOrder = true;
}

void ImageConverter::setAlphaScaling(double alphaScalingFactor) {
_alphaScalingEnabled = true;
_alphaScalingFactor = alphaScalingFactor;
}

ImageMsgs::Image ImageConverter::toRosMsgRawPtr(std::shared_ptr<dai::ImgFrame> inData, const sensor_msgs::msg::CameraInfo& info) {
if(_updateRosBaseTimeOnToRosMsg) {
updateRosBaseTime();
@@ -375,6 +381,26 @@ ImageMsgs::CameraInfo ImageConverter::calibrationToCameraInfo(
if(calibHandler.getStereoRightCameraId() == cameraId || calibHandler.getStereoLeftCameraId() == cameraId) {
std::vector<std::vector<float>> stereoIntrinsics = calibHandler.getCameraIntrinsics(
calibHandler.getStereoRightCameraId(), cameraData.width, cameraData.height, topLeftPixelId, bottomRightPixelId);

if(_alphaScalingEnabled) {
cv::Mat cameraMatrix = cv::Mat(3, 3, CV_64F);
for(int i = 0; i < 3; i++) {
for(int j = 0; j < 3; j++) {
cameraMatrix.at<double>(i, j) = stereoIntrinsics[i][j];
}
}
cv::Mat distCoefficients(distCoeffs);

cv::Mat newCameraMatrix = cv::getOptimalNewCameraMatrix(cameraMatrix, distCoefficients, cv::Size(width, height), _alphaScalingFactor);
// Copying the contents of newCameraMatrix to stereoIntrinsics
for(int i = 0; i < 3; i++) {
for(int j = 0; j < 3; j++) {
float newValue = static_cast<float>(newCameraMatrix.at<double>(i, j));
stereoIntrinsics[i][j] = newValue;
intrinsics[i * 3 + j] = newValue;
}
}
}
std::vector<double> stereoFlatIntrinsics(12), flatRectifiedRotation(9);
for(int i = 0; i < 3; i++) {
std::copy(stereoIntrinsics[i].begin(), stereoIntrinsics[i].end(), stereoFlatIntrinsics.begin() + 4 * i);
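The new opencv_calib3d dependency added in the CMakeLists above exists for this cv::getOptimalNewCameraMatrix call. A standalone OpenCV sketch of what the alpha parameter controls; the intrinsics and distortion coefficients below are arbitrary example values, only the call itself mirrors the code:

#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>

// Compute a new camera matrix for a given alpha; example intrinsics only.
cv::Mat exampleNewCameraMatrix(double alpha) {
    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 640, 0, 800, 360, 0, 0, 1);
    cv::Mat dist = (cv::Mat_<double>(1, 5) << -0.25, 0.07, 0.0, 0.0, 0.0);
    // alpha = 0: only valid (non-black) pixels are kept in the result;
    // alpha = 1: every source pixel is retained, borders may be black.
    return cv::getOptimalNewCameraMatrix(K, dist, cv::Size(1280, 720), alpha);
}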
4 changes: 2 additions & 2 deletions depthai_descriptions/urdf/include/depthai_macro.urdf.xacro
@@ -12,11 +12,11 @@
<xacro:property name="has_imu" value="false" />
<xacro:property name="baseline" value="0.075" />

<xacro:if value="${model == 'OAK-D'}">
<xacro:if value="${model in ['OAK-D', 'OAK-D-PRO', 'OAK-D-POE']}">
<xacro:property name="has_imu" value="true" />
</xacro:if>

<xacro:base camera_name = "$(arg camera_name)" parent = "$(arg parent_frame)" camera_model = "$(arg camera_model)" base_frame = "$(arg base_frame)" cam_pos_x = "$(arg cam_pos_x)" cam_pos_y = "$(arg cam_pos_y)" cam_pos_z = "$(arg cam_pos_z)" cam_roll = "$(arg cam_roll)" cam_pitch = "$(arg cam_pitch)" cam_yaw = "$(arg cam_yaw)" has_imu="${has_imu}"/>
<xacro:base camera_name="${camera_name}" parent="${parent}" camera_model="${camera_model}" base_frame="${base_frame}" cam_pos_x="${cam_pos_x}" cam_pos_y="${cam_pos_y}" cam_pos_z="${cam_pos_z}" cam_roll="${cam_roll}" cam_pitch="${cam_pitch}" cam_yaw="${cam_yaw}" has_imu="${has_imu}"/>

<!-- RGB Camera -->
<link name="${camera_name}_rgb_camera_frame" />
@@ -1,6 +1,6 @@
#pragma once

#include "cv_bridge/cv_bridge.h"
#include "cv_bridge/cv_bridge.hpp"
#include "depthai_ros_msgs/msg/tracked_features.hpp"
#include "geometry_msgs/msg/point.hpp"
#include "message_filters/subscriber.h"
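The cv_bridge include changes in this and the following files track the header rename from cv_bridge.h to cv_bridge.hpp in newer ROS 2 releases. A minimal conversion sketch using the new include; the BGR8 encoding is just an example:

#include "cv_bridge/cv_bridge.hpp"  // .hpp replaces the older .h include
#include "sensor_msgs/image_encodings.hpp"
#include "sensor_msgs/msg/image.hpp"

// Convert a ROS image message into a cv::Mat (BGR8 chosen as an example).
cv::Mat toMat(const sensor_msgs::msg::Image& msg) {
    cv_bridge::CvImagePtr cvPtr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    return cvPtr->image;
}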
2 changes: 1 addition & 1 deletion depthai_filters/src/feature_tracker_overlay.cpp
@@ -1,6 +1,6 @@
#include "depthai_filters/feature_tracker_overlay.hpp"

#include "cv_bridge/cv_bridge.h"
#include "cv_bridge/cv_bridge.hpp"
#include "depthai_filters/utils.hpp"

namespace depthai_filters {
2 changes: 1 addition & 1 deletion depthai_filters/src/features_3d.cpp
@@ -1,6 +1,6 @@
#include "depthai_filters/features_3d.hpp"

#include "cv_bridge/cv_bridge.h"
#include "cv_bridge/cv_bridge.hpp"
#include "depthai_filters/utils.hpp"
#include "geometry_msgs/msg/point32.hpp"
#include "opencv2/opencv.hpp"
2 changes: 1 addition & 1 deletion depthai_filters/src/spatial_bb.cpp
@@ -1,6 +1,6 @@
#include "depthai_filters/spatial_bb.hpp"

#include "cv_bridge/cv_bridge.h"
#include "cv_bridge/cv_bridge.hpp"
#include "depthai_filters/utils.hpp"
#include "geometry_msgs/msg/point32.hpp"
#include "opencv2/opencv.hpp"
@@ -34,8 +34,13 @@ class PipelineGenerator {
*/
std::string validatePipeline(rclcpp::Node* node,const std::string& typeStr, int sensorNum);
/**
* @brief Creates the pipeline by using a plugin. Plugin types need to be of type depthai_ros_driver::pipeline_gen::BasePipeline.
*
* @param node The node
* @param device The device
* @param pipeline The pipeline
* @param[in] pipelineType The pipeline type name (plugin name or one of the default types)
* @param[in] nnType The neural network type (none, rgb, spatial)
* @param[in] enableImu Indicates if IMU is enabled
*
* @return Vector BaseNodes created.
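A hedged sketch of how the documented plugin-based creation might be invoked. The createPipeline name, header path, and argument order are inferred from the parameter list above and are not shown in this diff, so treat them as assumptions:

#include <memory>

#include "depthai_ros_driver/pipeline/pipeline_generator.hpp"  // assumed header path

// Assumed call shape: plugin or default pipeline type name, NN type, IMU flag.
auto buildDaiNodes(rclcpp::Node* node,
                   std::shared_ptr<dai::Device> device,
                   std::shared_ptr<dai::Pipeline> pipeline) {
    depthai_ros_driver::pipeline_gen::PipelineGenerator gen;
    // "RGBD" and "spatial" are example values; the last argument enables the IMU.
    return gen.createPipeline(node, device, pipeline, "RGBD", "spatial", true);
}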
6 changes: 3 additions & 3 deletions depthai_ros_driver/src/dai_nodes/sensors/sensor_helpers.cpp
@@ -11,9 +11,9 @@ namespace dai_nodes {
namespace sensor_helpers {

std::vector<ImageSensor> availableSensors = {{"IMX378", "1080P", {"12MP", "4K", "1080P"}, true},
{"OV9282", "800P", {"800P", "720P", "400P"}, false},
{"OV9782", "800P", {"800P", "720P", "400P"}, true},
{"OV9281", "800P", {"800P", "720P", "400P"}, true},
{"OV9282", "720P", {"800P", "720P", "400P"}, false},
{"OV9782", "720P", {"800P", "720P", "400P"}, true},
{"OV9281", "720P", {"800P", "720P", "400P"}, true},
{"IMX214", "1080P", {"13MP", "12MP", "4K", "1080P"}, true},
{"IMX412", "1080P", {"13MP", "12MP", "4K", "1080P"}, true},
{"OV7750", "480P", {"480P", "400P"}, false},
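For orientation, a sketch of how such a table entry could be consumed to pick a default resolution. The ImageSensor field names (name, defaultResolution) are assumed here, since the struct definition is not part of this diff:

#include <algorithm>
#include <string>
#include <vector>

// Look up the default resolution for a sensor by name; field names assumed.
std::string defaultResolutionFor(const std::vector<ImageSensor>& sensors, const std::string& sensorName) {
    auto it = std::find_if(sensors.begin(), sensors.end(),
                           [&](const ImageSensor& s) { return s.name == sensorName; });
    return it != sensors.end() ? it->defaultResolution : "1080P";
}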
20 changes: 13 additions & 7 deletions depthai_ros_driver/src/dai_nodes/stereo.cpp
@@ -212,6 +212,9 @@ void Stereo::setupStereoQueue(std::shared_ptr<dai::Device> device) {
if(ph->getParam<bool>("i_reverse_stereo_socket_order")) {
stereoConv->reverseStereoSocketOrder();
}
if(ph->getParam<bool>("i_enable_alpha_scaling")) {
stereoConv->setAlphaScaling(ph->getParam<double>("i_alpha_scaling"));
}
stereoIM = std::make_shared<camera_info_manager::CameraInfoManager>(
getROSNode()->create_sub_node(std::string(getROSNode()->get_name()) + "/" + getName()).get(), "/" + getName());
auto info = sensor_helpers::getCalibInfo(getROSNode()->get_logger(),
@@ -228,14 +231,17 @@ void Stereo::setupStereoQueue(std::shared_ptr<dai::Device> device) {
stereoConv->convertDispToDepth(calibHandler.getBaselineDistance(rightSensInfo.socket, leftSensInfo.socket, false));
}
}
// remove distortion since image is rectified
for(auto& d : info.d) {
d = 0.0;
}
for(auto& r : info.r) {
r = 0.0;
// remove distortion if alpha scaling is not enabled
if(!ph->getParam<bool>("i_enable_alpha_scaling")) {
for(auto& d : info.d) {
d = 0.0;
}
for(auto& r : info.r) {
r = 0.0;
}
info.r[0] = info.r[4] = info.r[8] = 1.0;
}
info.r[0] = info.r[4] = info.r[8] = 1.0;

stereoIM->setCameraInfo(info);
stereoQ = device->getOutputQueue(stereoQName, ph->getParam<int>("i_max_q_size"), false);
if(ipcEnabled()) {
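The two parameters read above can be set like any other driver parameter; a sketch of enabling them through rclcpp::NodeOptions when composing the node. Only the parameter names come from this diff; the "stereo." prefix and the composition style are assumptions:

#include "rclcpp/rclcpp.hpp"

// Parameter overrides enabling alpha scaling for the stereo output;
// the "stereo." namespace prefix is assumed, the alpha value is an example.
rclcpp::NodeOptions stereoAlphaScalingOptions() {
    rclcpp::NodeOptions options;
    options.parameter_overrides({
        rclcpp::Parameter("stereo.i_enable_alpha_scaling", true),
        rclcpp::Parameter("stereo.i_alpha_scaling", 1.0),
    });
    return options;
}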
22 changes: 20 additions & 2 deletions depthai_ros_driver/src/param_handlers/sensor_param_handler.cpp
@@ -94,9 +94,9 @@ void SensorParamHandler::declareParams(std::shared_ptr<dai::node::ColorCamera> c
resString = sensor.defaultResolution;
}

colorCam->setResolution(utils::getValFromMap(resString, dai_nodes::sensor_helpers::rgbResolutionMap));
int width = colorCam->getResolutionWidth();
int height = colorCam->getResolutionHeight();
colorCam->setResolution(utils::getValFromMap(resString, dai_nodes::sensor_helpers::rgbResolutionMap));

colorCam->setInterleaved(declareAndLogParam<bool>("i_interleaved", false));
if(declareAndLogParam<bool>("i_set_isp_scale", true)) {
@@ -114,7 +114,25 @@ void SensorParamHandler::declareParams(std::shared_ptr<dai::node::ColorCamera> c
RCLCPP_ERROR(getROSNode()->get_logger(), err_stream.str().c_str());
}
}
colorCam->setVideoSize(declareAndLogParam<int>("i_width", width), declareAndLogParam<int>("i_height", height));
int maxVideoWidth = 3840;
int maxVideoHeight = 2160;
int videoWidth = declareAndLogParam<int>("i_width", width);
int videoHeight = declareAndLogParam<int>("i_height", height);
if(videoWidth > maxVideoWidth) {
RCLCPP_WARN(getROSNode()->get_logger(),
"Video width %d is greater than max video width %d. Setting video width to max video width.",
videoWidth,
maxVideoWidth);
videoWidth = maxVideoWidth;
}
if(videoHeight > maxVideoHeight) {
RCLCPP_WARN(getROSNode()->get_logger(),
"Video height %d is greater than max video height %d. Setting video height to max video height.",
videoHeight,
maxVideoHeight);
videoHeight = maxVideoHeight;
}
colorCam->setVideoSize(videoWidth, videoHeight);
colorCam->setPreviewKeepAspectRatio(declareAndLogParam("i_keep_preview_aspect_ratio", true));
size_t iso = declareAndLogParam("r_iso", 800, getRangedIntDescriptor(100, 1600));
size_t exposure = declareAndLogParam("r_exposure", 20000, getRangedIntDescriptor(1, 33000));
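The width/height guard above caps the requested video size at 4K before calling setVideoSize. A compact equivalent of the clamping, written as a small helper for illustration; the helper name and pair return are illustrative, while the 3840x2160 ceiling matches the code above:

#include <algorithm>
#include <utility>

// Clamp a requested video size to the 3840x2160 maximum used above.
std::pair<int, int> clampVideoSize(int width, int height) {
    constexpr int kMaxWidth = 3840;
    constexpr int kMaxHeight = 2160;
    return {std::min(width, kMaxWidth), std::min(height, kMaxHeight)};
}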
