mirror of
https://github.com/karma-riuk/hdr_esim.git
synced 2025-06-18 18:37:51 +02:00
initial commit
This commit is contained in:
4
event_camera_simulator/imp/imp_unrealcv_renderer/.gitignore
vendored
Normal file
4
event_camera_simulator/imp/imp_unrealcv_renderer/.gitignore
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
Debug
|
||||
.settings
|
||||
.cproject
|
||||
.project
|
cmake_minimum_required(VERSION 2.8.3)
project(imp_unrealcv_renderer)

find_package(catkin_simple REQUIRED)
find_package(OpenCV REQUIRED)

# Resolve threading and Boost through CMake's find modules instead of raw
# -lpthread/-lboost_* linker flags, so the correct platform-specific
# libraries and include directories are picked up.
find_package(Threads REQUIRED)
find_package(Boost REQUIRED COMPONENTS filesystem system)

include_directories(${OpenCV_INCLUDE_DIRS} ${Boost_INCLUDE_DIRS})

catkin_simple()

set(HEADERS
  include/esim/imp_unrealcv_renderer/utils.hpp
  include/esim/imp_unrealcv_renderer/unrealcv_renderer.hpp
)

set(SOURCES
  src/unrealcv_renderer.cpp
)

cs_add_library(${PROJECT_NAME} ${SOURCES} ${HEADERS})
target_link_libraries(${PROJECT_NAME}
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
  ${CMAKE_DL_LIBS}
  ${CMAKE_THREAD_LIBS_INIT}
  ${Boost_LIBRARIES}
)

cs_install()
cs_export()
@ -0,0 +1,34 @@
|
||||
#pragma once

#include <esim/rendering/renderer_base.hpp>

namespace event_camera_simulator {

class UnrealCvClient; // fwd

//! Rendering engine that queries RGB images and depth maps from a running
//! Unreal Engine instance through the UnrealCV plugin.
class UnrealCvRenderer : public Renderer
{
public:

  UnrealCvRenderer();

  //! Render image and depth map for a given camera pose.
  //! NOTE(review): `override` added — this is clearly meant to override the
  //! base-class method (the class marks its other virtuals `override`).
  virtual void render(const Transformation& T_W_C, const ImagePtr &out_image, const DepthmapPtr &out_depthmap) const override;

  //! Overload accepting dynamic object poses. The object poses T_W_OBJ are
  //! ignored and the call simply forwards to the camera-only overload above.
  void render(const Transformation& T_W_C, const std::vector<Transformation>& T_W_OBJ, const ImagePtr &out_image, const DepthmapPtr &out_depthmap) const
  {
    render(T_W_C, out_image, out_depthmap);
  }

  //! Returns true if the rendering engine can compute optic flow, false otherwise.
  virtual bool canComputeOpticFlow() const override { return false; }

  //! Registers the camera model and configures the UnrealCV virtual camera
  //! (field of view and image size) to match it.
  virtual void setCamera(const ze::Camera::Ptr& camera) override;

private:
  std::shared_ptr<UnrealCvClient> client_; // connection to the UnrealCV server
  mutable size_t frame_idx_;               // incremented on every render() call

};

}
|
@ -0,0 +1,88 @@
|
||||
#pragma once
|
||||
|
||||
#include <esim/common/types.hpp>
|
||||
|
||||
namespace event_camera_simulator {
|
||||
|
||||
/**
|
||||
* https://github.com/EpicGames/UnrealEngine/blob/dbced2dd59f9f5dfef1d7786fd67ad2970adf95f/Engine/Source/Runtime/Core/Public/Math/Rotator.h#L580
|
||||
* Helper function for eulerFromQuatSingularityTest, angles are expected to be given in degrees
|
||||
**/
|
||||
inline FloatType clampAxis(FloatType angle)
|
||||
{
|
||||
// returns angle in the range (-360,360)
|
||||
angle = std::fmod(angle, 360.f);
|
||||
|
||||
if (angle < 0.f)
|
||||
{
|
||||
// shift to [0,360) range
|
||||
angle += 360.f;
|
||||
}
|
||||
|
||||
return angle;
|
||||
}
|
||||
|
||||
/**
|
||||
* https://github.com/EpicGames/UnrealEngine/blob/dbced2dd59f9f5dfef1d7786fd67ad2970adf95f/Engine/Source/Runtime/Core/Public/Math/Rotator.h#L595$
|
||||
* Helper function for eulerFromQuatSingularityTest, angles are expected to be given in degrees
|
||||
**/
|
||||
inline FloatType normalizeAxis(FloatType angle)
|
||||
{
|
||||
angle = clampAxis(angle);
|
||||
if(angle > 180.f)
|
||||
{
|
||||
// shift to (-180,180]
|
||||
angle -= 360.f;
|
||||
}
|
||||
|
||||
return angle;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
* https://github.com/EpicGames/UnrealEngine/blob/f794321ffcad597c6232bc706304c0c9b4e154b2/Engine/Source/Runtime/Core/Private/Math/UnrealMath.cpp#L540
|
||||
* Quaternion given in (x,y,z,w) representation
|
||||
**/
|
||||
void quaternionToEulerUnrealEngine(const Transformation::Rotation& q, FloatType& yaw, FloatType& pitch, FloatType& roll)
|
||||
{
|
||||
const FloatType X = q.x();
|
||||
const FloatType Y = q.y();
|
||||
const FloatType Z = q.z();
|
||||
const FloatType W = q.w();
|
||||
|
||||
const FloatType SingularityTest = Z*X-W*Y;
|
||||
const FloatType YawY = 2.f*(W*Z+X*Y);
|
||||
const FloatType YawX = (1.f-2.f*(Y*Y + Z*Z));
|
||||
|
||||
// reference
|
||||
// http://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
|
||||
// http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToEuler/
|
||||
|
||||
// this value was found from experience, the above websites recommend different values
|
||||
// but that isn't the case for us, so I went through different testing, and finally found the case
|
||||
// where both of world lives happily.
|
||||
const FloatType SINGULARITY_THRESHOLD = 0.4999995;
|
||||
const FloatType RAD_TO_DEG = (180.0)/CV_PI;
|
||||
|
||||
if (SingularityTest < -SINGULARITY_THRESHOLD)
|
||||
{
|
||||
pitch = -90.;
|
||||
yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
|
||||
roll = normalizeAxis(-yaw - (2.f * std::atan2(X, W) * RAD_TO_DEG));
|
||||
}
|
||||
else if (SingularityTest > SINGULARITY_THRESHOLD)
|
||||
{
|
||||
pitch = 90.;
|
||||
yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
|
||||
roll = normalizeAxis(yaw - (2.f * std::atan2(X, W) * RAD_TO_DEG));
|
||||
}
|
||||
else
|
||||
{
|
||||
pitch = std::asin(2.f*(SingularityTest)) * RAD_TO_DEG;
|
||||
yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
|
||||
roll = std::atan2(-2.f*(W*X+Y*Z), (1.f-2.f*(X*X + Y*Y))) * RAD_TO_DEG;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace event_camera_simulator
|
21
event_camera_simulator/imp/imp_unrealcv_renderer/package.xml
Normal file
21
event_camera_simulator/imp/imp_unrealcv_renderer/package.xml
Normal file
@ -0,0 +1,21 @@
|
||||
<?xml version="1.0"?>
|
||||
<package format="2">
|
||||
<name>imp_unrealcv_renderer</name>
|
||||
<version>0.0.0</version>
|
||||
<description>Implementation of rendering engine based on Unreal Engine.</description>
|
||||
|
||||
<maintainer email="rptheiler@gmail.com">Raffael Theiler</maintainer>
|
||||
<maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>
|
||||
|
||||
<license>GPLv3</license>
|
||||
|
||||
<buildtool_depend>catkin</buildtool_depend>
|
||||
<buildtool_depend>catkin_simple</buildtool_depend>
|
||||
|
||||
<depend>esim_common</depend>
|
||||
<depend>esim_rendering</depend>
|
||||
<depend>esim_unrealcv_bridge</depend>
|
||||
<depend>gflags_catkin</depend>
|
||||
<depend>glog_catkin</depend>
|
||||
|
||||
</package>
|
@ -0,0 +1,165 @@
|
||||
#include <esim/imp_unrealcv_renderer/unrealcv_renderer.hpp>
|
||||
#include <esim/unrealcv_bridge/unrealcv_bridge.hpp>
|
||||
#include <esim/imp_unrealcv_renderer/utils.hpp>
|
||||
#include <opencv2/imgproc/imgproc.hpp>
|
||||
#include <opencv2/highgui/highgui.hpp>
|
||||
#include <ze/common/path_utils.hpp>
|
||||
#include <iomanip>
|
||||
|
||||
DEFINE_double(x_offset, 0, "X offset of the trajectory, in meters");
DEFINE_double(y_offset, 0, "Y offset of the trajectory, in meters");
DEFINE_double(z_offset, 0, "Z offset of the trajectory, in meters");
DECLARE_double(trajectory_multiplier_x);
DECLARE_double(trajectory_multiplier_y);
DECLARE_double(trajectory_multiplier_z);

// NOTE: adjacent string literals are concatenated by the compiler; a
// trailing space is required at the join, otherwise the help text reads
// "...imageswith...". Fixed below.
DEFINE_int32(unrealcv_post_median_blur, 0,
             "If > 0, median blur the raw UnrealCV images "
             "with a median filter of this size");

DEFINE_double(unrealcv_post_gaussian_blur_sigma, 0,
              "If sigma > 0, Gaussian blur the raw UnrealCV images "
              "with a Gaussian filter standard deviation sigma.");

DEFINE_string(unrealcv_output_directory, "",
              "Output directory in which to output the raw RGB images.");
|
||||
|
||||
namespace event_camera_simulator {
|
||||
|
||||
//! Connects to the UnrealCV server and resets the frame counter.
//! NOTE(review): host/port are hard-coded; consider exposing them as flags.
UnrealCvRenderer::UnrealCvRenderer()
  : client_(std::make_shared<UnrealCvClient>("localhost", "9000")), // UnrealCV default endpoint
    frame_idx_(0)
{
}
|
||||
|
||||
//! Registers the camera model and configures the UnrealCV virtual camera
//! (horizontal FOV and image size) to match it.
void UnrealCvRenderer::setCamera(const ze::Camera::Ptr& camera)
{
  camera_ = camera;

  // Derive the horizontal field of view from the focal length fx so the
  // Unreal camera matches the simulated pinhole model.
  ze::VectorX intrinsics = camera_->projectionParameters();
  const FloatType fx = intrinsics(0);
  const FloatType half_width = 0.5 * (FloatType) camera_->width();
  const FloatType hfov_deg = 2 * std::atan(half_width / fx) * 180. / CV_PI;

  client_->setCameraFOV(static_cast<float>(hfov_deg));
  client_->setCameraSize(camera->width(), camera->height());
}
|
||||
|
||||
|
||||
void UnrealCvRenderer::render(const Transformation& T_W_C, const ImagePtr& out_image, const DepthmapPtr& out_depthmap) const
|
||||
{
|
||||
CHECK_EQ(out_image->rows, camera_->height());
|
||||
CHECK_EQ(out_image->cols, camera_->width());
|
||||
|
||||
VLOG(1) << "T_W_C (ZE) = " << T_W_C;
|
||||
|
||||
const Transformation::TransformationMatrix mT_ZE_C = T_W_C.getTransformationMatrix();
|
||||
Transformation::TransformationMatrix mT_UE_ZE;
|
||||
mT_UE_ZE << 1, 0, 0, 0,
|
||||
0, -1, 0, 0,
|
||||
0, 0, 1, 0,
|
||||
0, 0, 0, 1;
|
||||
|
||||
Transformation::TransformationMatrix mT_C_UEC;
|
||||
mT_C_UEC << 0, 1, 0, 0,
|
||||
0, 0, -1, 0,
|
||||
1, 0, 0, 0,
|
||||
0, 0, 0, 1;
|
||||
|
||||
// rotate 90 deg to the right so that R_W_C = Identity <=> euler angle (0,0,0) in UnrealCV
|
||||
// Transformation::TransformationMatrix mT_rotate_90_right;
|
||||
// mT_rotate_90_right << 0, -1, 0, 0,
|
||||
// 1, 0, 0, 0,
|
||||
// 0, 0, 1, 0,
|
||||
// 0, 0, 0, 1;
|
||||
// mT_rotate_90_right << 1, 0, 0, 0,
|
||||
// 0, 1, 0, 0,
|
||||
// 0, 0, 1, 0,
|
||||
// 0, 0, 0, 1;
|
||||
// const Transformation::TransformationMatrix mT_UE_UEC = mT_rotate_90_right * mT_UE_ZE * mT_ZE_C * mT_C_UEC;
|
||||
const Transformation::TransformationMatrix mT_UE_UEC = mT_UE_ZE * mT_ZE_C * mT_C_UEC;
|
||||
const Transformation T_UE_UEC(mT_UE_UEC);
|
||||
|
||||
VLOG(1) << "T_ZE_C = " << mT_ZE_C;
|
||||
VLOG(1) << "T_UE_UEC = " << T_UE_UEC;
|
||||
|
||||
FloatType yaw, pitch, roll;
|
||||
quaternionToEulerUnrealEngine(T_UE_UEC.getRotation(), yaw, pitch, roll);
|
||||
|
||||
const FloatType x_offset = static_cast<FloatType>(FLAGS_x_offset);
|
||||
const FloatType y_offset = static_cast<FloatType>(FLAGS_y_offset);
|
||||
const FloatType z_offset = static_cast<FloatType>(FLAGS_z_offset);
|
||||
|
||||
const FloatType x = 100. * (FLAGS_trajectory_multiplier_x * T_UE_UEC.getPosition()[0] + x_offset);
|
||||
const FloatType y = 100. * (FLAGS_trajectory_multiplier_y * T_UE_UEC.getPosition()[1] + y_offset);
|
||||
const FloatType z = 100. * (FLAGS_trajectory_multiplier_z * T_UE_UEC.getPosition()[2] + z_offset);
|
||||
|
||||
VLOG(1) << yaw << " " << pitch << " " << roll;
|
||||
|
||||
CameraData cam_data = {0,
|
||||
pitch,
|
||||
yaw,
|
||||
roll,
|
||||
x,
|
||||
y,
|
||||
z};
|
||||
|
||||
client_->setCamera(cam_data);
|
||||
cv::Mat img = client_->getImage(0);
|
||||
VLOG(5) << "Got image from the UnrealCV client";
|
||||
|
||||
// (optionally) save raw RGB image to the output directory
|
||||
if(FLAGS_unrealcv_output_directory != "")
|
||||
{
|
||||
std::stringstream ss_nr;
|
||||
ss_nr << std::setw(10) << std::setfill('0') << frame_idx_;
|
||||
std::string path_frame = ze::joinPath(FLAGS_unrealcv_output_directory, "frame_" + ss_nr.str() + ".png");
|
||||
VLOG(1) << "Saving raw RGB image to: " << path_frame;
|
||||
cv::imwrite(path_frame, img, {CV_IMWRITE_PNG_COMPRESSION, 9});
|
||||
}
|
||||
|
||||
cv::Mat img_gray;
|
||||
cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);
|
||||
|
||||
if(FLAGS_unrealcv_post_median_blur > 0)
|
||||
{
|
||||
cv::medianBlur(img_gray, img_gray, FLAGS_unrealcv_post_median_blur);
|
||||
}
|
||||
|
||||
if(FLAGS_unrealcv_post_gaussian_blur_sigma > 0)
|
||||
{
|
||||
cv::GaussianBlur(img_gray, img_gray, cv::Size(-1,-1), FLAGS_unrealcv_post_gaussian_blur_sigma);
|
||||
}
|
||||
|
||||
cv::resize(img_gray, img_gray, cv::Size(camera_->width(), camera_->height()));
|
||||
img_gray.convertTo(*out_image, cv::DataType<ImageFloatType>::type, 1./255.);
|
||||
|
||||
cv::Mat depth = client_->getDepth(0);
|
||||
VLOG(5) << "Got depth map from the UnrealCV client";
|
||||
CHECK_EQ(depth.type(), CV_32F);
|
||||
|
||||
cv::resize(depth, depth, cv::Size(camera_->width(), camera_->height()));
|
||||
depth.convertTo(*out_depthmap, cv::DataType<ImageFloatType>::type);
|
||||
|
||||
// the depth provided by the unrealcv client is the distance from the scene to the camera center,
|
||||
// we need to convert it to the distance to image plane (see Github issue: https://github.com/unrealcv/unrealcv/issues/14)
|
||||
const ImageFloatType yc = 0.5 * static_cast<ImageFloatType>(camera_->height()) - 1.0;
|
||||
const ImageFloatType xc = 0.5 * static_cast<ImageFloatType>(camera_->width()) - 1.0;
|
||||
const ImageFloatType f = static_cast<ImageFloatType>(camera_->projectionParameters()(0));
|
||||
for(int y=0; y<camera_->height(); ++y)
|
||||
{
|
||||
for(int x=0; x<camera_->width(); ++x)
|
||||
{
|
||||
const ImageFloatType point_depth = (*out_depthmap)(y,x);
|
||||
const ImageFloatType dx = static_cast<ImageFloatType>(x) - xc;
|
||||
const ImageFloatType dy = static_cast<ImageFloatType>(y) - yc;
|
||||
const ImageFloatType distance_from_center = std::sqrt(dx*dx + dy*dy);
|
||||
const ImageFloatType plane_depth = point_depth / std::sqrt(1.0 + std::pow(distance_from_center / f, 2));
|
||||
(*out_depthmap)(y,x) = plane_depth;
|
||||
}
|
||||
}
|
||||
|
||||
frame_idx_++;
|
||||
}
|
||||
|
||||
} // namespace event_camera_simulator
|
Reference in New Issue
Block a user