Mirror of https://github.com/karma-riuk/hdr_esim.git (synced 2025-06-21 03:38:11 +02:00)
Reformatted the project to make it more readable (to me)
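The new layout in the hunks below is consistent with clang-format output. As a rough sketch — an assumption for illustration, since no config file appears in this diff — a .clang-format along these lines would reproduce the new style:

    # Hypothetical config; not part of the commit shown here.
    BasedOnStyle: LLVM
    IndentWidth: 4
    AccessModifierOffset: -2
    ColumnLimit: 80
    AlignAfterOpenBracket: BlockIndent
    AllowShortFunctionsOnASingleLine: Empty
    SortIncludes: true

AlignAfterOpenBracket: BlockIndent is what breaks long parameter lists one-per-line with the closing parenthesis on its own line — the most visible change in the hunks below — and SortIncludes accounts for the reordered #include blocks.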
@@ -1,35 +1,39 @@
#pragma once

#include <esim/rendering/simple_renderer_base.hpp>
#include <esim/imp_multi_objects_2d/object.hpp>
#include <esim/rendering/simple_renderer_base.hpp>

namespace event_camera_simulator {

//! A rendering engine for planar scenes
class MultiObject2DRenderer : public SimpleRenderer
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
//! A rendering engine for planar scenes
class MultiObject2DRenderer : public SimpleRenderer {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW

MultiObject2DRenderer();
MultiObject2DRenderer();

~MultiObject2DRenderer();
~MultiObject2DRenderer();

virtual bool render(const Time t,
const ImagePtr& out_image,
const OpticFlowPtr& optic_flow_map) const;
virtual bool render(
const Time t,
const ImagePtr& out_image,
const OpticFlowPtr& optic_flow_map
) const;

virtual int getWidth() const { return width_; }
virtual int getWidth() const {
return width_;
}

virtual int getHeight() const { return height_; }
virtual int getHeight() const {
return height_;
}

protected:
protected:
void outputGroundTruthData();

void outputGroundTruthData();

std::vector<std::shared_ptr<Object>> objects_;
int width_, height_;
ze::real_t tmax_;
};
std::vector<std::shared_ptr<Object>> objects_;
int width_, height_;
ze::real_t tmax_;
};

} // namespace event_camera_simulator
@@ -4,118 +4,136 @@

namespace event_camera_simulator {

bool loadTexture(const std::string& path_to_img, cv::Mat *img,
double median_blur, double gaussian_blur);
bool loadTexture(
const std::string& path_to_img,
cv::Mat* img,
double median_blur,
double gaussian_blur
);

using Affine = cv::Matx<FloatType, 3,3>;
using AffineWithJacobian = std::pair<Affine, Affine>;
using Affine = cv::Matx<FloatType, 3, 3>;
using AffineWithJacobian = std::pair<Affine, Affine>;

class MotionParameters
{
public:
MotionParameters(FloatType tmax,
FloatType theta0_deg, FloatType theta1_deg,
FloatType x0, FloatType x1,
FloatType y0, FloatType y1,
FloatType sx0, FloatType sx1,
FloatType sy0, FloatType sy1)
: tmax_(tmax),
x0_(x0),
x1_(x1),
y0_(y0),
y1_(y1),
theta0_(theta0_deg * CV_PI / 180.),
theta1_(theta1_deg * CV_PI / 180.),
sx0_(sx0),
sx1_(sx1),
sy0_(sy0),
sy1_(sy1)
{
}
class MotionParameters {
public:
MotionParameters(
FloatType tmax,
FloatType theta0_deg,
FloatType theta1_deg,
FloatType x0,
FloatType x1,
FloatType y0,
FloatType y1,
FloatType sx0,
FloatType sx1,
FloatType sy0,
FloatType sy1
)
: tmax_(tmax),
x0_(x0),
x1_(x1),
y0_(y0),
y1_(y1),
theta0_(theta0_deg * CV_PI / 180.),
theta1_(theta1_deg * CV_PI / 180.),
sx0_(sx0),
sx1_(sx1),
sy0_(sy0),
sy1_(sy1) {}

AffineWithJacobian getAffineTransformationWithJacobian(ze::real_t t)
{
// constants
const ze::real_t dtheta = theta1_ - theta0_;
const ze::real_t dx = x1_ - x0_;
const ze::real_t dy = y1_ - y0_;
const ze::real_t dsx = sx1_ - sx0_;
const ze::real_t dsy = sy1_ - sy0_;
AffineWithJacobian getAffineTransformationWithJacobian(ze::real_t t) {
// constants
const ze::real_t dtheta = theta1_ - theta0_;
const ze::real_t dx = x1_ - x0_;
const ze::real_t dy = y1_ - y0_;
const ze::real_t dsx = sx1_ - sx0_;
const ze::real_t dsy = sy1_ - sy0_;

// computation of parameter(t)
const ze::real_t theta = theta0_ + t/tmax_ * dtheta;
const ze::real_t x = x0_ + t/tmax_ * dx;
const ze::real_t y = y0_ + t/tmax_ * dy;
const ze::real_t sx = sx0_ + t/tmax_ * dsx;
const ze::real_t sy = sy0_ + t/tmax_ * dsy;
const ze::real_t stheta = std::sin(theta);
const ze::real_t ctheta = std::cos(theta);
// computation of parameter(t)
const ze::real_t theta = theta0_ + t / tmax_ * dtheta;
const ze::real_t x = x0_ + t / tmax_ * dx;
const ze::real_t y = y0_ + t / tmax_ * dy;
const ze::real_t sx = sx0_ + t / tmax_ * dsx;
const ze::real_t sy = sy0_ + t / tmax_ * dsy;
const ze::real_t stheta = std::sin(theta);
const ze::real_t ctheta = std::cos(theta);

Affine A;
A << sx * ctheta, -sy * stheta, x,
sx * stheta, sy * ctheta, y,
0, 0, 1;
Affine A;
A << sx * ctheta, -sy * stheta, x, sx * stheta, sy * ctheta, y, 0,
0, 1;

// computation of dparameter_dt(t)
const ze::real_t dtheta_dt = 1./tmax_ * dtheta;
const ze::real_t dx_dt = 1./tmax_ * dx;
const ze::real_t dy_dt = 1./tmax_ * dy;
const ze::real_t dsx_dt = 1./tmax_ * dsx;
const ze::real_t dsy_dt = 1./tmax_ * dsy;
// computation of dparameter_dt(t)
const ze::real_t dtheta_dt = 1. / tmax_ * dtheta;
const ze::real_t dx_dt = 1. / tmax_ * dx;
const ze::real_t dy_dt = 1. / tmax_ * dy;
const ze::real_t dsx_dt = 1. / tmax_ * dsx;
const ze::real_t dsy_dt = 1. / tmax_ * dsy;

cv::Matx<FloatType, 3, 3> dAdt;
dAdt << dsx_dt * ctheta - dtheta_dt * stheta * sx, -dsy_dt * stheta - dtheta_dt * ctheta * sy, dx_dt,
dsx_dt * stheta + dtheta_dt * ctheta * sx, dsy_dt * ctheta - dtheta_dt * stheta * sy, dy_dt,
0.0, 0.0, 0.0;
cv::Matx<FloatType, 3, 3> dAdt;
dAdt << dsx_dt * ctheta - dtheta_dt * stheta * sx,
-dsy_dt * stheta - dtheta_dt * ctheta * sy, dx_dt,
dsx_dt * stheta + dtheta_dt * ctheta * sx,
dsy_dt * ctheta - dtheta_dt * stheta * sy, dy_dt, 0.0, 0.0, 0.0;

return AffineWithJacobian(A, dAdt);
}
return AffineWithJacobian(A, dAdt);
}

FloatType tmax_;
FloatType x0_, x1_;
FloatType y0_, y1_;
FloatType theta0_, theta1_;
FloatType sx0_, sx1_;
FloatType sy0_, sy1_;
};
FloatType tmax_;
FloatType x0_, x1_;
FloatType y0_, y1_;
FloatType theta0_, theta1_;
FloatType sx0_, sx1_;
FloatType sy0_, sy1_;
};

class Object
{
public:
Object(const std::string path_to_texture, const cv::Size& dst_size, const MotionParameters& motion_params,
double median_blur, double gaussian_blur);
class Object {
public:
Object(
const std::string path_to_texture,
const cv::Size& dst_size,
const MotionParameters& motion_params,
double median_blur,
double gaussian_blur
);

void draw(Time t, bool is_first_layer = false);
void draw(Time t, bool is_first_layer = false);

cv::Mat canvas_;
OpticFlow flow_;
cv::Mat canvas_;
OpticFlow flow_;

MotionParameters getMotionParameters() const { return p_; }
MotionParameters getMotionParameters() const {
return p_;
}

Affine getK0() const { return K0_; }
Affine getK1() const { return K1_; }
Affine getK0() const {
return K0_;
}

private:
cv::Mat texture_;
cv::Size dst_size_;
MotionParameters p_;
Affine getK1() const {
return K1_;
}

Affine K0_, K1_;
};
private:
cv::Mat texture_;
cv::Size dst_size_;
MotionParameters p_;

void getIntensityAndAlpha(const cv::Mat& image,
int x, int y,
ImageFloatType* intensity,
ImageFloatType* alpha);
Affine K0_, K1_;
};

inline ImageFloatType bgrToGrayscale(uchar b,
uchar g,
uchar r)
{
// https://www.johndcook.com/blog/2009/08/24/algorithms-convert-color-grayscale/
return 0.07 * static_cast<ImageFloatType>(b)
+ 0.72 * static_cast<ImageFloatType>(g)
+ 0.21 * static_cast<ImageFloatType>(r);
}
void getIntensityAndAlpha(
const cv::Mat& image,
int x,
int y,
ImageFloatType* intensity,
ImageFloatType* alpha
);

inline ImageFloatType bgrToGrayscale(uchar b, uchar g, uchar r) {
// https://www.johndcook.com/blog/2009/08/24/algorithms-convert-color-grayscale/
return 0.07 * static_cast<ImageFloatType>(b)
+ 0.72 * static_cast<ImageFloatType>(g)
+ 0.21 * static_cast<ImageFloatType>(r);
}

} // namespace event_camera_simulator
@@ -1,11 +1,10 @@
#include <esim/common/utils.hpp>
#include <esim/imp_multi_objects_2d/imp_multi_objects_2d_renderer.hpp>

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <glog/logging.h>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/common/file_utils.hpp>
#include <ze/common/time_conversions.hpp>
@@ -13,268 +12,312 @@ DECLARE_double(renderer_preprocess_median_blur);

DECLARE_double(renderer_preprocess_gaussian_blur);

DEFINE_string(path_to_sequence_file, "",
"Path to the sequence file that describes the 2D, multi-object scene.");
DEFINE_string(
path_to_sequence_file,
"",
"Path to the sequence file that describes the 2D, multi-object scene."
);

DEFINE_string(path_to_output_folder, "",
"Path to the output folder where the flow ground truth files will be written");
DEFINE_string(
path_to_output_folder,
"",
"Path to the output folder where the flow ground truth files "
"will be written"
);

DEFINE_bool(output_reverse_displacement_map, false,
"Whether to output the reverse displacement map (i.e., the displacement field that maps pixels at t=tmax to pixels at t=t0");
DEFINE_bool(
output_reverse_displacement_map,
false,
"Whether to output the reverse displacement map (i.e., the "
"displacement field that maps pixels at t=tmax to pixels at t=t0"
);

namespace event_camera_simulator {

MultiObject2DRenderer::MultiObject2DRenderer()
{
// Load sequence from file
const std::string path_to_sequence_file = FLAGS_path_to_sequence_file;
MultiObject2DRenderer::MultiObject2DRenderer() {
// Load sequence from file
const std::string path_to_sequence_file = FLAGS_path_to_sequence_file;

std::ifstream sequence_file;
ze::openFileStream(path_to_sequence_file, &sequence_file);
std::ifstream sequence_file;
ze::openFileStream(path_to_sequence_file, &sequence_file);

std::string path_to_texture;
double median_blur, gaussian_blur, theta0, theta1, x0, x1, y0, y1, sx0, sx1, sy0, sy1;
std::string path_to_texture;
double median_blur, gaussian_blur, theta0, theta1, x0, x1, y0, y1, sx0,
sx1, sy0, sy1;

sequence_file >> width_ >> height_ >> tmax_;
sequence_file >> width_ >> height_ >> tmax_;

CHECK_GT(width_, 0);
CHECK_GT(height_, 0);
CHECK_GT(tmax_, 0);
CHECK_GT(width_, 0);
CHECK_GT(height_, 0);
CHECK_GT(tmax_, 0);

VLOG(1) << "width = " << width_ << ", height = " << height_;
VLOG(1) << "tmax = " << tmax_;
VLOG(1) << "width = " << width_ << ", height = " << height_;
VLOG(1) << "tmax = " << tmax_;

while(sequence_file >> path_to_texture
>> median_blur >> gaussian_blur
>> theta0 >> theta1
>> x0 >> x1
>> y0 >> y1
>> sx0 >> sx1
>> sy0 >> sy1)
{
MotionParameters params(tmax_,
theta0, theta1,
x0, x1,
y0, y1,
sx0, sx1,
sy0, sy1);
while (sequence_file >> path_to_texture >> median_blur >> gaussian_blur
>> theta0 >> theta1 >> x0 >> x1 >> y0 >> y1 >> sx0 >> sx1 >> sy0
>> sy1) {
MotionParameters params(
tmax_,
theta0,
theta1,
x0,
x1,
y0,
y1,
sx0,
sx1,
sy0,
sy1
);

objects_.emplace_back(std::make_shared<Object>(path_to_texture,
cv::Size(width_, height_),
params,
median_blur,
gaussian_blur));
}

if(FLAGS_path_to_output_folder != "")
{
outputGroundTruthData();
}
}

MultiObject2DRenderer::~MultiObject2DRenderer()
{
}

bool MultiObject2DRenderer::render(const Time t,
const ImagePtr& out_image,
const OpticFlowPtr& optic_flow_map) const {

if(ze::nanosecToSecTrunc(t) > tmax_)
{
return true;
}

CHECK_EQ(out_image->rows, height_);
CHECK_EQ(out_image->cols, width_);

out_image->setTo(0);
optic_flow_map->setTo(0.);

for(int i=0; i<objects_.size(); ++i)
{
const bool is_first_layer = (i == 0);
objects_[i]->draw(t, is_first_layer);
}

// composite the local images drawn by each object
// start from bottom image, merge it with the upper one
// then go one level up, merge the resulting image with the upper one
// and so on...
ImageFloatType intensity;
ImageFloatType alpha;

for(int i=0; i<objects_.size(); ++i)
{
const std::shared_ptr<Object>& object = objects_[i];
const cv::Mat& image = object->canvas_;
const OpticFlow& flow = object->flow_;

for(int y=0; y<out_image->rows; ++y)
{
for(int x=0; x<out_image->cols; ++x)
{
getIntensityAndAlpha(image, x, y, &intensity, &alpha);
(*out_image)(y,x) = alpha * intensity + (1.-alpha) * (*out_image)(y,x);

if(alpha > 0)
{
(*optic_flow_map)(y,x) = flow(y,x);
objects_.emplace_back(std::make_shared<Object>(
path_to_texture,
cv::Size(width_, height_),
params,
median_blur,
gaussian_blur
));
}
}

if (FLAGS_path_to_output_folder != "")
outputGroundTruthData();
}
}

return false;
}
MultiObject2DRenderer::~MultiObject2DRenderer() {}

void MultiObject2DRenderer::outputGroundTruthData()
{
// This function generates some ground truth information and output if to the FLAGS_path_to_output_folder
// In principle, this information could be transmitted in a SimulationData structure and forwarded
// to a Publisher object that writes it to disk.
// However, for technical reasons, it is more convenient to write the displacement maps here, rather than integrate instantaneous
// optic flow maps in a Publisher.
bool MultiObject2DRenderer::render(
const Time t,
const ImagePtr& out_image,
const OpticFlowPtr& optic_flow_map
) const {
if (ze::nanosecToSecTrunc(t) > tmax_)
return true;

// The following ground truth information is computed below and output to the ground truth folder:
// - Displacement maps from t=0 to t=tmax (and vice-versa)
// - Images 0 and images 1
CHECK_EQ(out_image->rows, height_);
CHECK_EQ(out_image->cols, width_);

VLOG(1) << "Will output ground truth data to folder: " << FLAGS_path_to_output_folder;
out_image->setTo(0);
optic_flow_map->setTo(0.);

// Notation: the frame at time t=0 is denoted as frame 0, and the frame at time t=tmax is denoted as frame 1
// Compute the entire displacement field from 0 to 1, for every layer
const ze::real_t t0 = 0.0;
const ze::real_t t1 = tmax_;

// Every entry in this vector is a displacement map that maps pixel locations in image 0
// to the corresponding pixel location in image 1, for the layer i
//
// this individual displacement layers are then merged together to form the final
// displacement map later
//
// Note: the reverse layer-wise displacement map can be computed as follows:
// layer_displacement_01[i] = -layer_displacement_10[i]
std::vector<OpticFlow> layer_displacements_10(objects_.size());
for(int i=0; i<objects_.size(); ++i)
{
layer_displacements_10[i] = OpticFlow(height_, width_);
const Object object = *(objects_[i]);

Affine A_t0_src = object.getMotionParameters().getAffineTransformationWithJacobian(t0).first;
Affine A_t1_src = object.getMotionParameters().getAffineTransformationWithJacobian(t1).first;
Affine A_t1_t0 = A_t1_src * A_t0_src.inv();

// M_t1_t0 maps any point on the first image to its position in the last image
Affine M_t1_t0 = object.getK1() * A_t1_t0 * object.getK1().inv();

for(int y=0; y<height_; ++y)
{
for(int x=0; x<width_; ++x)
{
FloatType x_t1 = M_t1_t0(0,0) * x + M_t1_t0(0,1) * y + M_t1_t0(0,2);
FloatType y_t1 = M_t1_t0(1,0) * x + M_t1_t0(1,1) * y + M_t1_t0(1,2);

FloatType displacement_x, displacement_y;
// if the pixel went out of the field of view, we assign a displacement of 0 (convention)
displacement_x = (x_t1 >= 0 && x_t1 < width_) ? (ImageFloatType) x_t1 - (ImageFloatType) x : 0.;
displacement_y = (y_t1 >= 0 && y_t1 < height_) ? (ImageFloatType) y_t1 - (ImageFloatType) y : 0.;

layer_displacements_10[i](y,x)[0] = displacement_x;
layer_displacements_10[i](y,x)[1] = displacement_y;
}
}
}

ImageFloatType intensity, alpha;

// First, merge the displacement map from 0 to 1
OpticFlow displacement_map_10(height_, width_); // displacement map from 0 to 1
Image image0(height_, width_);
image0.setTo(0);
for(int i=0; i<objects_.size(); ++i)
{
const bool is_first_layer = (i == 0);
objects_[i]->draw(t0, is_first_layer);
}

for(int i=0; i<objects_.size(); ++i)
{
const std::shared_ptr<Object>& object = objects_[i];
for(int y=0; y<height_; ++y)
{
for(int x=0; x<width_; ++x)
{
getIntensityAndAlpha(object->canvas_, x, y, &intensity, &alpha);
image0(y,x) = alpha * intensity + (1.-alpha) * image0(y,x);
if(alpha > 0)
{
displacement_map_10(y,x) = layer_displacements_10[i](y,x);
for (int i = 0; i < objects_.size(); ++i) {
const bool is_first_layer = (i == 0);
objects_[i]->draw(t, is_first_layer);
}
}
}
}

// Second, create displacement map from 1 to 0
OpticFlow displacement_map_01(height_, width_); // displacement map from 1 to 0
Image image1(height_, width_);
image1.setTo(0);
for(int i=0; i<objects_.size(); ++i)
{
const bool is_first_layer = (i == 0);
objects_[i]->draw(ze::secToNanosec(t1), is_first_layer);
}
// composite the local images drawn by each object
// start from bottom image, merge it with the upper one
// then go one level up, merge the resulting image with the upper one
// and so on...
ImageFloatType intensity;
ImageFloatType alpha;

for(int i=0; i<objects_.size(); ++i)
{
for(int y=0; y<height_; ++y)
{
for(int x=0; x<width_; ++x)
{
getIntensityAndAlpha(objects_[i]->canvas_, x, y, &intensity, &alpha);
image1(y,x) = alpha * intensity + (1.-alpha) * image1(y,x);
if(alpha > 0)
{
displacement_map_01(y,x) = -layer_displacements_10[i](y,x);
for (int i = 0; i < objects_.size(); ++i) {
const std::shared_ptr<Object>& object = objects_[i];
const cv::Mat& image = object->canvas_;
const OpticFlow& flow = object->flow_;

for (int y = 0; y < out_image->rows; ++y) {
for (int x = 0; x < out_image->cols; ++x) {
getIntensityAndAlpha(image, x, y, &intensity, &alpha);
(*out_image)(y, x) =
alpha * intensity + (1. - alpha) * (*out_image)(y, x);

if (alpha > 0)
(*optic_flow_map)(y, x) = flow(y, x);
}
}
}
}

return false;
}
}

std::ofstream displacement_file_10, displacement_file_01;
ze::openOutputFileStream(ze::joinPath(FLAGS_path_to_output_folder, "displacement_10.txt"), &displacement_file_10);
void MultiObject2DRenderer::outputGroundTruthData() {
// This function generates some ground truth information and output if
// to the FLAGS_path_to_output_folder In principle, this information
// could be transmitted in a SimulationData structure and forwarded to a
// Publisher object that writes it to disk. However, for technical
// reasons, it is more convenient to write the displacement maps here,
// rather than integrate instantaneous optic flow maps in a Publisher.

if(FLAGS_output_reverse_displacement_map)
{
ze::openOutputFileStream(ze::joinPath(FLAGS_path_to_output_folder, "displacement_01.txt"), &displacement_file_01);
}
// The following ground truth information is computed below and output
// to the ground truth folder:
// - Displacement maps from t=0 to t=tmax (and vice-versa)
// - Images 0 and images 1

for(int y=0; y<height_; ++y)
{
for(int x=0; x<width_; ++x)
{
displacement_file_10 << displacement_map_10(y,x)[0] << " " << displacement_map_10(y,x)[1] << std::endl;
VLOG(1) << "Will output ground truth data to folder: "
<< FLAGS_path_to_output_folder;

if(FLAGS_output_reverse_displacement_map)
{
displacement_file_01 << displacement_map_01(y,x)[0] << " " << displacement_map_01(y,x)[1] << std::endl;
}
// Notation: the frame at time t=0 is denoted as frame 0, and the frame
// at time t=tmax is denoted as frame 1 Compute the entire displacement
// field from 0 to 1, for every layer
const ze::real_t t0 = 0.0;
const ze::real_t t1 = tmax_;

// Every entry in this vector is a displacement map that maps pixel
// locations in image 0 to the corresponding pixel location in image 1,
// for the layer i
//
// this individual displacement layers are then merged together to form
// the final displacement map later
//
// Note: the reverse layer-wise displacement map can be computed as
// follows:
// layer_displacement_01[i] = -layer_displacement_10[i]
std::vector<OpticFlow> layer_displacements_10(objects_.size());
for (int i = 0; i < objects_.size(); ++i) {
layer_displacements_10[i] = OpticFlow(height_, width_);
const Object object = *(objects_[i]);

Affine A_t0_src = object.getMotionParameters()
.getAffineTransformationWithJacobian(t0)
.first;
Affine A_t1_src = object.getMotionParameters()
.getAffineTransformationWithJacobian(t1)
.first;
Affine A_t1_t0 = A_t1_src * A_t0_src.inv();

// M_t1_t0 maps any point on the first image to its position in the
// last image
Affine M_t1_t0 = object.getK1() * A_t1_t0 * object.getK1().inv();

for (int y = 0; y < height_; ++y) {
for (int x = 0; x < width_; ++x) {
FloatType x_t1 =
M_t1_t0(0, 0) * x + M_t1_t0(0, 1) * y + M_t1_t0(0, 2);
FloatType y_t1 =
M_t1_t0(1, 0) * x + M_t1_t0(1, 1) * y + M_t1_t0(1, 2);

FloatType displacement_x, displacement_y;
// if the pixel went out of the field of view, we assign a
// displacement of 0 (convention)
displacement_x =
(x_t1 >= 0 && x_t1 < width_)
? (ImageFloatType) x_t1 - (ImageFloatType) x
: 0.;
displacement_y =
(y_t1 >= 0 && y_t1 < height_)
? (ImageFloatType) y_t1 - (ImageFloatType) y
: 0.;

layer_displacements_10[i](y, x)[0] = displacement_x;
layer_displacements_10[i](y, x)[1] = displacement_y;
}
}
}

ImageFloatType intensity, alpha;

// First, merge the displacement map from 0 to 1
OpticFlow displacement_map_10(
height_,
width_
); // displacement map from 0 to 1
Image image0(height_, width_);
image0.setTo(0);
for (int i = 0; i < objects_.size(); ++i) {
const bool is_first_layer = (i == 0);
objects_[i]->draw(t0, is_first_layer);
}

for (int i = 0; i < objects_.size(); ++i) {
const std::shared_ptr<Object>& object = objects_[i];
for (int y = 0; y < height_; ++y) {
for (int x = 0; x < width_; ++x) {
getIntensityAndAlpha(
object->canvas_,
x,
y,
&intensity,
&alpha
);
image0(y, x) =
alpha * intensity + (1. - alpha) * image0(y, x);
if (alpha > 0) {
displacement_map_10(y, x) =
layer_displacements_10[i](y, x);
}
}
}
}

// Second, create displacement map from 1 to 0
OpticFlow displacement_map_01(
height_,
width_
); // displacement map from 1 to 0
Image image1(height_, width_);
image1.setTo(0);
for (int i = 0; i < objects_.size(); ++i) {
const bool is_first_layer = (i == 0);
objects_[i]->draw(ze::secToNanosec(t1), is_first_layer);
}

for (int i = 0; i < objects_.size(); ++i) {
for (int y = 0; y < height_; ++y) {
for (int x = 0; x < width_; ++x) {
getIntensityAndAlpha(
objects_[i]->canvas_,
x,
y,
&intensity,
&alpha
);
image1(y, x) =
alpha * intensity + (1. - alpha) * image1(y, x);
if (alpha > 0) {
displacement_map_01(y, x) =
-layer_displacements_10[i](y, x);
}
}
}
}

std::ofstream displacement_file_10, displacement_file_01;
ze::openOutputFileStream(
ze::joinPath(FLAGS_path_to_output_folder, "displacement_10.txt"),
&displacement_file_10
);

if (FLAGS_output_reverse_displacement_map) {
ze::openOutputFileStream(
ze::joinPath(
FLAGS_path_to_output_folder,
"displacement_01.txt"
),
&displacement_file_01
);
}

for (int y = 0; y < height_; ++y) {
for (int x = 0; x < width_; ++x) {
displacement_file_10 << displacement_map_10(y, x)[0] << " "
<< displacement_map_10(y, x)[1]
<< std::endl;

if (FLAGS_output_reverse_displacement_map) {
displacement_file_01 << displacement_map_01(y, x)[0] << " "
<< displacement_map_01(y, x)[1]
<< std::endl;
}
}
}

displacement_file_10.close();
if (FLAGS_output_reverse_displacement_map)
displacement_file_01.close();

cv::Mat disp_image0, disp_image1;
image0.convertTo(disp_image0, CV_8U, 255);
image1.convertTo(disp_image1, CV_8U, 255);
cv::imwrite(
ze::joinPath(FLAGS_path_to_output_folder, "image0.png"),
disp_image0
);
cv::imwrite(
ze::joinPath(FLAGS_path_to_output_folder, "image1.png"),
disp_image1
);
}
}

displacement_file_10.close();
if(FLAGS_output_reverse_displacement_map)
{
displacement_file_01.close();
}

cv::Mat disp_image0, disp_image1;
image0.convertTo(disp_image0, CV_8U, 255);
image1.convertTo(disp_image1, CV_8U, 255);
cv::imwrite(ze::joinPath(FLAGS_path_to_output_folder, "image0.png"), disp_image0);
cv::imwrite(ze::joinPath(FLAGS_path_to_output_folder, "image1.png"), disp_image1);
}

} // namespace event_camera_simulator
@@ -1,129 +1,131 @@
#include <esim/imp_multi_objects_2d/object.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/common/time_conversions.hpp>

namespace event_camera_simulator {

bool loadTexture(const std::string& path_to_img, cv::Mat* img,
double median_blur, double gaussian_blur)
{
CHECK(img);
VLOG(1) << "Loading texture file from file: " << path_to_img << ".";
bool loadTexture(
const std::string& path_to_img,
cv::Mat* img,
double median_blur,
double gaussian_blur
) {
CHECK(img);
VLOG(1) << "Loading texture file from file: " << path_to_img << ".";

*img = cv::imread(path_to_img, cv::IMREAD_UNCHANGED);
*img = cv::imread(path_to_img, cv::IMREAD_UNCHANGED);

if(!img->data)
{
LOG(FATAL) << "Could not open image at: " << path_to_img << ".";
return false;
}
if (!img->data) {
LOG(FATAL) << "Could not open image at: " << path_to_img << ".";
return false;
}

if(median_blur > 1)
{
VLOG(1) << "Pre-filtering the texture with median filter of size: "
<< median_blur << ".";
cv::medianBlur(*img, *img, median_blur);
}
if (median_blur > 1) {
VLOG(1) << "Pre-filtering the texture with median filter of size: "
<< median_blur << ".";
cv::medianBlur(*img, *img, median_blur);
}

if(gaussian_blur > 0)
{
VLOG(1) << "Pre-filtering the texture with gaussian filter of size: "
<< gaussian_blur << ".";
cv::GaussianBlur(*img, *img, cv::Size(21,21), gaussian_blur);
}
return true;
}

Object::Object(const std::string path_to_texture, const cv::Size& dst_size, const MotionParameters& motion_params,
double median_blur, double gaussian_blur)
: dst_size_(dst_size),
p_(motion_params)
{
loadTexture(path_to_texture, &texture_,
median_blur,
gaussian_blur);

K0_ << texture_.cols, 0, 0.5 * texture_.cols,
0, texture_.rows, 0.5 * texture_.rows,
0, 0, 1;

K1_ << dst_size_.width, 0, 0.5 * dst_size_.width,
0, dst_size_.height, 0.5 * dst_size_.height,
0, 0, 1;

canvas_ = cv::Mat(dst_size, CV_8UC4);
canvas_.setTo(0);
flow_ = OpticFlow(dst_size);
flow_.setTo(0);
}

void Object::draw(Time t, bool is_first_layer)
{
canvas_.setTo(0);

ze::real_t ts = ze::nanosecToSecTrunc(t);

ts = std::min(ts, p_.tmax_);

AffineWithJacobian A10_jac = p_.getAffineTransformationWithJacobian(ts);
Affine& A10 = A10_jac.first;
Affine& dA10dt = A10_jac.second;

// test jacobian
// const ze::real_t h = 1e-5;
// AffineWithJacobian A = p_.getAffineTransformationWithJacobian(ts);
// AffineWithJacobian Ah = p_.getAffineTransformationWithJacobian(ts+h);
// Affine dAdt_numeric = 1./h * (Ah.first-A.first);

// LOG(INFO) << dAdt_numeric;
// LOG(INFO) << A.second;

Affine M_10 = K1_ * A10 * K0_.inv();
Affine dM10_dt = K1_ * dA10dt * K0_.inv();

// warpAffine requires M_dst_src unless the WARP_INVERSE_MAP flag is passed
// in which case it will require M_src_dst
// TODO: can we do something more efficient than fully warping the 4 channels (BGR+alpha)?
int border_mode = is_first_layer ? cv::BORDER_REFLECT101 : cv::BORDER_CONSTANT;
cv::warpPerspective(texture_,
canvas_,
M_10,
canvas_.size(),
cv::INTER_LINEAR,
border_mode);

cv::Matx<FloatType, 3, 3> SS = dM10_dt * M_10.inv();

for(int y=0; y<flow_.rows; ++y)
{
for(int x=0; x<flow_.cols; ++x)
{
flow_(y,x)[0] = SS(0,0) * x + SS(0,1) * y + SS(0,2);
flow_(y,x)[1] = SS(1,0) * x + SS(1,1) * y + SS(1,2);
if (gaussian_blur > 0) {
VLOG(1
) << "Pre-filtering the texture with gaussian filter of size: "
<< gaussian_blur << ".";
cv::GaussianBlur(*img, *img, cv::Size(21, 21), gaussian_blur);
}
return true;
}
}
}

void getIntensityAndAlpha(const cv::Mat& image,
int x, int y,
ImageFloatType* intensity,
ImageFloatType* alpha)
{
CHECK(image.type() == CV_8UC3 || image.type() == CV_8UC4);
Object::Object(
const std::string path_to_texture,
const cv::Size& dst_size,
const MotionParameters& motion_params,
double median_blur,
double gaussian_blur
)
: dst_size_(dst_size),
p_(motion_params) {
loadTexture(path_to_texture, &texture_, median_blur, gaussian_blur);

if(image.type() == CV_8UC3)
{
cv::Vec3b val = image.at<cv::Vec3b>(y,x);
*intensity = bgrToGrayscale(val[0], val[1], val[2]) / 255.;
*alpha = 1.;
}
else
{
cv::Vec4b val = image.at<cv::Vec4b>(y,x);
*intensity = bgrToGrayscale(val[0], val[1], val[2]) / 255.;
*alpha = static_cast<ImageFloatType>(val[3]) / 255.;
}
}
K0_ << texture_.cols, 0, 0.5 * texture_.cols, 0, texture_.rows,
0.5 * texture_.rows, 0, 0, 1;

K1_ << dst_size_.width, 0, 0.5 * dst_size_.width, 0, dst_size_.height,
0.5 * dst_size_.height, 0, 0, 1;

canvas_ = cv::Mat(dst_size, CV_8UC4);
canvas_.setTo(0);
flow_ = OpticFlow(dst_size);
flow_.setTo(0);
}

void Object::draw(Time t, bool is_first_layer) {
canvas_.setTo(0);

ze::real_t ts = ze::nanosecToSecTrunc(t);

ts = std::min(ts, p_.tmax_);

AffineWithJacobian A10_jac = p_.getAffineTransformationWithJacobian(ts);
Affine& A10 = A10_jac.first;
Affine& dA10dt = A10_jac.second;

// test jacobian
// const ze::real_t h = 1e-5;
// AffineWithJacobian A = p_.getAffineTransformationWithJacobian(ts);
// AffineWithJacobian Ah =
// p_.getAffineTransformationWithJacobian(ts+h); Affine dAdt_numeric
// = 1./h * (Ah.first-A.first);

// LOG(INFO) << dAdt_numeric;
// LOG(INFO) << A.second;

Affine M_10 = K1_ * A10 * K0_.inv();
Affine dM10_dt = K1_ * dA10dt * K0_.inv();

// warpAffine requires M_dst_src unless the WARP_INVERSE_MAP flag is
// passed in which case it will require M_src_dst
// TODO: can we do something more efficient than fully warping the 4
// channels (BGR+alpha)?
int border_mode =
is_first_layer ? cv::BORDER_REFLECT101 : cv::BORDER_CONSTANT;
cv::warpPerspective(
texture_,
canvas_,
M_10,
canvas_.size(),
cv::INTER_LINEAR,
border_mode
);

cv::Matx<FloatType, 3, 3> SS = dM10_dt * M_10.inv();

for (int y = 0; y < flow_.rows; ++y) {
for (int x = 0; x < flow_.cols; ++x) {
flow_(y, x)[0] = SS(0, 0) * x + SS(0, 1) * y + SS(0, 2);
flow_(y, x)[1] = SS(1, 0) * x + SS(1, 1) * y + SS(1, 2);
}
}
}

void getIntensityAndAlpha(
const cv::Mat& image,
int x,
int y,
ImageFloatType* intensity,
ImageFloatType* alpha
) {
CHECK(image.type() == CV_8UC3 || image.type() == CV_8UC4);

if (image.type() == CV_8UC3) {
cv::Vec3b val = image.at<cv::Vec3b>(y, x);
*intensity = bgrToGrayscale(val[0], val[1], val[2]) / 255.;
*alpha = 1.;
} else {
cv::Vec4b val = image.at<cv::Vec4b>(y, x);
*intensity = bgrToGrayscale(val[0], val[1], val[2]) / 255.;
*alpha = static_cast<ImageFloatType>(val[3]) / 255.;
}
}

} // namespace event_camera_simulator
@@ -2,134 +2,164 @@

#include <esim/rendering/renderer_base.hpp>

class Shader; // fwd
class Model; // fwd
class Shader; // fwd
class Model; // fwd
class GLFWwindow; // fwd

namespace event_camera_simulator {

//! Rendering engine based on OpenGL
class OpenGLRenderer : public Renderer
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
//! Rendering engine based on OpenGL
class OpenGLRenderer : public Renderer {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW

ZE_POINTER_TYPEDEFS(Renderer);
ZE_POINTER_TYPEDEFS(Renderer);

OpenGLRenderer();
OpenGLRenderer();

~OpenGLRenderer();
~OpenGLRenderer();

//! Render an image at a given pose.
virtual void render(const Transformation& T_W_C,
const std::vector<Transformation>& T_W_OBJ,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap) const;
//! Render an image at a given pose.
virtual void render(
const Transformation& T_W_C,
const std::vector<Transformation>& T_W_OBJ,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const;

//! Returns true if the rendering engine can compute optic flow, false otherwise
virtual bool canComputeOpticFlow() const override { return true; }
//! Returns true if the rendering engine can compute optic flow, false
//! otherwise
virtual bool canComputeOpticFlow() const override {
return true;
}

//! Render an image + depth map + optic flow map at a given pose,
//! given the camera linear and angular velocity
virtual void renderWithFlow(const Transformation& T_W_C,
const LinearVelocity& v_WC,
const AngularVelocity& w_WC,
const std::vector<Transformation>& T_W_OBJ,
const std::vector<LinearVelocity>& linear_velocity_obj,
const std::vector<AngularVelocity>& angular_velocity_obj,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap,
const OpticFlowPtr& optic_flow_map) const override;
//! Render an image + depth map + optic flow map at a given pose,
//! given the camera linear and angular velocity
virtual void renderWithFlow(
const Transformation& T_W_C,
const LinearVelocity& v_WC,
const AngularVelocity& w_WC,
const std::vector<Transformation>& T_W_OBJ,
const std::vector<LinearVelocity>& linear_velocity_obj,
const std::vector<AngularVelocity>& angular_velocity_obj,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap,
const OpticFlowPtr& optic_flow_map
) const override;

//! Sets the camera
virtual void setCamera(const ze::Camera::Ptr& camera);
//! Sets the camera
virtual void setCamera(const ze::Camera::Ptr& camera);

protected:
protected:
void init();

void init();
/**
@brief basic function to produce an OpenGL projection matrix and
associated viewport parameters which match a given set of camera
intrinsics. This is currently written for the Eigen linear algebra
library, however it should be straightforward to port to any 4x4 matrix
class.
@param[out] frustum Eigen::Matrix4d projection matrix. Eigen stores
these matrices in column-major (i.e. OpenGL) order.
@param[in] alpha x-axis focal length, from camera intrinsic matrix
@param[in] alpha y-axis focal length, from camera intrinsic matrix
@param[in] skew x and y axis skew, from camera intrinsic matrix
@param[in] u0 image origin x-coordinate, from camera intrinsic matrix
@param[in] v0 image origin y-coordinate, from camera intrinsic matrix
@param[in] img_width image width, in pixels
@param[in] img_height image height, in pixels
@param[in] near_clip near clipping plane z-location, can be set
arbitrarily > 0, controls the mapping of z-coordinates for OpenGL
@param[in] far_clip far clipping plane z-location, can be set
arbitrarily > near_clip, controls the mapping of z-coordinate for
OpenGL

/**
@brief basic function to produce an OpenGL projection matrix and associated viewport parameters
which match a given set of camera intrinsics. This is currently written for the Eigen linear
algebra library, however it should be straightforward to port to any 4x4 matrix class.
@param[out] frustum Eigen::Matrix4d projection matrix. Eigen stores these matrices in column-major (i.e. OpenGL) order.
@param[in] alpha x-axis focal length, from camera intrinsic matrix
@param[in] alpha y-axis focal length, from camera intrinsic matrix
@param[in] skew x and y axis skew, from camera intrinsic matrix
@param[in] u0 image origin x-coordinate, from camera intrinsic matrix
@param[in] v0 image origin y-coordinate, from camera intrinsic matrix
@param[in] img_width image width, in pixels
@param[in] img_height image height, in pixels
@param[in] near_clip near clipping plane z-location, can be set arbitrarily > 0, controls the mapping of z-coordinates for OpenGL
@param[in] far_clip far clipping plane z-location, can be set arbitrarily > near_clip, controls the mapping of z-coordinate for OpenGL
Code adapted from:
- http://ksimek.github.io/2013/06/03/calibrated_cameras_in_opengl/
- https://pastebin.com/h8nYNWJY
*/
void build_opengl_projection_for_intrinsics(
Eigen::Matrix4d& frustum,
double alpha,
double beta,
double u0,
double v0,
int img_width,
int img_height,
double near_clip,
double far_clip
) const {
// These parameters define the final viewport that is rendered into
// by the camera.
double L = 0;
double R = img_width;
double B = 0;
double T = img_height;

Code adapted from:
- http://ksimek.github.io/2013/06/03/calibrated_cameras_in_opengl/
- https://pastebin.com/h8nYNWJY
*/
void build_opengl_projection_for_intrinsics(Eigen::Matrix4d &frustum, double alpha, double beta, double u0, double v0, int img_width, int img_height, double near_clip, double far_clip) const {
// near and far clipping planes, these only matter for the mapping
// from world-space z-coordinate into the depth coordinate for
// OpenGL
double N = near_clip;
double F = far_clip;

// These parameters define the final viewport that is rendered into by
// the camera.
double L = 0;
double R = img_width;
double B = 0;
double T = img_height;
double skew = 0.0;

// near and far clipping planes, these only matter for the mapping from
// world-space z-coordinate into the depth coordinate for OpenGL
double N = near_clip;
double F = far_clip;
// construct an orthographic matrix which maps from projected
// coordinates to normalized device coordinates in the range
// [-1, 1]. OpenGL then maps coordinates in NDC to the current
// viewport
Eigen::Matrix4d ortho = Eigen::Matrix4d::Zero();
ortho(0, 0) = 2.0 / (R - L);
ortho(0, 3) = -(R + L) / (R - L);
ortho(1, 1) = 2.0 / (T - B);
ortho(1, 3) = -(T + B) / (T - B);
ortho(2, 2) = -2.0 / (F - N);
ortho(2, 3) = -(F + N) / (F - N);
ortho(3, 3) = 1.0;

double skew = 0.0;
// construct a projection matrix, this is identical to the
// projection matrix computed for the intrinsicx, except an
// additional row is inserted to map the z-coordinate to
// OpenGL.
Eigen::Matrix4d tproj = Eigen::Matrix4d::Zero();
tproj(0, 0) = alpha;
tproj(0, 1) = skew;
tproj(0, 2) = -u0;
tproj(1, 1) = beta;
tproj(1, 2) = -v0;
tproj(2, 2) = N + F;
tproj(2, 3) = N * F;
tproj(3, 2) = -1.0;

// construct an orthographic matrix which maps from projected
// coordinates to normalized device coordinates in the range
// [-1, 1]. OpenGL then maps coordinates in NDC to the current
// viewport
Eigen::Matrix4d ortho = Eigen::Matrix4d::Zero();
ortho(0,0) = 2.0/(R-L); ortho(0,3) = -(R+L)/(R-L);
ortho(1,1) = 2.0/(T-B); ortho(1,3) = -(T+B)/(T-B);
ortho(2,2) = -2.0/(F-N); ortho(2,3) = -(F+N)/(F-N);
ortho(3,3) = 1.0;
// resulting OpenGL frustum is the product of the orthographic
// mapping to normalized device coordinates and the augmented
// camera intrinsic matrix
frustum = ortho * tproj;
}

// construct a projection matrix, this is identical to the
// projection matrix computed for the intrinsicx, except an
// additional row is inserted to map the z-coordinate to
// OpenGL.
Eigen::Matrix4d tproj = Eigen::Matrix4d::Zero();
tproj(0,0) = alpha; tproj(0,1) = skew; tproj(0,2) = -u0;
tproj(1,1) = beta; tproj(1,2) = -v0;
tproj(2,2) = N+F; tproj(2,3) = N*F;
tproj(3,2) = -1.0;
GLFWwindow* window;
std::unique_ptr<Shader> shader;
std::unique_ptr<Shader> optic_flow_shader;
std::unique_ptr<Model> our_model;
std::vector<std::unique_ptr<Model>> dynamic_objects_model;

// resulting OpenGL frustum is the product of the orthographic
// mapping to normalized device coordinates and the augmented
// camera intrinsic matrix
frustum = ortho*tproj;
}
bool is_initialized_;

GLFWwindow* window;
std::unique_ptr<Shader> shader;
std::unique_ptr<Shader> optic_flow_shader;
std::unique_ptr<Model> our_model;
std::vector<std::unique_ptr<Model>> dynamic_objects_model;
unsigned int width_;
unsigned int height_;

bool is_initialized_;
unsigned int texture1;
unsigned int texture2;

unsigned int width_;
unsigned int height_;
unsigned int VBO, VAO;
unsigned int multisampled_fbo, multisampled_color_buf,
multisampled_depth_buf;
unsigned int fbo, color_buf, depth_buf,
depth_buf_of; // framebuffer for color and depth images
unsigned int fbo_of, of_buf; // framebuffer for optic flow

unsigned int texture1;
unsigned int texture2;

unsigned int VBO, VAO;
unsigned int multisampled_fbo, multisampled_color_buf, multisampled_depth_buf;
unsigned int fbo, color_buf, depth_buf, depth_buf_of; // framebuffer for color and depth images
unsigned int fbo_of, of_buf; // framebuffer for optic flow

float zmin = 0.1f;
float zmax = 10.0f;
};
float zmin = 0.1f;
float zmax = 10.0f;
};

} // namespace event_camera_simulator
[File diff suppressed because it is too large]
@@ -4,48 +4,60 @@

namespace event_camera_simulator {

//! A rendering engine for planar scenes
class PanoramaRenderer : public Renderer
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
//! A rendering engine for planar scenes
class PanoramaRenderer : public Renderer {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW

PanoramaRenderer(const Image& texture,
const Transformation::Rotation& R_W_P);
PanoramaRenderer(
const Image& texture, const Transformation::Rotation& R_W_P
);

~PanoramaRenderer();
~PanoramaRenderer();

//! Render image and depth map for a given camera pose
virtual void render(const Transformation& T_W_C, const ImagePtr &out_image, const DepthmapPtr &out_depthmap) const;
//! Render image and depth map for a given camera pose
virtual void render(
const Transformation& T_W_C,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const;

void render(const Transformation& T_W_C, const std::vector<Transformation>& T_W_OBJ, const ImagePtr &out_image, const DepthmapPtr &out_depthmap) const override
{
render(T_W_C, out_image, out_depthmap);
}
void render(
const Transformation& T_W_C,
const std::vector<Transformation>& T_W_OBJ,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const override {
render(T_W_C, out_image, out_depthmap);
}

//! Returns true if the rendering engine can compute optic flow, false otherwise
virtual bool canComputeOpticFlow() const override { return false; }
//! Returns true if the rendering engine can compute optic flow, false
//! otherwise
virtual bool canComputeOpticFlow() const override {
return false;
}

virtual void setCamera(const ze::Camera::Ptr& camera) override;
virtual void setCamera(const ze::Camera::Ptr& camera) override;

protected:
protected:
void precomputePixelToBearingLookupTable();
void projectToPanorama(
const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint
) const;

void precomputePixelToBearingLookupTable();
void projectToPanorama(const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint) const;
// Texture mapped on the plane
Image texture_;
Transformation::Rotation R_W_P_;

// Texture mapped on the plane
Image texture_;
Transformation::Rotation R_W_P_;
// Intrinsic parameters of the panorama camera
FloatType fx_, fy_;
ze::Keypoint principal_point_;

// Intrinsic parameters of the panorama camera
FloatType fx_, fy_;
ze::Keypoint principal_point_;
// Precomputed lookup table from keypoint -> bearing vector
ze::Bearings bearings_C_;

// Precomputed lookup table from keypoint -> bearing vector
ze::Bearings bearings_C_;

// Preallocated warping matrices
mutable cv::Mat_<float> map_x_, map_y_;
};
// Preallocated warping matrices
mutable cv::Mat_<float> map_x_, map_y_;
};

} // namespace event_camera_simulator
@@ -1,113 +1,111 @@
|
||||
#include <esim/common/utils.hpp>
|
||||
#include <esim/imp_panorama_renderer/panorama_renderer.hpp>
|
||||
|
||||
#include <opencv2/imgproc/imgproc.hpp>
|
||||
#include <opencv2/calib3d/calib3d.hpp>
|
||||
#include <glog/logging.h>
|
||||
#include <opencv2/calib3d/calib3d.hpp>
|
||||
#include <opencv2/core/eigen.hpp>
|
||||
#include <opencv2/imgproc/imgproc.hpp>
|
||||
|
||||
namespace event_camera_simulator {
|
||||
|
||||
PanoramaRenderer::PanoramaRenderer(const Image& texture,
|
||||
const Transformation::Rotation &R_W_P)
|
||||
: texture_(texture),
|
||||
R_W_P_(R_W_P)
|
||||
{
|
||||
principal_point_ = ze::Keypoint(0.5 * texture_.cols,
|
||||
0.5 * texture_.rows);
|
||||
fx_ = texture_.cols / (2.0 * CV_PI);
|
||||
fy_ = texture_.rows / CV_PI;
|
||||
LOG(INFO) << "Initialized panoramic camera with size: "
|
||||
<< texture_.cols << "x" << texture_.rows
|
||||
<< ", principal point: " << principal_point_.transpose()
|
||||
<< ", fx = " << fx_ << "px , fy = " << fy_ << " px";
|
||||
}
|
||||
|
||||
PanoramaRenderer::~PanoramaRenderer()
|
||||
{
|
||||
}
|
||||
|
||||
void PanoramaRenderer::setCamera(const ze::Camera::Ptr& camera)
|
||||
{
|
||||
CHECK(camera);
|
||||
camera_ = camera;
|
||||
|
||||
precomputePixelToBearingLookupTable();
|
||||
|
||||
// Preallocate memory for the warping maps
|
||||
cv::Size sensor_size(camera->width(), camera->height());
|
||||
map_x_ = cv::Mat_<float>::zeros(sensor_size);
|
||||
map_y_ = cv::Mat_<float>::zeros(sensor_size);
|
||||
}
|
||||
|
||||
|
||||
void PanoramaRenderer::precomputePixelToBearingLookupTable()
|
||||
{
|
||||
// points_C is a matrix containing the coordinates of each pixel coordinate in the image plane
ze::Keypoints points_C(2, camera_->width() * camera_->height());
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
points_C.col(x + camera_->width() * y) = ze::Keypoint(x,y);
}
}
bearings_C_ = camera_->backProjectVectorized(points_C);
bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
}

void PanoramaRenderer::projectToPanorama(const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint) const
{
CHECK(keypoint);
static constexpr FloatType kEpsilon = 1e-10;

const FloatType X = f[0];
const FloatType Y = f[1];
const FloatType Z = f[2];

const FloatType rho2 = X*X+Y*Y+Z*Z;
const FloatType rho = std::sqrt(rho2);

CHECK_GE(rho, kEpsilon);

const FloatType phi = std::atan2(X, Z);
const FloatType theta = std::asin(-Y/rho);

*keypoint = principal_point_ + ze::Keypoint(phi * fx_, -theta * fy_);
}

void PanoramaRenderer::render(const Transformation& T_W_C, const ImagePtr& out_image, const DepthmapPtr& out_depthmap) const
{
CHECK_EQ(out_image->rows, camera_->height());
CHECK_EQ(out_image->cols, camera_->width());

const Transformation::RotationMatrix R_P_C = R_W_P_.inverse().getRotationMatrix() * T_W_C.getRotationMatrix();

ze::Bearings bearings_P = R_P_C * bearings_C_;
ze::Keypoint keypoint;
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
const ze::Bearing& f = bearings_P.col(x + camera_->width() * y);

// project bearing vector to panorama image
projectToPanorama(f, &keypoint);

map_x_(y,x) = static_cast<float>(keypoint[0]);
map_y_(y,x) = static_cast<float>(keypoint[1]);
}
}

cv::remap(texture_, *out_image, map_x_, map_y_, cv::INTER_LINEAR, cv::BORDER_REFLECT_101);

if(out_depthmap)
{
static constexpr ImageFloatType infinity = 1e10;
out_depthmap->setTo(infinity);
}
}
PanoramaRenderer::PanoramaRenderer(
const Image& texture, const Transformation::Rotation& R_W_P
)
: texture_(texture),
R_W_P_(R_W_P) {
principal_point_ =
ze::Keypoint(0.5 * texture_.cols, 0.5 * texture_.rows);
fx_ = texture_.cols / (2.0 * CV_PI);
fy_ = texture_.rows / CV_PI;
LOG(INFO) << "Initialized panoramic camera with size: " << texture_.cols
<< "x" << texture_.rows
<< ", principal point: " << principal_point_.transpose()
<< ", fx = " << fx_ << "px , fy = " << fy_ << " px";
}

PanoramaRenderer::~PanoramaRenderer() {}

void PanoramaRenderer::setCamera(const ze::Camera::Ptr& camera) {
CHECK(camera);
camera_ = camera;

precomputePixelToBearingLookupTable();

// Preallocate memory for the warping maps
cv::Size sensor_size(camera->width(), camera->height());
map_x_ = cv::Mat_<float>::zeros(sensor_size);
map_y_ = cv::Mat_<float>::zeros(sensor_size);
}

void PanoramaRenderer::precomputePixelToBearingLookupTable() {
// points_C is a matrix containing the coordinates of each pixel
// coordinate in the image plane
ze::Keypoints points_C(2, camera_->width() * camera_->height());
for (int y = 0; y < camera_->height(); ++y)
for (int x = 0; x < camera_->width(); ++x)
points_C.col(x + camera_->width() * y) = ze::Keypoint(x, y);
bearings_C_ = camera_->backProjectVectorized(points_C);
bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
}

void PanoramaRenderer::projectToPanorama(
const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint
) const {
CHECK(keypoint);
static constexpr FloatType kEpsilon = 1e-10;

const FloatType X = f[0];
const FloatType Y = f[1];
const FloatType Z = f[2];

const FloatType rho2 = X * X + Y * Y + Z * Z;
const FloatType rho = std::sqrt(rho2);

CHECK_GE(rho, kEpsilon);

const FloatType phi = std::atan2(X, Z);
const FloatType theta = std::asin(-Y / rho);

*keypoint = principal_point_ + ze::Keypoint(phi * fx_, -theta * fy_);
}

void PanoramaRenderer::render(
const Transformation& T_W_C,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const {
CHECK_EQ(out_image->rows, camera_->height());
CHECK_EQ(out_image->cols, camera_->width());

const Transformation::RotationMatrix R_P_C =
R_W_P_.inverse().getRotationMatrix() * T_W_C.getRotationMatrix();

ze::Bearings bearings_P = R_P_C * bearings_C_;
ze::Keypoint keypoint;
for (int y = 0; y < camera_->height(); ++y) {
for (int x = 0; x < camera_->width(); ++x) {
const ze::Bearing& f = bearings_P.col(x + camera_->width() * y);

// project bearing vector to panorama image
projectToPanorama(f, &keypoint);

map_x_(y, x) = static_cast<float>(keypoint[0]);
map_y_(y, x) = static_cast<float>(keypoint[1]);
}
}

cv::remap(
texture_,
*out_image,
map_x_,
map_y_,
cv::INTER_LINEAR,
cv::BORDER_REFLECT_101
);

if (out_depthmap) {
static constexpr ImageFloatType infinity = 1e10;
out_depthmap->setTo(infinity);
}
}

} // namespace event_camera_simulator
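A note on the projection math above: projectToPanorama implements a standard equirectangular mapping, reducing a bearing (X, Y, Z) to azimuth phi = atan2(X, Z) and elevation theta = asin(-Y / rho); the scales fx_ = cols / (2 * pi) and fy_ = rows / pi make the texture span a full 360 x 180 degrees around the camera. A minimal standalone sketch of the same mapping (hypothetical helper, not part of this commit):

#include <cmath>

// Maps a bearing vector (X, Y, Z) to equirectangular pixel coordinates (u, v)
// for a width x height texture covering 360 x 180 degrees.
inline void bearingToEquirect(double X, double Y, double Z,
                              int width, int height,
                              double* u, double* v) {
    const double kPi = 3.14159265358979323846;
    const double rho = std::sqrt(X * X + Y * Y + Z * Z);
    const double phi = std::atan2(X, Z);      // azimuth, in (-pi, pi]
    const double theta = std::asin(-Y / rho); // elevation, in [-pi/2, pi/2]
    *u = 0.5 * width + phi * (width / (2.0 * kPi));
    *v = 0.5 * height - theta * (height / kPi);
}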
@ -4,40 +4,45 @@

namespace event_camera_simulator {

//! A rendering engine for planar scenes
class PanoramaRenderer : public Renderer
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
//! A rendering engine for planar scenes
class PanoramaRenderer : public Renderer {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW

PanoramaRenderer(const Image& texture,
const Transformation::Rotation& R_W_P);
PanoramaRenderer(
const Image& texture, const Transformation::Rotation& R_W_P
);

~PanoramaRenderer();
~PanoramaRenderer();

//! Render image and depth map for a given camera pose
virtual void render(const Transformation& T_W_C, const ImagePtr &out_image, const DepthmapPtr &out_depthmap) const override;
//! Render image and depth map for a given camera pose
virtual void render(
const Transformation& T_W_C,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const override;

virtual void setCamera(const ze::Camera::Ptr& camera) override;
virtual void setCamera(const ze::Camera::Ptr& camera) override;

protected:
void precomputePixelToBearingLookupTable();
void projectToPanorama(const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint) const;
protected:
void precomputePixelToBearingLookupTable();
void projectToPanorama(
const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint
) const;

// Texture mapped on the plane
Image texture_;
Transformation::Rotation R_W_P_;
// Texture mapped on the plane
Image texture_;
Transformation::Rotation R_W_P_;

// Intrinsic parameters of the panorama camera
FloatType fx_, fy_;
ze::Keypoint principal_point_;
// Intrinsic parameters of the panorama camera
FloatType fx_, fy_;
ze::Keypoint principal_point_;

// Precomputed lookup table from keypoint -> bearing vector
ze::Bearings bearings_C_;
// Precomputed lookup table from keypoint -> bearing vector
ze::Bearings bearings_C_;

// Preallocated warping matrices
mutable cv::Mat_<float> map_x_, map_y_;
};
// Preallocated warping matrices
mutable cv::Mat_<float> map_x_, map_y_;
};

} // namespace event_camera_simulator
@ -4,56 +4,68 @@

namespace event_camera_simulator {

//! A rendering engine for planar scenes
class PlanarRenderer : public Renderer
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
//! A rendering engine for planar scenes
class PlanarRenderer : public Renderer {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW

PlanarRenderer(const Image& texture,
const Camera::Ptr& cam_src,
const Transformation &T_W_P,
FloatType z_min,
FloatType z_max,
bool extend_border);
PlanarRenderer(
const Image& texture,
const Camera::Ptr& cam_src,
const Transformation& T_W_P,
FloatType z_min,
FloatType z_max,
bool extend_border
);

~PlanarRenderer();
~PlanarRenderer();

//! Render image and depth map for a given camera pose
virtual void render(const Transformation& T_W_C, const ImagePtr &out_image, const DepthmapPtr &out_depthmap) const;
void render(const Transformation& T_W_C, const std::vector<Transformation>& T_W_OBJ, const ImagePtr &out_image, const DepthmapPtr &out_depthmap) const override
{
render(T_W_C, out_image, out_depthmap);
}
//! Render image and depth map for a given camera pose
virtual void render(
const Transformation& T_W_C,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const;

void render(
const Transformation& T_W_C,
const std::vector<Transformation>& T_W_OBJ,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const override {
render(T_W_C, out_image, out_depthmap);
}

//! Returns true if the rendering engine can compute optic flow, false otherwise
virtual bool canComputeOpticFlow() const override { return false; }
//! Returns true if the rendering engine can compute optic flow, false
//! otherwise
virtual bool canComputeOpticFlow() const override {
return false;
}

virtual void setCamera(const ze::Camera::Ptr& camera) override;
virtual void setCamera(const ze::Camera::Ptr& camera) override;

protected:
void precomputePixelToBearingLookupTable();
protected:
void precomputePixelToBearingLookupTable();

// Texture mapped on the plane
Image texture_;
Camera::Ptr cam_src_;
CalibrationMatrix K_src_, K_src_inv_;
// Texture mapped on the plane
Image texture_;
Camera::Ptr cam_src_;
CalibrationMatrix K_src_, K_src_inv_;

// Plane parameters
Transformation T_W_P_;
// Plane parameters
Transformation T_W_P_;

// Clipping parameters
FloatType z_min_;
FloatType z_max_;
// Clipping parameters
FloatType z_min_;
FloatType z_max_;

bool extend_border_;
bool extend_border_;

// Precomputed lookup table from keypoint -> bearing vector
ze::Bearings bearings_C_;
// Precomputed lookup table from keypoint -> bearing vector
ze::Bearings bearings_C_;

// Preallocated warping matrices
mutable cv::Mat_<float> map_x_, map_y_;
};
// Preallocated warping matrices
mutable cv::Mat_<float> map_x_, map_y_;
};

} // namespace event_camera_simulator
@ -1,42 +1,47 @@
#pragma once

#include <ze/common/macros.hpp>
#include <esim/common/types.hpp>
#include <ze/common/macros.hpp>

namespace event_camera_simulator {

//! Represents a rendering engine that generates images (and other outputs,
//! such as depth maps, or optical flow maps) given a scene and a camera position.
class Renderer
{
public:
ZE_POINTER_TYPEDEFS(Renderer);
//! Represents a rendering engine that generates images (and other outputs,
//! such as depth maps, or optical flow maps) given a scene and a camera
//! position.
class Renderer {
public:
ZE_POINTER_TYPEDEFS(Renderer);

Renderer() {}
Renderer() {}

//! Render an image at a given pose.
virtual void render(const Transformation& T_W_C,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap) const = 0;
//! Render an image at a given pose.
virtual void render(
const Transformation& T_W_C,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const = 0;

//! Render an image + depth map + optic flow map at a given pose,
//! given the camera linear and angular velocity
virtual void render(const Transformation& T_W_C,
const LinearVelocity& v_WC,
const AngularVelocity& w_WC,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap,
const OpticFlowPtr& optic_flow_map) const {}
//! Render an image + depth map + optic flow map at a given pose,
//! given the camera linear and angular velocity
virtual void render(
const Transformation& T_W_C,
const LinearVelocity& v_WC,
const AngularVelocity& w_WC,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap,
const OpticFlowPtr& optic_flow_map
) const {}

//! Sets the camera
virtual void setCamera(const ze::Camera::Ptr& camera) = 0;
//! Sets the camera
virtual void setCamera(const ze::Camera::Ptr& camera) = 0;

//! Get the camera rig
const ze::Camera::Ptr& getCamera() const { return camera_; }
//! Get the camera rig
const ze::Camera::Ptr& getCamera() const {
return camera_;
}

protected:
ze::Camera::Ptr camera_;
};
protected:
ze::Camera::Ptr camera_;
};

} // namespace event_camera_simulator
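Implementing this interface only requires the two pure-virtual members, render and setCamera; the velocity-aware render overload has an empty default body and is optional. A minimal sketch of a custom subclass (hypothetical ConstantRenderer, using the project's usual type aliases; not part of this commit):

// Fills the output image with a constant gray level, ignoring the pose.
class ConstantRenderer : public Renderer {
public:
    virtual void render(
        const Transformation& T_W_C,
        const ImagePtr& out_image,
        const DepthmapPtr& out_depthmap
    ) const override {
        out_image->setTo(0.5); // constant intensity
        if (out_depthmap)
            out_depthmap->setTo(1.0); // arbitrary fixed depth
    }

    virtual void setCamera(const ze::Camera::Ptr& camera) override {
        camera_ = camera;
    }
};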
@ -1,11 +1,11 @@
#pragma once

#include <gflags/gflags.h>
#include <esim/rendering/renderer_base.hpp>
#include <gflags/gflags.h>

namespace event_camera_simulator {

bool loadPreprocessedImage(const std::string& path_to_img, cv::Mat *img);
Renderer::Ptr loadRendererFromGflags();
bool loadPreprocessedImage(const std::string& path_to_img, cv::Mat* img);
Renderer::Ptr loadRendererFromGflags();

} // namespace event_camera_simulator
@ -1,113 +1,111 @@
#include <esim/common/utils.hpp>
#include <esim/rendering/panorama_renderer.hpp>

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <glog/logging.h>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>

namespace event_camera_simulator {

PanoramaRenderer::PanoramaRenderer(const Image& texture,
const Transformation::Rotation &R_W_P)
: texture_(texture),
R_W_P_(R_W_P)
{
principal_point_ = ze::Keypoint(0.5 * texture_.cols,
0.5 * texture_.rows);
fx_ = texture_.cols / (2.0 * CV_PI);
fy_ = texture_.rows / CV_PI;
LOG(INFO) << "Initialized panoramic camera with size: "
<< texture_.cols << "x" << texture_.rows
<< ", principal point: " << principal_point_.transpose()
<< ", fx = " << fx_ << "px , fy = " << fy_ << " px";
}

PanoramaRenderer::~PanoramaRenderer()
{
}

void PanoramaRenderer::setCamera(const ze::Camera::Ptr& camera)
{
CHECK(camera);
camera_ = camera;

precomputePixelToBearingLookupTable();

// Preallocate memory for the warping maps
cv::Size sensor_size(camera->width(), camera->height());
map_x_ = cv::Mat_<float>::zeros(sensor_size);
map_y_ = cv::Mat_<float>::zeros(sensor_size);
}

void PanoramaRenderer::precomputePixelToBearingLookupTable()
{
// points_C is a matrix containing the coordinates of each pixel coordinate in the image plane
ze::Keypoints points_C(2, camera_->width() * camera_->height());
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
points_C.col(x + camera_->width() * y) = ze::Keypoint(x,y);
}
}
bearings_C_ = camera_->backProjectVectorized(points_C);
bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
}

void PanoramaRenderer::projectToPanorama(const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint) const
{
CHECK(keypoint);
static constexpr FloatType kEpsilon = 1e-10;

const FloatType X = f[0];
const FloatType Y = f[1];
const FloatType Z = f[2];

const FloatType rho2 = X*X+Y*Y+Z*Z;
const FloatType rho = std::sqrt(rho2);

CHECK_GE(rho, kEpsilon);

const FloatType phi = std::atan2(X, Z);
const FloatType theta = std::asin(-Y/rho);

*keypoint = principal_point_ + ze::Keypoint(phi * fx_, -theta * fy_);
}

void PanoramaRenderer::render(const Transformation& T_W_C, const ImagePtr& out_image, const DepthmapPtr& out_depthmap) const
{
CHECK_EQ(out_image->rows, camera_->height());
CHECK_EQ(out_image->cols, camera_->width());

const Transformation::RotationMatrix R_P_C = R_W_P_.inverse().getRotationMatrix() * T_W_C.getRotationMatrix();

ze::Bearings bearings_P = R_P_C * bearings_C_;
ze::Keypoint keypoint;
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
const ze::Bearing& f = bearings_P.col(x + camera_->width() * y);

// project bearing vector to panorama image
projectToPanorama(f, &keypoint);

map_x_(y,x) = static_cast<float>(keypoint[0]);
map_y_(y,x) = static_cast<float>(keypoint[1]);
}
}

cv::remap(texture_, *out_image, map_x_, map_y_, CV_INTER_LINEAR, cv::BORDER_REFLECT_101);

if(out_depthmap)
{
static constexpr ImageFloatType infinity = 1e10;
out_depthmap->setTo(infinity);
}
}
PanoramaRenderer::PanoramaRenderer(
const Image& texture, const Transformation::Rotation& R_W_P
)
: texture_(texture),
R_W_P_(R_W_P) {
principal_point_ =
ze::Keypoint(0.5 * texture_.cols, 0.5 * texture_.rows);
fx_ = texture_.cols / (2.0 * CV_PI);
fy_ = texture_.rows / CV_PI;
LOG(INFO) << "Initialized panoramic camera with size: " << texture_.cols
<< "x" << texture_.rows
<< ", principal point: " << principal_point_.transpose()
<< ", fx = " << fx_ << "px , fy = " << fy_ << " px";
}

PanoramaRenderer::~PanoramaRenderer() {}

void PanoramaRenderer::setCamera(const ze::Camera::Ptr& camera) {
CHECK(camera);
camera_ = camera;

precomputePixelToBearingLookupTable();

// Preallocate memory for the warping maps
cv::Size sensor_size(camera->width(), camera->height());
map_x_ = cv::Mat_<float>::zeros(sensor_size);
map_y_ = cv::Mat_<float>::zeros(sensor_size);
}

void PanoramaRenderer::precomputePixelToBearingLookupTable() {
// points_C is a matrix containing the coordinates of each pixel
// coordinate in the image plane
ze::Keypoints points_C(2, camera_->width() * camera_->height());
for (int y = 0; y < camera_->height(); ++y)
for (int x = 0; x < camera_->width(); ++x)
points_C.col(x + camera_->width() * y) = ze::Keypoint(x, y);
bearings_C_ = camera_->backProjectVectorized(points_C);
bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
}

void PanoramaRenderer::projectToPanorama(
const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint
) const {
CHECK(keypoint);
static constexpr FloatType kEpsilon = 1e-10;

const FloatType X = f[0];
const FloatType Y = f[1];
const FloatType Z = f[2];

const FloatType rho2 = X * X + Y * Y + Z * Z;
const FloatType rho = std::sqrt(rho2);

CHECK_GE(rho, kEpsilon);

const FloatType phi = std::atan2(X, Z);
const FloatType theta = std::asin(-Y / rho);

*keypoint = principal_point_ + ze::Keypoint(phi * fx_, -theta * fy_);
}

void PanoramaRenderer::render(
const Transformation& T_W_C,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const {
CHECK_EQ(out_image->rows, camera_->height());
CHECK_EQ(out_image->cols, camera_->width());

const Transformation::RotationMatrix R_P_C =
R_W_P_.inverse().getRotationMatrix() * T_W_C.getRotationMatrix();

ze::Bearings bearings_P = R_P_C * bearings_C_;
ze::Keypoint keypoint;
for (int y = 0; y < camera_->height(); ++y) {
for (int x = 0; x < camera_->width(); ++x) {
const ze::Bearing& f = bearings_P.col(x + camera_->width() * y);

// project bearing vector to panorama image
projectToPanorama(f, &keypoint);

map_x_(y, x) = static_cast<float>(keypoint[0]);
map_y_(y, x) = static_cast<float>(keypoint[1]);
}
}

cv::remap(
texture_,
*out_image,
map_x_,
map_y_,
CV_INTER_LINEAR,
cv::BORDER_REFLECT_101
);

if (out_depthmap) {
static constexpr ImageFloatType infinity = 1e10;
out_depthmap->setTo(infinity);
}
}

} // namespace event_camera_simulator
@ -1,146 +1,158 @@
#include <esim/common/utils.hpp>
#include <esim/imp_planar_renderer/planar_renderer.hpp>

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <glog/logging.h>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/common/timer_collection.hpp>

DECLARE_TIMER(TimerPlanarRenderer, timers_planar_renderer,
compute_remap_maps,
inverse_homography,
remap,
compute_depthmap
);
DECLARE_TIMER(
TimerPlanarRenderer,
timers_planar_renderer,
compute_remap_maps,
inverse_homography,
remap,
compute_depthmap
);

namespace event_camera_simulator {

PlanarRenderer::PlanarRenderer(const Image& texture,
const Camera::Ptr &cam_src,
const Transformation& T_W_P,
FloatType z_min,
FloatType z_max,
bool extend_border)
: texture_(texture),
cam_src_(cam_src),
T_W_P_(T_W_P),
z_min_(z_min),
z_max_(z_max),
extend_border_(extend_border)
{
K_src_ = calibrationMatrixFromCamera(cam_src_);
K_src_inv_ = K_src_.inverse();
VLOG(1) << "K_src = " << K_src_;

LOG(INFO) << "T_W_P = " << T_W_P_;
}

PlanarRenderer::~PlanarRenderer()
{
timers_planar_renderer.saveToFile("/tmp", "planar_renderer.csv");
}

void PlanarRenderer::setCamera(const ze::Camera::Ptr& camera)
{
CHECK(camera);
camera_ = camera;

precomputePixelToBearingLookupTable();

// Preallocate memory for the warping maps
cv::Size sensor_size(camera->width(), camera->height());
map_x_ = cv::Mat_<float>::zeros(sensor_size);
map_y_ = cv::Mat_<float>::zeros(sensor_size);
}

void PlanarRenderer::precomputePixelToBearingLookupTable()
{
// points_C is a matrix containing the coordinates of each pixel coordinate in the image plane
ze::Keypoints points_C(2, camera_->width() * camera_->height());
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
points_C.col(x + camera_->width() * y) = ze::Keypoint(x,y);
}
}
bearings_C_ = camera_->backProjectVectorized(points_C);
bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
}

void PlanarRenderer::render(const Transformation& T_W_C, const ImagePtr& out_image, const DepthmapPtr& out_depthmap) const
{
CHECK_EQ(out_image->rows, camera_->height());
CHECK_EQ(out_image->cols, camera_->width());

const Transformation T_C_W = T_W_C.inverse();
const Transformation T_C_P = T_C_W * T_W_P_;
const Transformation T_P_C = T_C_P.inverse();

ze::Matrix33 tmp;
tmp.col(0) = T_C_P.getRotationMatrix().col(0);
tmp.col(1) = T_C_P.getRotationMatrix().col(1);
tmp.col(2) = T_C_P.getPosition();

VLOG_EVERY_N(3, 100) << "T_C_P = " << T_C_P;
VLOG_EVERY_N(3, 100) << "tmp = " << tmp;
HomographyMatrix H_C_P = tmp;

HomographyMatrix H_P_C;
{
auto t = timers_planar_renderer[TimerPlanarRenderer::inverse_homography].timeScope();
H_P_C = H_C_P.inverse();
}

{
auto t = timers_planar_renderer[TimerPlanarRenderer::compute_remap_maps].timeScope();
ze::Bearings bearings_P = H_P_C * bearings_C_;
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
const ze::Bearing& f = bearings_P.col(x + camera_->width() * y);
map_x_(y,x) = static_cast<float>(K_src_(0,0) * f[0]/f[2] + K_src_(0,2));
map_y_(y,x) = static_cast<float>(K_src_(1,1) * f[1]/f[2] + K_src_(1,2));
}
}
}

int border = (extend_border_) ? cv::BORDER_REFLECT : cv::BORDER_CONSTANT;
{
auto t = timers_planar_renderer[TimerPlanarRenderer::remap].timeScope();
cv::remap(texture_, *out_image, map_x_, map_y_, cv::INTER_LINEAR, border, 0.8);
}

// Compute depth map in dst
{
auto t = timers_planar_renderer[TimerPlanarRenderer::compute_depthmap].timeScope();
Vector3 X;
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
X = bearings_C_.col(x + camera_->width() * y);
// @TODO: derive this formula for the depth to explain it
const FloatType numer = -T_P_C.getPosition()[2];
const FloatType denom = T_P_C.getRotationMatrix().row(2) * X;
const FloatType z = numer / denom;

if(out_depthmap)
{
(*out_depthmap)(y,x) = (ImageFloatType) z;
}

// clip depth to a reasonable depth range
if(z < z_min_ || z > z_max_)
{
(*out_image)(y,x) = 0;
}
}
}
}
}
PlanarRenderer::PlanarRenderer(
const Image& texture,
const Camera::Ptr& cam_src,
const Transformation& T_W_P,
FloatType z_min,
FloatType z_max,
bool extend_border
)
: texture_(texture),
cam_src_(cam_src),
T_W_P_(T_W_P),
z_min_(z_min),
z_max_(z_max),
extend_border_(extend_border) {
K_src_ = calibrationMatrixFromCamera(cam_src_);
K_src_inv_ = K_src_.inverse();
VLOG(1) << "K_src = " << K_src_;

LOG(INFO) << "T_W_P = " << T_W_P_;
}

PlanarRenderer::~PlanarRenderer() {
timers_planar_renderer.saveToFile("/tmp", "planar_renderer.csv");
}

void PlanarRenderer::setCamera(const ze::Camera::Ptr& camera) {
CHECK(camera);
camera_ = camera;

precomputePixelToBearingLookupTable();

// Preallocate memory for the warping maps
cv::Size sensor_size(camera->width(), camera->height());
map_x_ = cv::Mat_<float>::zeros(sensor_size);
map_y_ = cv::Mat_<float>::zeros(sensor_size);
}

void PlanarRenderer::precomputePixelToBearingLookupTable() {
// points_C is a matrix containing the coordinates of each pixel
// coordinate in the image plane
ze::Keypoints points_C(2, camera_->width() * camera_->height());
for (int y = 0; y < camera_->height(); ++y)
for (int x = 0; x < camera_->width(); ++x)
points_C.col(x + camera_->width() * y) = ze::Keypoint(x, y);
bearings_C_ = camera_->backProjectVectorized(points_C);
bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
}

void PlanarRenderer::render(
const Transformation& T_W_C,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const {
CHECK_EQ(out_image->rows, camera_->height());
CHECK_EQ(out_image->cols, camera_->width());

const Transformation T_C_W = T_W_C.inverse();
const Transformation T_C_P = T_C_W * T_W_P_;
const Transformation T_P_C = T_C_P.inverse();

ze::Matrix33 tmp;
tmp.col(0) = T_C_P.getRotationMatrix().col(0);
tmp.col(1) = T_C_P.getRotationMatrix().col(1);
tmp.col(2) = T_C_P.getPosition();

VLOG_EVERY_N(3, 100) << "T_C_P = " << T_C_P;
VLOG_EVERY_N(3, 100) << "tmp = " << tmp;
HomographyMatrix H_C_P = tmp;

HomographyMatrix H_P_C;
{
auto t =
timers_planar_renderer[TimerPlanarRenderer::inverse_homography]
.timeScope();
H_P_C = H_C_P.inverse();
}

{
auto t =
timers_planar_renderer[TimerPlanarRenderer::compute_remap_maps]
.timeScope();
ze::Bearings bearings_P = H_P_C * bearings_C_;
for (int y = 0; y < camera_->height(); ++y) {
for (int x = 0; x < camera_->width(); ++x) {
const ze::Bearing& f =
bearings_P.col(x + camera_->width() * y);
map_x_(y, x) = static_cast<float>(
K_src_(0, 0) * f[0] / f[2] + K_src_(0, 2)
);
map_y_(y, x) = static_cast<float>(
K_src_(1, 1) * f[1] / f[2] + K_src_(1, 2)
);
}
}
}

int border =
(extend_border_) ? cv::BORDER_REFLECT : cv::BORDER_CONSTANT;
{
auto t =
timers_planar_renderer[TimerPlanarRenderer::remap].timeScope();
cv::remap(
texture_,
*out_image,
map_x_,
map_y_,
cv::INTER_LINEAR,
border,
0.8
);
}

// Compute depth map in dst
{
auto t =
timers_planar_renderer[TimerPlanarRenderer::compute_depthmap]
.timeScope();
Vector3 X;
for (int y = 0; y < camera_->height(); ++y) {
for (int x = 0; x < camera_->width(); ++x) {
X = bearings_C_.col(x + camera_->width() * y);
// @TODO: derive this formula for the depth to explain it
const FloatType numer = -T_P_C.getPosition()[2];
const FloatType denom =
T_P_C.getRotationMatrix().row(2) * X;
const FloatType z = numer / denom;

if (out_depthmap)
(*out_depthmap)(y, x) = (ImageFloatType) z;

// clip depth to a reasonable depth range
if (z < z_min_ || z > z_max_)
(*out_image)(y, x) = 0;
}
}
}
}

} // namespace event_camera_simulator
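Filling in the @TODO above, the depth formula in the compute_depthmap block can be derived as follows. The bearings are normalized so that X[2] = 1, so the scene point seen by a pixel is p_C = z * X, with z the sought depth along the optical axis. Expressed in the plane frame, p_P = R_P_C * (z * X) + t_P_C, and the textured plane is the locus Z_P = 0. Setting the third component of p_P to zero gives z * (R_P_C.row(2) * X) + t_P_C[2] = 0, hence z = -t_P_C[2] / (R_P_C.row(2) * X), which is exactly numer / denom in the code.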
@ -1,5 +1,3 @@
#include <esim/rendering/renderer_base.hpp>

namespace event_camera_simulator {

} // namespace event_camera_simulator
namespace event_camera_simulator {} // namespace event_camera_simulator
@ -1,164 +1,235 @@
#include <esim/common/utils.hpp>
#include <esim/rendering/renderer_factory.hpp>
#include <esim/rendering/planar_renderer.hpp>
#include <esim/rendering/panorama_renderer.hpp>
#include <ze/cameras/camera_models.hpp>
#include <ze/cameras/camera_impl.hpp>
#include <ze/common/logging.hpp>
#include <esim/rendering/planar_renderer.hpp>
#include <esim/rendering/renderer_factory.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/cameras/camera_impl.hpp>
#include <ze/cameras/camera_models.hpp>
#include <ze/common/logging.hpp>

DEFINE_int32(renderer_type, 0, " 0: Planar renderer");

DEFINE_string(renderer_texture, "",
"Path to image which will be used to texture the plane");
DEFINE_string(
renderer_texture,
"",
"Path to image which will be used to texture the plane"
);

DEFINE_double(renderer_hfov_cam_source_deg, 130.0,
"Horizontal FoV of the source camera (that captured the image on the plane)");
DEFINE_double(
renderer_hfov_cam_source_deg,
130.0,
"Horizontal FoV of the source camera (that captured the image on "
"the plane)"
);

DEFINE_double(renderer_preprocess_median_blur, 0,
"Kernel size of the preprocessing median blur.");
DEFINE_double(
renderer_preprocess_median_blur,
0,
"Kernel size of the preprocessing median blur."
);

DEFINE_double(renderer_preprocess_gaussian_blur, 0,
"Amount of preprocessing Gaussian blur.");
DEFINE_double(
renderer_preprocess_gaussian_blur,
0,
"Amount of preprocessing Gaussian blur."
);

DEFINE_double(renderer_plane_x, 0.0,
"x position of the center of the plane, in world coordinates");
DEFINE_double(
renderer_plane_x,
0.0,
"x position of the center of the plane, in world coordinates"
);

DEFINE_double(renderer_plane_y, 0.0,
"y position of the center of the plane, in world coordinates");
DEFINE_double(
renderer_plane_y,
0.0,
"y position of the center of the plane, in world coordinates"
);

DEFINE_double(renderer_plane_z, -1.0,
"z position of the center of the plane, in world coordinates");
DEFINE_double(
renderer_plane_z,
-1.0,
"z position of the center of the plane, in world coordinates"
);

DEFINE_double(renderer_plane_qw, 0.0,
"w component of the quaternion q_W_P (orientation of the plane with respect to the world)");
DEFINE_double(
renderer_plane_qw,
0.0,
"w component of the quaternion q_W_P (orientation of the plane "
"with respect to the world)"
);

DEFINE_double(renderer_plane_qx, 1.0,
"x component of the quaternion q_W_P (orientation of the plane with respect to the world)");
DEFINE_double(
renderer_plane_qx,
1.0,
"x component of the quaternion q_W_P (orientation of the plane "
"with respect to the world)"
);

DEFINE_double(renderer_plane_qy, 0.0,
"y component of the quaternion q_W_P (orientation of the plane with respect to the world)");
DEFINE_double(
renderer_plane_qy,
0.0,
"y component of the quaternion q_W_P (orientation of the plane "
"with respect to the world)"
);

DEFINE_double(renderer_plane_qz, 0.0,
"z component of the quaternion q_W_P (orientation of the plane with respect to the world)");
DEFINE_double(
renderer_plane_qz,
0.0,
"z component of the quaternion q_W_P (orientation of the plane "
"with respect to the world)"
);

DEFINE_double(renderer_z_min, 0.01,
"Minimum clipping distance.");
DEFINE_double(renderer_z_min, 0.01, "Minimum clipping distance.");

DEFINE_double(renderer_z_max, 10.0,
"Maximum clipping distance.");
DEFINE_double(renderer_z_max, 10.0, "Maximum clipping distance.");

DEFINE_bool(renderer_extend_border, false,
"Whether to extend the borders of the plane to infinity or not.");
DEFINE_bool(
renderer_extend_border,
false,
"Whether to extend the borders of the plane to infinity or not."
);

DEFINE_double(renderer_panorama_qw, 0.0,
"w component of the quaternion q_W_P (orientation of the panorama with respect to the world)");
DEFINE_double(
renderer_panorama_qw,
0.0,
"w component of the quaternion q_W_P (orientation of the "
"panorama with respect to the world)"
);

DEFINE_double(renderer_panorama_qx, 1.0,
"x component of the quaternion q_W_P (orientation of the panorama with respect to the world)");
DEFINE_double(
renderer_panorama_qx,
1.0,
"x component of the quaternion q_W_P (orientation of the "
"panorama with respect to the world)"
);

DEFINE_double(renderer_panorama_qy, 0.0,
"y component of the quaternion q_W_P (orientation of the panorama with respect to the world)");
DEFINE_double(
renderer_panorama_qy,
0.0,
"y component of the quaternion q_W_P (orientation of the "
"panorama with respect to the world)"
);

DEFINE_double(renderer_panorama_qz, 0.0,
"z component of the quaternion q_W_P (orientation of the panorama with respect to the world)");
DEFINE_double(
renderer_panorama_qz,
0.0,
"z component of the quaternion q_W_P (orientation of the "
"panorama with respect to the world)"
);

namespace event_camera_simulator {

bool loadPreprocessedImage(const std::string& path_to_img, cv::Mat* img)
{
CHECK(img);
VLOG(1) << "Loading texture file from file: " << FLAGS_renderer_texture << ".";
*img = cv::imread(path_to_img, 0);
if(!img->data)
{
LOG(FATAL) << "Could not open image at: " << FLAGS_renderer_texture << ".";
return false;
}

if(FLAGS_renderer_preprocess_median_blur > 1)
{
VLOG(1) << "Pre-filtering the texture with median filter of size: "
<< FLAGS_renderer_preprocess_median_blur << ".";
cv::medianBlur(*img, *img, FLAGS_renderer_preprocess_median_blur);
}

if(FLAGS_renderer_preprocess_gaussian_blur > 0)
{
VLOG(1) << "Pre-filtering the texture with gaussian filter of size: "
<< FLAGS_renderer_preprocess_gaussian_blur << ".";
cv::GaussianBlur(*img, *img, cv::Size(15,15), FLAGS_renderer_preprocess_gaussian_blur);
}

img->convertTo(*img, cv::DataType<ImageFloatType>::type, 1.0/255.0);

return true;
}
bool loadPreprocessedImage(const std::string& path_to_img, cv::Mat* img) {
CHECK(img);
VLOG(1) << "Loading texture file from file: " << FLAGS_renderer_texture
<< ".";
*img = cv::imread(path_to_img, 0);
if (!img->data) {
LOG(FATAL) << "Could not open image at: " << FLAGS_renderer_texture
<< ".";
return false;
}

if (FLAGS_renderer_preprocess_median_blur > 1) {
VLOG(1) << "Pre-filtering the texture with median filter of size: "
<< FLAGS_renderer_preprocess_median_blur << ".";
cv::medianBlur(*img, *img, FLAGS_renderer_preprocess_median_blur);
}

if (FLAGS_renderer_preprocess_gaussian_blur > 0) {
VLOG(1
) << "Pre-filtering the texture with gaussian filter of size: "
<< FLAGS_renderer_preprocess_gaussian_blur << ".";
cv::GaussianBlur(
*img,
*img,
cv::Size(15, 15),
FLAGS_renderer_preprocess_gaussian_blur
);
}

img->convertTo(*img, cv::DataType<ImageFloatType>::type, 1.0 / 255.0);

return true;
}

Renderer::Ptr loadRendererFromGflags()
{
Renderer::Ptr renderer;

switch (FLAGS_renderer_type)
{
case 0: // Planar renderer
{
cv::Mat img_src;
if(!loadPreprocessedImage(FLAGS_renderer_texture, &img_src))
{
return nullptr;
}

double f_src = hfovToFocalLength(FLAGS_renderer_hfov_cam_source_deg, img_src.cols);
Camera::Ptr cam_src = std::make_shared<ze::PinholeCamera>(
img_src.cols, img_src.rows, ze::CameraType::Pinhole,
(Vector4() << f_src, f_src, 0.5 * img_src.cols, 0.5 * img_src.rows).finished(),
ze::VectorX());

Transformation T_W_P;
T_W_P.getPosition() = ze::Position(FLAGS_renderer_plane_x,
FLAGS_renderer_plane_y,
FLAGS_renderer_plane_z);

T_W_P.getRotation() = ze::Quaternion(FLAGS_renderer_plane_qw,
FLAGS_renderer_plane_qx,
FLAGS_renderer_plane_qy,
FLAGS_renderer_plane_qz);

renderer.reset(new PlanarRenderer(img_src, cam_src,
T_W_P,
FLAGS_renderer_z_min,
FLAGS_renderer_z_max,
FLAGS_renderer_extend_border));

break;
}
case 1: // Panorama renderer
{
cv::Mat img_src;
if(!loadPreprocessedImage(FLAGS_renderer_texture, &img_src))
{
return nullptr;
}

Transformation::Rotation R_W_P;
R_W_P = ze::Quaternion(FLAGS_renderer_panorama_qw,
FLAGS_renderer_panorama_qx,
FLAGS_renderer_panorama_qy,
FLAGS_renderer_panorama_qz);

renderer.reset(new PanoramaRenderer(img_src, R_W_P));
break;
}
default:
{
LOG(FATAL) << "Renderer type is not known.";
break;
}
}

return renderer;
}
Renderer::Ptr loadRendererFromGflags() {
Renderer::Ptr renderer;

switch (FLAGS_renderer_type) {
case 0: // Planar renderer
{
cv::Mat img_src;
if (!loadPreprocessedImage(FLAGS_renderer_texture, &img_src))
return nullptr;

double f_src = hfovToFocalLength(
FLAGS_renderer_hfov_cam_source_deg,
img_src.cols
);
Camera::Ptr cam_src = std::make_shared<ze::PinholeCamera>(
img_src.cols,
img_src.rows,
ze::CameraType::Pinhole,
(Vector4() << f_src,
f_src,
0.5 * img_src.cols,
0.5 * img_src.rows)
.finished(),
ze::VectorX()
);

Transformation T_W_P;
T_W_P.getPosition() = ze::Position(
FLAGS_renderer_plane_x,
FLAGS_renderer_plane_y,
FLAGS_renderer_plane_z
);

T_W_P.getRotation() = ze::Quaternion(
FLAGS_renderer_plane_qw,
FLAGS_renderer_plane_qx,
FLAGS_renderer_plane_qy,
FLAGS_renderer_plane_qz
);

renderer.reset(new PlanarRenderer(
img_src,
cam_src,
T_W_P,
FLAGS_renderer_z_min,
FLAGS_renderer_z_max,
FLAGS_renderer_extend_border
));

break;
}
case 1: // Panorama renderer
{
cv::Mat img_src;
if (!loadPreprocessedImage(FLAGS_renderer_texture, &img_src))
return nullptr;

Transformation::Rotation R_W_P;
R_W_P = ze::Quaternion(
FLAGS_renderer_panorama_qw,
FLAGS_renderer_panorama_qx,
FLAGS_renderer_panorama_qy,
FLAGS_renderer_panorama_qz
);

renderer.reset(new PanoramaRenderer(img_src, R_W_P));
break;
}
default: {
LOG(FATAL) << "Renderer type is not known.";
break;
}
}

return renderer;
}

} // namespace event_camera_simulator
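For reference, the factory is driven entirely by the gflags defined above; a typical invocation selecting the planar renderer could look like the following (the binary name and texture path are placeholders):

./esim_node \
    --renderer_type=0 \
    --renderer_texture=/path/to/texture.png \
    --renderer_hfov_cam_source_deg=130.0 \
    --renderer_plane_z=-1.0 \
    --renderer_z_min=0.01 --renderer_z_max=10.0 \
    --renderer_extend_border=true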
@ -4,31 +4,39 @@

namespace event_camera_simulator {

class UnrealCvClient; // fwd
class UnrealCvClient; // fwd

class UnrealCvRenderer : public Renderer
{
public:
UnrealCvRenderer();
class UnrealCvRenderer : public Renderer {
public:
UnrealCvRenderer();

//! Render image and depth map for a given camera pose
virtual void render(const Transformation& T_W_C, const ImagePtr &out_image, const DepthmapPtr &out_depthmap) const;
//! Render image and depth map for a given camera pose
virtual void render(
const Transformation& T_W_C,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const;

void render(const Transformation& T_W_C, const std::vector<Transformation>& T_W_OBJ, const ImagePtr &out_image, const DepthmapPtr &out_depthmap) const
{
render(T_W_C, out_image, out_depthmap);
}
void render(
const Transformation& T_W_C,
const std::vector<Transformation>& T_W_OBJ,
const ImagePtr& out_image,
const DepthmapPtr& out_depthmap
) const {
render(T_W_C, out_image, out_depthmap);
}

//! Returns true if the rendering engine can compute optic flow, false otherwise
virtual bool canComputeOpticFlow() const override { return false; }
//! Returns true if the rendering engine can compute optic flow, false
//! otherwise
virtual bool canComputeOpticFlow() const override {
return false;
}

virtual void setCamera(const ze::Camera::Ptr& camera) override;
virtual void setCamera(const ze::Camera::Ptr& camera) override;

private:
std::shared_ptr<UnrealCvClient> client_;
mutable size_t frame_idx_;
};
private:
std::shared_ptr<UnrealCvClient> client_;
mutable size_t frame_idx_;
};

}
} // namespace event_camera_simulator
@ -4,85 +4,86 @@

namespace event_camera_simulator {

/**
* https://github.com/EpicGames/UnrealEngine/blob/dbced2dd59f9f5dfef1d7786fd67ad2970adf95f/Engine/Source/Runtime/Core/Public/Math/Rotator.h#L580
* Helper function for eulerFromQuatSingularityTest, angles are expected to be given in degrees
**/
inline FloatType clampAxis(FloatType angle)
{
// returns angle in the range (-360,360)
angle = std::fmod(angle, 360.f);

if (angle < 0.f)
{
// shift to [0,360) range
angle += 360.f;
}

return angle;
}
/**
* https://github.com/EpicGames/UnrealEngine/blob/dbced2dd59f9f5dfef1d7786fd67ad2970adf95f/Engine/Source/Runtime/Core/Public/Math/Rotator.h#L580
* Helper function for eulerFromQuatSingularityTest, angles are expected to
*be given in degrees
**/
inline FloatType clampAxis(FloatType angle) {
// returns angle in the range (-360,360)
angle = std::fmod(angle, 360.f);

if (angle < 0.f) {
// shift to [0,360) range
angle += 360.f;
}

return angle;
}

/**
* https://github.com/EpicGames/UnrealEngine/blob/dbced2dd59f9f5dfef1d7786fd67ad2970adf95f/Engine/Source/Runtime/Core/Public/Math/Rotator.h#L595$
* Helper function for eulerFromQuatSingularityTest, angles are expected to be given in degrees
**/
inline FloatType normalizeAxis(FloatType angle)
{
angle = clampAxis(angle);
if(angle > 180.f)
{
// shift to (-180,180]
angle -= 360.f;
}

return angle;
}
/**
* https://github.com/EpicGames/UnrealEngine/blob/dbced2dd59f9f5dfef1d7786fd67ad2970adf95f/Engine/Source/Runtime/Core/Public/Math/Rotator.h#L595$
* Helper function for eulerFromQuatSingularityTest, angles are expected to
*be given in degrees
**/
inline FloatType normalizeAxis(FloatType angle) {
angle = clampAxis(angle);
if (angle > 180.f) {
// shift to (-180,180]
angle -= 360.f;
}

return angle;
}

/**
*
* https://github.com/EpicGames/UnrealEngine/blob/f794321ffcad597c6232bc706304c0c9b4e154b2/Engine/Source/Runtime/Core/Private/Math/UnrealMath.cpp#L540
* Quaternion given in (x,y,z,w) representation
**/
void quaternionToEulerUnrealEngine(const Transformation::Rotation& q, FloatType& yaw, FloatType& pitch, FloatType& roll)
{
const FloatType X = q.x();
const FloatType Y = q.y();
const FloatType Z = q.z();
const FloatType W = q.w();

const FloatType SingularityTest = Z*X-W*Y;
const FloatType YawY = 2.f*(W*Z+X*Y);
const FloatType YawX = (1.f-2.f*(Y*Y + Z*Z));

// reference
// http://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
// http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToEuler/

// this value was found from experience, the above websites recommend different values
// but that isn't the case for us, so I went through different testing, and finally found the case
// where both of world lives happily.
const FloatType SINGULARITY_THRESHOLD = 0.4999995;
const FloatType RAD_TO_DEG = (180.0)/CV_PI;

if (SingularityTest < -SINGULARITY_THRESHOLD)
{
pitch = -90.;
yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
roll = normalizeAxis(-yaw - (2.f * std::atan2(X, W) * RAD_TO_DEG));
}
else if (SingularityTest > SINGULARITY_THRESHOLD)
{
pitch = 90.;
yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
roll = normalizeAxis(yaw - (2.f * std::atan2(X, W) * RAD_TO_DEG));
}
else
{
pitch = std::asin(2.f*(SingularityTest)) * RAD_TO_DEG;
yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
roll = std::atan2(-2.f*(W*X+Y*Z), (1.f-2.f*(X*X + Y*Y))) * RAD_TO_DEG;
}
}
/**
*
* https://github.com/EpicGames/UnrealEngine/blob/f794321ffcad597c6232bc706304c0c9b4e154b2/Engine/Source/Runtime/Core/Private/Math/UnrealMath.cpp#L540
* Quaternion given in (x,y,z,w) representation
**/
void quaternionToEulerUnrealEngine(
const Transformation::Rotation& q,
FloatType& yaw,
FloatType& pitch,
FloatType& roll
) {
const FloatType X = q.x();
const FloatType Y = q.y();
const FloatType Z = q.z();
const FloatType W = q.w();

const FloatType SingularityTest = Z * X - W * Y;
const FloatType YawY = 2.f * (W * Z + X * Y);
const FloatType YawX = (1.f - 2.f * (Y * Y + Z * Z));

// reference
// http://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
// http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToEuler/
// this value was found from experience, the above websites recommend
// different values but that isn't the case for us, so I went through
// different testing, and finally found the case where both of world
// lives happily.
const FloatType SINGULARITY_THRESHOLD = 0.4999995;
const FloatType RAD_TO_DEG = (180.0) / CV_PI;

if (SingularityTest < -SINGULARITY_THRESHOLD) {
pitch = -90.;
yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
roll = normalizeAxis(-yaw - (2.f * std::atan2(X, W) * RAD_TO_DEG));
} else if (SingularityTest > SINGULARITY_THRESHOLD) {
pitch = 90.;
yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
roll = normalizeAxis(yaw - (2.f * std::atan2(X, W) * RAD_TO_DEG));
} else {
pitch = std::asin(2.f * (SingularityTest)) * RAD_TO_DEG;
yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
roll = std::atan2(
-2.f * (W * X + Y * Z),
(1.f - 2.f * (X * X + Y * Y))
)
* RAD_TO_DEG;
}
}

} // namespace event_camera_simulator
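In the non-singular branch above, the code evaluates the standard quaternion-to-Euler identities, written here with the code's own variables: pitch = asin(2 * (Z*X - W*Y)) * RAD_TO_DEG, yaw = atan2(2 * (W*Z + X*Y), 1 - 2 * (Y*Y + Z*Z)) * RAD_TO_DEG, and roll = atan2(-2 * (W*X + Y*Z), 1 - 2 * (X*X + Y*Y)) * RAD_TO_DEG. The two singular branches handle gimbal lock: as |pitch| approaches 90 degrees, yaw and roll are no longer independent, so roll is recovered from yaw and atan2(X, W) and wrapped back into (-180, 180] with normalizeAxis.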
@ -1,10 +1,10 @@
#include <esim/imp_unrealcv_renderer/unrealcv_renderer.hpp>
#include <esim/unrealcv_bridge/unrealcv_bridge.hpp>
#include <esim/imp_unrealcv_renderer/utils.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <ze/common/path_utils.hpp>
#include <esim/unrealcv_bridge/unrealcv_bridge.hpp>
#include <iomanip>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/common/path_utils.hpp>

DEFINE_double(x_offset, 0, "X offset of the trajectory, in meters");
DEFINE_double(y_offset, 0, "Y offset of the trajectory, in meters");
@ -13,153 +13,184 @@ DECLARE_double(trajectory_multiplier_x);
DECLARE_double(trajectory_multiplier_y);
DECLARE_double(trajectory_multiplier_z);

DEFINE_int32(unrealcv_post_median_blur, 0,
"If > 0, median blur the raw UnrealCV images"
"with a median filter of this size");
DEFINE_int32(
unrealcv_post_median_blur,
0,
"If > 0, median blur the raw UnrealCV images"
"with a median filter of this size"
);

DEFINE_double(unrealcv_post_gaussian_blur_sigma, 0,
"If sigma > 0, Gaussian blur the raw UnrealCV images"
"with a Gaussian filter standard deviation sigma.");
DEFINE_double(
unrealcv_post_gaussian_blur_sigma,
0,
"If sigma > 0, Gaussian blur the raw UnrealCV images"
"with a Gaussian filter standard deviation sigma."
);

DEFINE_string(unrealcv_output_directory, "",
"Output directory in which to output the raw RGB images.");
DEFINE_string(
unrealcv_output_directory,
"",
"Output directory in which to output the raw RGB images."
);

namespace event_camera_simulator {

UnrealCvRenderer::UnrealCvRenderer()
{
client_ = std::make_shared<UnrealCvClient>("localhost", "9000");
frame_idx_ = 0;
}

void UnrealCvRenderer::setCamera(const ze::Camera::Ptr& camera)
{
camera_ = camera;

// compute the horizontal field of view of the camera
ze::VectorX intrinsics = camera_->projectionParameters();
const FloatType fx = intrinsics(0);
const FloatType hfov_deg = 2 * std::atan(0.5 * (FloatType) camera_->width() / fx) * 180. / CV_PI;

client_->setCameraFOV(static_cast<float>(hfov_deg));
client_->setCameraSize(camera->width(), camera->height());
}

void UnrealCvRenderer::render(const Transformation& T_W_C, const ImagePtr& out_image, const DepthmapPtr& out_depthmap) const
{
CHECK_EQ(out_image->rows, camera_->height());
CHECK_EQ(out_image->cols, camera_->width());

VLOG(1) << "T_W_C (ZE) = " << T_W_C;

const Transformation::TransformationMatrix mT_ZE_C = T_W_C.getTransformationMatrix();
Transformation::TransformationMatrix mT_UE_ZE;
mT_UE_ZE << 1, 0, 0, 0,
0, -1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1;

Transformation::TransformationMatrix mT_C_UEC;
mT_C_UEC << 0, 1, 0, 0,
0, 0, -1, 0,
1, 0, 0, 0,
0, 0, 0, 1;

// rotate 90 deg to the right so that R_W_C = Identity <=> euler angle (0,0,0) in UnrealCV
// Transformation::TransformationMatrix mT_rotate_90_right;
// mT_rotate_90_right << 0, -1, 0, 0,
// 1, 0, 0, 0,
// 0, 0, 1, 0,
// 0, 0, 0, 1;
// mT_rotate_90_right << 1, 0, 0, 0,
// 0, 1, 0, 0,
// 0, 0, 1, 0,
// 0, 0, 0, 1;
// const Transformation::TransformationMatrix mT_UE_UEC = mT_rotate_90_right * mT_UE_ZE * mT_ZE_C * mT_C_UEC;
const Transformation::TransformationMatrix mT_UE_UEC = mT_UE_ZE * mT_ZE_C * mT_C_UEC;
const Transformation T_UE_UEC(mT_UE_UEC);

VLOG(1) << "T_ZE_C = " << mT_ZE_C;
VLOG(1) << "T_UE_UEC = " << T_UE_UEC;

FloatType yaw, pitch, roll;
quaternionToEulerUnrealEngine(T_UE_UEC.getRotation(), yaw, pitch, roll);

const FloatType x_offset = static_cast<FloatType>(FLAGS_x_offset);
const FloatType y_offset = static_cast<FloatType>(FLAGS_y_offset);
const FloatType z_offset = static_cast<FloatType>(FLAGS_z_offset);

const FloatType x = 100. * (FLAGS_trajectory_multiplier_x * T_UE_UEC.getPosition()[0] + x_offset);
const FloatType y = 100. * (FLAGS_trajectory_multiplier_y * T_UE_UEC.getPosition()[1] + y_offset);
const FloatType z = 100. * (FLAGS_trajectory_multiplier_z * T_UE_UEC.getPosition()[2] + z_offset);

VLOG(1) << yaw << " " << pitch << " " << roll;

CameraData cam_data = {0,
pitch,
yaw,
roll,
x,
y,
z};

client_->setCamera(cam_data);
cv::Mat img = client_->getImage(0);
VLOG(5) << "Got image from the UnrealCV client";

// (optionally) save raw RGB image to the output directory
if(FLAGS_unrealcv_output_directory != "")
{
std::stringstream ss_nr;
ss_nr << std::setw(10) << std::setfill('0') << frame_idx_;
std::string path_frame = ze::joinPath(FLAGS_unrealcv_output_directory, "frame_" + ss_nr.str() + ".png");
VLOG(1) << "Saving raw RGB image to: " << path_frame;
cv::imwrite(path_frame, img, {cv::IMWRITE_PNG_COMPRESSION, 9});
}

cv::Mat img_gray;
cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);

if(FLAGS_unrealcv_post_median_blur > 0)
{
cv::medianBlur(img_gray, img_gray, FLAGS_unrealcv_post_median_blur);
}

if(FLAGS_unrealcv_post_gaussian_blur_sigma > 0)
{
cv::GaussianBlur(img_gray, img_gray, cv::Size(-1,-1), FLAGS_unrealcv_post_gaussian_blur_sigma);
}

cv::resize(img_gray, img_gray, cv::Size(camera_->width(), camera_->height()));
img_gray.convertTo(*out_image, cv::DataType<ImageFloatType>::type, 1./255.);

cv::Mat depth = client_->getDepth(0);
VLOG(5) << "Got depth map from the UnrealCV client";
CHECK_EQ(depth.type(), CV_32F);

cv::resize(depth, depth, cv::Size(camera_->width(), camera_->height()));
depth.convertTo(*out_depthmap, cv::DataType<ImageFloatType>::type);

// the depth provided by the unrealcv client is the distance from the scene to the camera center,
// we need to convert it to the distance to image plane (see Github issue: https://github.com/unrealcv/unrealcv/issues/14)
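// Geometrically: a pixel at radial distance d = sqrt(dx^2 + dy^2) from the
// principal point views the scene along a ray inclined by alpha to the
// optical axis, with tan(alpha) = d / f. Ray length r and z-depth are thus
// related by z = r * cos(alpha) = r / sqrt(1 + (d / f)^2), which is the
// per-pixel division performed below.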
const ImageFloatType yc = 0.5 * static_cast<ImageFloatType>(camera_->height()) - 1.0;
|
||||
const ImageFloatType xc = 0.5 * static_cast<ImageFloatType>(camera_->width()) - 1.0;
|
||||
const ImageFloatType f = static_cast<ImageFloatType>(camera_->projectionParameters()(0));
|
||||
for(int y=0; y<camera_->height(); ++y)
|
||||
{
|
||||
for(int x=0; x<camera_->width(); ++x)
|
||||
{
|
||||
const ImageFloatType point_depth = (*out_depthmap)(y,x);
|
||||
const ImageFloatType dx = static_cast<ImageFloatType>(x) - xc;
|
||||
const ImageFloatType dy = static_cast<ImageFloatType>(y) - yc;
|
||||
const ImageFloatType distance_from_center = std::sqrt(dx*dx + dy*dy);
|
||||
const ImageFloatType plane_depth = point_depth / std::sqrt(1.0 + std::pow(distance_from_center / f, 2));
|
||||
(*out_depthmap)(y,x) = plane_depth;
|
||||
UnrealCvRenderer::UnrealCvRenderer() {
|
||||
client_ = std::make_shared<UnrealCvClient>("localhost", "9000");
|
||||
frame_idx_ = 0;
|
||||
}
|
||||
}
|
||||
|
||||
frame_idx_++;
|
||||
}
|
||||
void UnrealCvRenderer::setCamera(const ze::Camera::Ptr& camera) {
|
||||
camera_ = camera;
|
||||
|
||||
// compute the horizontal field of view of the camera
|
||||
ze::VectorX intrinsics = camera_->projectionParameters();
|
||||
const FloatType fx = intrinsics(0);
|
||||
const FloatType hfov_deg =
|
||||
2 * std::atan(0.5 * (FloatType) camera_->width() / fx) * 180.
|
||||
/ CV_PI;
|
||||
|
||||
client_->setCameraFOV(static_cast<float>(hfov_deg));
|
||||
client_->setCameraSize(camera->width(), camera->height());
|
||||
}

    void UnrealCvRenderer::render(
        const Transformation& T_W_C,
        const ImagePtr& out_image,
        const DepthmapPtr& out_depthmap
    ) const {
        CHECK_EQ(out_image->rows, camera_->height());
        CHECK_EQ(out_image->cols, camera_->width());

        VLOG(1) << "T_W_C (ZE) = " << T_W_C;

        const Transformation::TransformationMatrix mT_ZE_C =
            T_W_C.getTransformationMatrix();
        Transformation::TransformationMatrix mT_UE_ZE;
        mT_UE_ZE << 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1;

        Transformation::TransformationMatrix mT_C_UEC;
        mT_C_UEC << 0, 1, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0, 1;
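        // as can be read off the matrices above: mT_UE_ZE flips the y axis
        // to move between the right-handed ZE world frame and Unreal's
        // left-handed world frame, and mT_C_UEC permutes the axes so that
        // the UnrealCV camera frame (x forward, y right, z up) maps onto
        // the vision camera frame (x right, y down, z forward)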

        // rotate 90 deg to the right so that R_W_C = Identity <=> euler
        // angle (0,0,0) in UnrealCV
        // Transformation::TransformationMatrix mT_rotate_90_right;
        // mT_rotate_90_right << 0, -1, 0, 0,
        //                       1,  0, 0, 0,
        //                       0,  0, 1, 0,
        //                       0,  0, 0, 1;
        // mT_rotate_90_right << 1, 0, 0, 0,
        //                       0, 1, 0, 0,
        //                       0, 0, 1, 0,
        //                       0, 0, 0, 1;
        // const Transformation::TransformationMatrix mT_UE_UEC =
        //     mT_rotate_90_right * mT_UE_ZE * mT_ZE_C * mT_C_UEC;
        const Transformation::TransformationMatrix mT_UE_UEC =
            mT_UE_ZE * mT_ZE_C * mT_C_UEC;
        const Transformation T_UE_UEC(mT_UE_UEC);

        VLOG(1) << "T_ZE_C = " << mT_ZE_C;
        VLOG(1) << "T_UE_UEC = " << T_UE_UEC;

        FloatType yaw, pitch, roll;
        quaternionToEulerUnrealEngine(T_UE_UEC.getRotation(), yaw, pitch, roll);

        const FloatType x_offset = static_cast<FloatType>(FLAGS_x_offset);
        const FloatType y_offset = static_cast<FloatType>(FLAGS_y_offset);
        const FloatType z_offset = static_cast<FloatType>(FLAGS_z_offset);

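        // Unreal positions are expressed in centimeters, while ZE poses are
        // (presumably) in meters, hence the factor of 100; the per-axis
        // multipliers and offsets (gflags) rescale and shift the trajectory
        // within the scene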
        const FloatType x =
            100.
            * (FLAGS_trajectory_multiplier_x * T_UE_UEC.getPosition()[0]
               + x_offset);
        const FloatType y =
            100.
            * (FLAGS_trajectory_multiplier_y * T_UE_UEC.getPosition()[1]
               + y_offset);
        const FloatType z =
            100.
            * (FLAGS_trajectory_multiplier_z * T_UE_UEC.getPosition()[2]
               + z_offset);

        VLOG(1) << yaw << " " << pitch << " " << roll;

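        // CameraData layout: {id, pitch, yaw, roll, x, y, z}; the leading 0
        // presumably addresses the first (default) UnrealCV camera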
        CameraData cam_data = {0, pitch, yaw, roll, x, y, z};

        client_->setCamera(cam_data);
        cv::Mat img = client_->getImage(0);
        VLOG(5) << "Got image from the UnrealCV client";

        // (optionally) save raw RGB image to the output directory
        if (FLAGS_unrealcv_output_directory != "") {
            std::stringstream ss_nr;
            ss_nr << std::setw(10) << std::setfill('0') << frame_idx_;
            std::string path_frame = ze::joinPath(
                FLAGS_unrealcv_output_directory,
                "frame_" + ss_nr.str() + ".png"
            );
            VLOG(1) << "Saving raw RGB image to: " << path_frame;
            cv::imwrite(path_frame, img, {cv::IMWRITE_PNG_COMPRESSION, 9});
        }

        cv::Mat img_gray;
        cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);

        if (FLAGS_unrealcv_post_median_blur > 0)
            cv::medianBlur(img_gray, img_gray, FLAGS_unrealcv_post_median_blur);
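        // note: cv::medianBlur requires an odd aperture size >= 3, so the
        // flag must be set accordingly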

        if (FLAGS_unrealcv_post_gaussian_blur_sigma > 0) {
            cv::GaussianBlur(
                img_gray,
                img_gray,
                cv::Size(-1, -1),
                FLAGS_unrealcv_post_gaussian_blur_sigma
            );
        }
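        // (a non-positive kernel size lets OpenCV derive the Gaussian
        // kernel extent from sigma)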

        cv::resize(
            img_gray,
            img_gray,
            cv::Size(camera_->width(), camera_->height())
        );
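        // convert the 8-bit grayscale image to floating point in [0, 1]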
        img_gray.convertTo(
            *out_image,
            cv::DataType<ImageFloatType>::type,
            1. / 255.
        );

        cv::Mat depth = client_->getDepth(0);
        VLOG(5) << "Got depth map from the UnrealCV client";
        CHECK_EQ(depth.type(), CV_32F);

        cv::resize(depth, depth, cv::Size(camera_->width(), camera_->height()));
        depth.convertTo(*out_depthmap, cv::DataType<ImageFloatType>::type);

        // the depth provided by the unrealcv client is the distance from
        // the scene to the camera center; we need to convert it to the
        // distance to the image plane (see GitHub issue:
        // https://github.com/unrealcv/unrealcv/issues/14)
        const ImageFloatType yc =
            0.5 * static_cast<ImageFloatType>(camera_->height()) - 1.0;
        const ImageFloatType xc =
            0.5 * static_cast<ImageFloatType>(camera_->width()) - 1.0;
        const ImageFloatType f =
            static_cast<ImageFloatType>(camera_->projectionParameters()(0));
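        // for a pixel at radial distance r from the principal point, the
        // viewing ray makes an angle theta with the optical axis where
        // tan(theta) = r / f, so
        //   plane_depth = ray_depth * cos(theta)
        //               = ray_depth / sqrt(1 + (r / f)^2)
        // which is the correction applied below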
        for (int y = 0; y < camera_->height(); ++y) {
            for (int x = 0; x < camera_->width(); ++x) {
                const ImageFloatType point_depth = (*out_depthmap)(y, x);
                const ImageFloatType dx = static_cast<ImageFloatType>(x) - xc;
                const ImageFloatType dy = static_cast<ImageFloatType>(y) - yc;
                const ImageFloatType distance_from_center =
                    std::sqrt(dx * dx + dy * dy);
                const ImageFloatType plane_depth =
                    point_depth
                    / std::sqrt(1.0 + std::pow(distance_from_center / f, 2));
                (*out_depthmap)(y, x) = plane_depth;
            }
        }

        frame_idx_++;
    }

} // namespace event_camera_simulator