commit a8c2f0ca43
Author: Henri Rebecq
Date:   2018-10-29 17:53:15 +01:00

    initial commit

208 changed files with 554184 additions and 0 deletions

esim_common/CMakeLists.txt
@@ -0,0 +1,28 @@
cmake_minimum_required(VERSION 2.8.3)
project(esim_common)
find_package(catkin_simple REQUIRED)
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
catkin_simple()
set(HEADERS
include/esim/common/types.hpp
include/esim/common/utils.hpp
)
set(SOURCES
src/utils.cpp
)
cs_add_library(${PROJECT_NAME} ${SOURCES} ${HEADERS})
##########
# GTESTS #
##########
catkin_add_gtest(test_utils test/test_utils.cpp)
target_link_libraries(test_utils ${PROJECT_NAME} ${OpenCV_LIBRARIES})
cs_install()
cs_export()

esim_common/include/esim/common/types.hpp
@@ -0,0 +1,160 @@
#pragma once
#include <ze/common/transformation.hpp>
#include <opencv2/core/core.hpp>
#include <ze/cameras/camera_rig.hpp>
#include <memory>
// FloatType defines the floating point accuracy (single or double precision)
// for the geometric operations (computing rotation matrices, point projection, etc.).
// This should typically be double precision (highest accuracy).
#define FloatType ze::real_t
// ImageFloatType defines the floating point accuracy (single or double precision)
// of the intensity images (and depth images).
// Single precision is usually sufficient here.
#define ImageFloatType float
namespace event_camera_simulator {
using Translation = ze::Position;
using Vector3 = ze::Vector3;
using Vector4 = ze::Vector4;
using Vector3i = Eigen::Vector3i;
using Transformation = ze::Transformation;
using TransformationVector = ze::TransformationVector;
using TransformationPtr = std::shared_ptr<Transformation>;
using Normal = ze::Vector3;
using CalibrationMatrix = ze::Matrix3;
using RotationMatrix = ze::Matrix3;
using HomographyMatrix = ze::Matrix3;
using AngularVelocity = ze::Vector3;
using LinearVelocity = ze::Vector3;
using AngularVelocityVector = std::vector<AngularVelocity>;
using LinearVelocityVector = std::vector<LinearVelocity>;
using uint16_t = ze::uint16_t;
using Time = ze::int64_t;
using Duration = ze::uint64_t;
using Image = cv::Mat_<ImageFloatType>;
using ImagePtr = std::shared_ptr<Image>;
using Depthmap = cv::Mat_<ImageFloatType>;
using OpticFlow = cv::Mat_< cv::Vec<ImageFloatType, 2> >;
using OpticFlowPtr = std::shared_ptr<OpticFlow>;
using DepthmapPtr = std::shared_ptr<Depthmap>;
using ImagePtrVector = std::vector<ImagePtr>;
using DepthmapPtrVector = std::vector<DepthmapPtr>;
using OpticFlowPtrVector = std::vector<OpticFlowPtr>;
using Camera = ze::Camera;
struct Event
{
Event(uint16_t x, uint16_t y, Time t, bool pol)
: x(x),
y(y),
t(t),
pol(pol)
{
}
uint16_t x;
uint16_t y;
Time t;
bool pol;
};
using Events = std::vector<Event>;
using EventsVector = std::vector<Events>;
using EventsPtr = std::shared_ptr<Events>;
struct PointXYZRGB
{
PointXYZRGB(FloatType x, FloatType y, FloatType z,
int red, int green, int blue)
: xyz(x, y, z),
rgb(red, green, blue) {}
PointXYZRGB(const Vector3& xyz)
: xyz(xyz) {}
PointXYZRGB(const Vector3& xyz, const Vector3i& rgb)
: xyz(xyz),
rgb(rgb) {}
Vector3 xyz;
Vector3i rgb;
};
using PointCloud = std::vector<PointXYZRGB>;
using PointCloudVector = std::vector<PointCloud>;
struct SimulatorData
{
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
//! Nanosecond timestamp.
Time timestamp;
//! Camera images.
ImagePtrVector images;
//! Depth maps.
DepthmapPtrVector depthmaps;
//! Optic flow maps.
OpticFlowPtrVector optic_flows;
//! Camera rig.
ze::CameraRig::Ptr camera_rig;
//! An accelerometer measures the specific force (incl. gravity),
//! corrupted by noise and bias.
Vector3 specific_force_corrupted;
//! The angular velocity (in the body frame) corrupted by noise and bias.
Vector3 angular_velocity_corrupted;
//! Groundtruth states.
struct Groundtruth
{
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
//! Pose of the body (i.e. the IMU) expressed in the world frame.
Transformation T_W_B;
//! Accelerometer and gyroscope biases.
Vector3 acc_bias;
Vector3 gyr_bias;
//! Poses of the cameras in the rig expressed in the world frame.
TransformationVector T_W_Cs;
//! Linear and angular velocities (i.e. twists) of the cameras in the rig,
//! expressed in each camera's local coordinate frame.
LinearVelocityVector linear_velocities_;
AngularVelocityVector angular_velocities_;
//! Dynamic objects: poses and twists expressed in the world frame.
std::vector<Transformation> T_W_OBJ_;
std::vector<LinearVelocity> linear_velocity_obj_;
std::vector<AngularVelocity> angular_velocity_obj_;
};
Groundtruth groundtruth;
// Flags to indicate whether a value has been updated or not
bool images_updated;
bool depthmaps_updated;
bool optic_flows_updated;
bool twists_updated;
bool poses_updated;
bool imu_updated;
};
} // namespace event_camera_simulator
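A minimal producer-side sketch of how SimulatorData and its update flags are meant to be used (fillImageSample is an illustrative helper, not part of the package):

#include <esim/common/types.hpp>

using namespace event_camera_simulator;

// Illustrative helper: store one freshly rendered image and raise only the
// matching flag, so consumers can tell which channels of SimulatorData changed.
void fillImageSample(SimulatorData& data, Time t, const ImagePtr& image)
{
  data.timestamp = t; // nanoseconds
  data.images = {image};
  data.images_updated = true;
  data.depthmaps_updated = false;
  data.optic_flows_updated = false;
}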

esim_common/include/esim/common/utils.hpp
@@ -0,0 +1,59 @@
#pragma once
#include <esim/common/types.hpp>
namespace ze {
class Camera;
}
namespace event_camera_simulator {
inline double degToRad(double deg)
{
return deg * CV_PI / 180.0;
}
inline double hfovToFocalLength(double hfov_deg, int W)
{
return 0.5 * static_cast<double>(W) / std::tan(0.5 * degToRad(hfov_deg));
}
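// Example: a 90 deg horizontal FOV on a 640-pixel-wide sensor gives
// f = 0.5 * 640 / tan(45 deg) = 320 pixels.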
CalibrationMatrix calibrationMatrixFromCamera(const Camera::Ptr& camera);
PointCloud eventsToPointCloud(const Events& events, const Depthmap& depthmap, const ze::Camera::Ptr& camera);
FloatType maxMagnitudeOpticFlow(const OpticFlowPtr& flow);
FloatType maxPredictedAbsBrightnessChange(const ImagePtr& I, const OpticFlowPtr& flow);
void gaussianBlur(ImagePtr& I, FloatType sigma);
// Helper class to compute optic flow from a twist vector and a depth map.
// Precomputes a lookup table of pixel -> bearing vector correspondences
// to accelerate the computation.
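// For a static scene, the pixel with bearing f and depth d back-projects to
// X = d * f (camera frame); the point's apparent velocity is
// Xdot = w_CW x X + v_CW, and the image-plane flow follows as
// u_dot = dproject/dX * Xdot (see computeFromDepthAndTwist in src/utils.cpp).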
class OpticFlowHelper
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
ZE_POINTER_TYPEDEFS(OpticFlowHelper);
OpticFlowHelper(const ze::Camera::Ptr& camera);
void computeFromDepthAndTwist(const ze::Vector3& w_WC, const ze::Vector3& v_WC,
const DepthmapPtr& depthmap, OpticFlowPtr& flow);
void computeFromDepthCamTwistAndObjDepthAndTwist(const ze::Vector3& w_WC, const ze::Vector3& v_WC, const DepthmapPtr& depthmap,
const ze::Vector3& r_COBJ, const ze::Vector3& w_WOBJ, const ze::Vector3& v_WOBJ,
OpticFlowPtr& flow);
private:
void precomputePixelToBearingLookupTable();
ze::Camera::Ptr camera_;
// Precomputed lookup table from keypoint -> bearing vector
ze::Bearings bearings_C_;
};
} // namespace event_camera_simulator
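For reference, a minimal sketch showing how OpticFlowHelper is driven (the YAML filename, depth value, and twist are placeholders; cameraRigFromYaml and atShared are used the same way in test/test_utils.cpp below):

#include <esim/common/utils.hpp>
#include <ze/cameras/camera_rig.hpp>

using namespace event_camera_simulator;

int main()
{
  // Placeholder calibration file; any ze-compatible pinhole camera works.
  ze::CameraRig::Ptr rig = ze::cameraRigFromYaml("pinhole_mono.yaml");
  const ze::Camera::Ptr camera = rig->atShared(0);
  OpticFlowHelper helper(camera);

  // Constant 5 m depth map and an arbitrary camera twist.
  DepthmapPtr depth = std::make_shared<Depthmap>(camera->height(), camera->width());
  depth->setTo(5.0);
  OpticFlowPtr flow = std::make_shared<OpticFlow>(camera->height(), camera->width());
  const ze::Vector3 w_WC(0.0, 0.0, 0.1); // yaw rate [rad/s]
  const ze::Vector3 v_WC(0.1, 0.0, 0.0); // forward velocity [m/s]
  helper.computeFromDepthAndTwist(w_WC, v_WC, depth, flow);
  return 0;
}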

esim_common/package.xml
@@ -0,0 +1,19 @@
<?xml version="1.0"?>
<package format="2">
<name>esim_common</name>
<version>0.0.0</version>
<description>Common data types and utils for the event camera simulator.</description>
<maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>
<license>GPLv3</license>
<buildtool_depend>catkin</buildtool_depend>
<buildtool_depend>catkin_simple</buildtool_depend>
<depend>ze_common</depend>
<depend>ze_cameras</depend>
<depend>gflags_catkin</depend>
<depend>glog_catkin</depend>
</package>

esim_common/src/utils.cpp
@@ -0,0 +1,190 @@
#include <esim/common/utils.hpp>
#include <ze/cameras/camera_models.hpp>
#include <opencv2/imgproc/imgproc.hpp>
namespace event_camera_simulator {
OpticFlowHelper::OpticFlowHelper(const ze::Camera::Ptr& camera)
: camera_(camera)
{
CHECK(camera_);
precomputePixelToBearingLookupTable();
}
void OpticFlowHelper::precomputePixelToBearingLookupTable()
{
// points_C stores the (u,v) coordinates of every pixel in the image plane
ze::Keypoints points_C(2, camera_->width() * camera_->height());
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
points_C.col(x + camera_->width() * y) = ze::Keypoint(x,y);
}
}
bearings_C_ = camera_->backProjectVectorized(points_C);
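// Normalize the bearing vectors so that their z-component equals 1;
// a 3D point is then recovered as X = depth * bearing.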
bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
}
void OpticFlowHelper::computeFromDepthAndTwist(const ze::Vector3& w_WC, const ze::Vector3& v_WC,
const DepthmapPtr& depthmap, OpticFlowPtr& flow)
{
CHECK(depthmap);
CHECK_EQ(depthmap->rows, camera_->height());
CHECK_EQ(depthmap->cols, camera_->width());
CHECK(depthmap->isContinuous());
const ze::Vector3 w_CW = -w_WC; // angular velocity of the world w.r.t. the camera
const ze::Vector3 v_CW = -v_WC; // linear velocity of the world w.r.t. the camera
const ze::Matrix33 R_CW = ze::skewSymmetric(w_CW); // skew-symmetric matrix [w_CW]x (not a rotation, despite the name)
Eigen::Map<const Eigen::Matrix<ImageFloatType, 1, Eigen::Dynamic, Eigen::RowMajor>> depths(depthmap->ptr<ImageFloatType>(), 1, depthmap->rows * depthmap->cols);
ze::Positions Xs = bearings_C_;
Xs.array().rowwise() *= depths.cast<FloatType>().array();
ze::Matrix6X dproject_dX =
camera_->dProject_dLandmarkVectorized(Xs);
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
const Vector3 X = Xs.col(x + camera_->width() * y);
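// Apparent velocity of the point in the camera frame: Xdot = [w_CW]x * X + v_CW.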
ze::Matrix31 dXdt = R_CW * X + v_CW;
ze::Vector2 flow_vec
= Eigen::Map<ze::Matrix23>(dproject_dX.col(x + camera_->width() * y).data()) * dXdt;
(*flow)(y,x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
}
}
}
void OpticFlowHelper::computeFromDepthCamTwistAndObjDepthAndTwist(const ze::Vector3& w_WC, const ze::Vector3& v_WC, const DepthmapPtr& depthmap,
const ze::Vector3& r_COBJ, const ze::Vector3& w_WOBJ, const ze::Vector3& v_WOBJ, OpticFlowPtr& flow)
{
CHECK(depthmap);
CHECK_EQ(depthmap->rows, camera_->height());
CHECK_EQ(depthmap->cols, camera_->width());
CHECK(depthmap->isContinuous());
const ze::Matrix33 w_WC_tilde = ze::skewSymmetric(w_WC);
const ze::Matrix33 w_WOBJ_tilde = ze::skewSymmetric(w_WOBJ);
Eigen::Map<const Eigen::Matrix<ImageFloatType, 1, Eigen::Dynamic, Eigen::RowMajor>> depths(depthmap->ptr<ImageFloatType>(), 1, depthmap->rows * depthmap->cols);
ze::Positions Xs = bearings_C_;
Xs.array().rowwise() *= depths.cast<FloatType>().array();
ze::Matrix6X dproject_dX =
camera_->dProject_dLandmarkVectorized(Xs);
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
const Vector3 r_CX = Xs.col(x + camera_->width() * y);
const Vector3 r_OBJX = r_CX - r_COBJ;
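// Apparent velocity combining camera and object motion:
// Xdot = v_WOBJ - v_WC - [w_WC]x * r_CX + [w_WOBJ]x * r_OBJX.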
ze::Matrix31 dXdt = v_WOBJ - v_WC - w_WC_tilde*r_CX + w_WOBJ_tilde*r_OBJX;
ze::Vector2 flow_vec
= Eigen::Map<ze::Matrix23>(dproject_dX.col(x + camera_->width() * y).data()) * dXdt;
(*flow)(y,x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
}
}
}
FloatType maxMagnitudeOpticFlow(const OpticFlowPtr& flow)
{
CHECK(flow);
FloatType max_squared_magnitude = 0;
for(int y=0; y<flow->rows; ++y)
{
for(int x=0; x<flow->cols; ++x)
{
const FloatType squared_magnitude = cv::norm((*flow)(y,x), cv::NORM_L2SQR);
if(squared_magnitude > max_squared_magnitude)
{
max_squared_magnitude = squared_magnitude;
}
}
}
return std::sqrt(max_squared_magnitude);
}
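// Under brightness constancy, the log-intensity L = log(I) satisfies
// dL/dt = -<grad L, u_dot>; the maximum of |<grad L, u_dot>| over the image
// therefore bounds how fast the log-brightness can change in the frame.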
FloatType maxPredictedAbsBrightnessChange(const ImagePtr& I, const OpticFlowPtr& flow)
{
const int height = I->rows;
const int width = I->cols;
Image Ix, Iy; // horizontal/vertical gradients of I
// the factor 1/8 accounts for the scaling introduced by the Sobel filter mask
//cv::Sobel(*I, Ix, cv::DataType<ImageFloatType>::type, 1, 0, 3, 1./8.);
//cv::Sobel(*I, Iy, cv::DataType<ImageFloatType>::type, 0, 1, 3, 1./8.);
// the factor 1/32 accounts for the scaling introduced by the Scharr filter mask
cv::Scharr(*I, Ix, cv::DataType<ImageFloatType>::type, 1, 0, 1./32.);
cv::Scharr(*I, Iy, cv::DataType<ImageFloatType>::type, 0, 1, 1./32.);
Image Lx, Ly; // horizontal/vertical gradients of log(I). d(logI)/dx = 1/I * dI/dx
static const ImageFloatType eps = 1e-3; // small additive term to avoid problems at I=0
cv::divide(Ix, *I+eps, Lx);
cv::divide(Iy, *I+eps, Ly);
Image dLdt(height, width);
for(int y=0; y<height; ++y)
{
for(int x=0; x<width; ++x)
{
// dL/dt ~= - <nablaL, flow>
const ImageFloatType dLdt_at_xy =
Lx(y,x) * (*flow)(y,x)[0] +
Ly(y,x) * (*flow)(y,x)[1]; // "-" sign ignored since we are interested in the absolute value...
dLdt(y,x) = std::fabs(dLdt_at_xy);
}
}
double min_dLdt, max_dLdt;
// Only the extrema are needed; cv::minMaxIdx expects index arrays with one
// entry per matrix dimension, so the index arguments are left out.
cv::minMaxIdx(dLdt, &min_dLdt, &max_dLdt);
return static_cast<FloatType>(max_dLdt);
}
void gaussianBlur(ImagePtr& I, FloatType sigma)
{
cv::GaussianBlur(*I, *I, cv::Size(15,15), sigma, sigma);
}
CalibrationMatrix calibrationMatrixFromCamera(const Camera::Ptr& camera)
{
CHECK(camera);
const ze::VectorX params = camera->projectionParameters();
CalibrationMatrix K;
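// Assumes pinhole projection parameters ordered as (fx, fy, cx, cy).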
K << params(0), 0, params(2),
0, params(1), params(3),
0, 0, 1;
return K;
}
PointCloud eventsToPointCloud(const Events& events, const Depthmap& depthmap, const ze::Camera::Ptr& camera)
{
PointCloud pcl_camera;
for(const Event& ev : events)
{
Vector3 X_c = camera->backProject(ze::Keypoint(ev.x,ev.y));
X_c[0] /= X_c[2];
X_c[1] /= X_c[2];
X_c[2] = 1.;
const ImageFloatType z = depthmap(ev.y,ev.x);
Vector3 P_c = z * X_c;
Vector3i rgb;
static const Vector3i red(255, 0, 0);
static const Vector3i blue(0, 0, 255);
rgb = (ev.pol) ? blue : red;
PointXYZRGB P_c_intensity(P_c, rgb);
pcl_camera.push_back(P_c_intensity);
}
return pcl_camera;
}
} // namespace event_camera_simulator

esim_common/test/test_utils.cpp
@@ -0,0 +1,213 @@
#include <esim/common/utils.hpp>
#include <ze/common/test_entrypoint.hpp>
#include <ze/common/file_utils.hpp>
#include <ze/common/path_utils.hpp>
#include <ze/common/string_utils.hpp>
#include <ze/cameras/camera_rig.hpp>
#include <ze/common/random.hpp>
std::string getTestDataDir(const std::string& dataset_name)
{
using namespace ze;
const char* datapath_dir = std::getenv("ESIM_TEST_DATA_PATH");
CHECK(datapath_dir != nullptr)
<< "Did you download the esim_test_data repository and set "
<< "the ESIM_TEST_DATA_PATH environment variable?";
std::string path(datapath_dir);
CHECK(isDir(path)) << "Folder does not exist: " << path;
path = path + "/data/" + dataset_name;
CHECK(isDir(path)) << "Dataset does not exist: " << path;
return path;
}
namespace event_camera_simulator {
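// Finite-difference reference implementation: displace each back-projected
// 3D point along the inverse camera twist for a small dt, re-project it, and
// take (u(t+dt) - u(t)) / dt as the flow. The test below compares this
// against the analytic flow computed by OpticFlowHelper.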
void computeOpticFlowFiniteDifference(const ze::Camera::Ptr& camera,
const ze::Vector3& angular_velocity,
const ze::Vector3& linear_velocity,
const DepthmapPtr& depth,
OpticFlowPtr& flow)
{
CHECK(flow);
CHECK_EQ(flow->rows, camera->height());
CHECK_EQ(flow->cols, camera->width());
const FloatType dt = 0.001;
for(int y=0; y<flow->rows; ++y)
{
for(int x=0; x<flow->cols; ++x)
{
ze::Keypoint u_t(x,y);
ze::Bearing f = camera->backProject(u_t);
const FloatType z = static_cast<FloatType>((*depth)(y,x));
ze::Position Xc_t = z * ze::Position(f[0]/f[2], f[1]/f[2], 1.);
ze::Transformation::Rotation dR = ze::Transformation::Rotation::exp(-angular_velocity * dt);
ze::Transformation::Position dT = -linear_velocity * dt;
// Transform Xc(t) to Xc(t+dt)
ze::Transformation T_tdt_t;
T_tdt_t.getRotation() = dR;
T_tdt_t.getPosition() = dT;
VLOG(5) << T_tdt_t;
ze::Position Xc_t_dt = T_tdt_t.transform(Xc_t);
// Project Xc(t+dt) in the image plane
ze::Keypoint u_t_dt = camera->project(Xc_t_dt);
VLOG(5) << u_t;
VLOG(5) << u_t_dt;
ze::Vector2 flow_vec = (u_t_dt - u_t) / dt;
(*flow)(y,x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
}
}
}
void computeOpticFlowFiniteDifference(const ze::Camera::Ptr& camera,
const ze::Vector3& angular_velocity,
const ze::Vector3& linear_velocity,
const DepthmapPtr& depth,
const ze::Vector3& r_COBJ,
const ze::Vector3& angular_velocity_obj,
const ze::Vector3& linear_velocity_obj,
OpticFlowPtr& flow)
{
CHECK(flow);
CHECK_EQ(flow->rows, camera->height());
CHECK_EQ(flow->cols, camera->width());
const FloatType dt = 0.001;
for(int y=0; y<flow->rows; ++y)
{
for(int x=0; x<flow->cols; ++x)
{
ze::Keypoint u_t(x,y);
ze::Bearing f = camera->backProject(u_t);
const FloatType z = static_cast<FloatType>((*depth)(y,x));
ze::Position Xc_t = z * ze::Position(f[0]/f[2], f[1]/f[2], 1.);
ze::Position r_OBJX = Xc_t - r_COBJ;
ze::Matrix33 w_WOBJ_tilde = ze::skewSymmetric(angular_velocity_obj);
ze::Transformation::Rotation dR = ze::Transformation::Rotation::exp(-angular_velocity * dt);
ze::Transformation::Position dT = linear_velocity_obj*dt - linear_velocity * dt + w_WOBJ_tilde*r_OBJX*dt;
// Transform Xc(t) to Xc(t+dt)
ze::Transformation T_tdt_t;
T_tdt_t.getRotation() = dR;
T_tdt_t.getPosition() = dT;
VLOG(5) << T_tdt_t;
ze::Position Xc_t_dt = T_tdt_t.transform(Xc_t);
// Project Xc(t+dt) in the image plane
ze::Keypoint u_t_dt = camera->project(Xc_t_dt);
VLOG(5) << u_t;
VLOG(5) << u_t_dt;
ze::Vector2 flow_vec = (u_t_dt - u_t) / dt;
(*flow)(y,x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
}
}
}
} // namespace event_camera_simulator
TEST(Utils, testOpticFlowComputation)
{
using namespace event_camera_simulator;
// Load camera calib from folder
const std::string path_to_data_folder = getTestDataDir("camera_calibs");
ze::CameraRig::Ptr camera_rig
= ze::cameraRigFromYaml(ze::joinPath(path_to_data_folder, "pinhole_mono.yaml"));
CHECK(camera_rig);
const ze::Camera::Ptr camera = camera_rig->atShared(0);
cv::Size sensor_size(camera->width(), camera->height());
OpticFlowPtr flow_analytic =
std::make_shared<OpticFlow>(sensor_size);
// Sample random depth map
const ImageFloatType z_mean = 5.0;
const ImageFloatType z_stddev = 0.5;
DepthmapPtr depth = std::make_shared<Depthmap>(sensor_size);
for(int y=0; y<sensor_size.height; ++y)
{
for(int x=0; x<sensor_size.width; ++x)
{
(*depth)(y,x) = ze::sampleNormalDistribution(true, z_mean, z_stddev);
}
}
// Sample random linear and angular velocity
ze::Vector3 angular_velocity, linear_velocity;
angular_velocity.setRandom();
linear_velocity.setRandom();
LOG(INFO) << "w = " << angular_velocity;
LOG(INFO) << "v = " << linear_velocity;
// Compute optic flow on the image plane according
// to the sampled angular/linear velocity
OpticFlowHelper optic_flow_helper(camera);
optic_flow_helper.computeFromDepthAndTwist(angular_velocity, linear_velocity,
depth, flow_analytic);
OpticFlowPtr flow_finite_diff =
std::make_shared<OpticFlow>(sensor_size);
computeOpticFlowFiniteDifference(camera, angular_velocity, linear_velocity,
depth, flow_finite_diff);
// Check that the analytical flow and finite-difference flow match
for(int y=0; y<sensor_size.height; ++y)
{
for(int x=0; x<sensor_size.width; ++x)
{
EXPECT_NEAR((*flow_analytic)(y,x)[0], (*flow_finite_diff)(y,x)[0], 0.1);
EXPECT_NEAR((*flow_analytic)(y,x)[1], (*flow_finite_diff)(y,x)[1], 0.1);
}
}
/**********************************************/
/* repeat optic flow test for dynamic objects */
/**********************************************/
// sample random obj position and linear and angular velocity
ze::Vector3 r_COBJ;
r_COBJ.setRandom();
r_COBJ(2) = ze::sampleNormalDistribution(true, z_mean, z_stddev); // assume object is in front of camera
ze::Vector3 angular_velocity_obj, linear_velocity_obj;
angular_velocity_obj.setRandom();
linear_velocity_obj.setRandom();
// Compute optic flow on the image plane according
// to the sampled angular/linear velocity
optic_flow_helper.computeFromDepthCamTwistAndObjDepthAndTwist(angular_velocity, linear_velocity, depth,
r_COBJ, angular_velocity_obj, linear_velocity_obj,
flow_analytic);
computeOpticFlowFiniteDifference(camera, angular_velocity, linear_velocity, depth,
r_COBJ, angular_velocity_obj, linear_velocity_obj,
flow_finite_diff);
// Check that the analytical flow and finite-difference flow match
for(int y=0; y<sensor_size.height; ++y)
{
for(int x=0; x<sensor_size.width; ++x)
{
EXPECT_NEAR((*flow_analytic)(y,x)[0], (*flow_finite_diff)(y,x)[0], 0.1);
EXPECT_NEAR((*flow_analytic)(y,x)[1], (*flow_finite_diff)(y,x)[1], 0.1);
}
}
}
ZE_UNITTEST_ENTRYPOINT