Reformatted the project to make it more readable (to me)

This commit is contained in:
Arnaud Fauconnet 2023-07-17 11:45:28 +02:00
parent de6743207d
commit 4738ae7444
66 changed files with 6713 additions and 5880 deletions

View File

@@ -1,72 +1,74 @@
#pragma once

#include <deque>
#include <esim/common/types.hpp>
#include <ze/common/time_conversions.hpp>

namespace event_camera_simulator {

    class ImageBuffer {
    public:
        ZE_POINTER_TYPEDEFS(ImageBuffer);

        struct ImageData {
            ImageData(Image img, Time stamp, Duration exposure_time)
                : image(img),
                  stamp(stamp),
                  exposure_time(exposure_time) {}

            Image image;
            Time stamp;
            Duration exposure_time; // time since last image (0 if this is
                                    // the first image)
        };

        using ExposureImage = std::pair<Duration, Image>;

        // Rolling image buffer of maximum size 'buffer_size_ns'.
        ImageBuffer(Duration buffer_size_ns): buffer_size_ns_(buffer_size_ns) {}

        void addImage(Time t, const Image& img);

        std::deque<ImageData> getRawBuffer() const {
            return data_;
        }

        size_t size() const {
            return data_.size();
        }

        Duration getExposureTime() const {
            return buffer_size_ns_;
        }

    private:
        Duration buffer_size_ns_;
        std::deque<ImageData> data_;
    };

    /*
     * The CameraSimulator takes as input a sequence of stamped images,
     * assumed to be sampled at a "sufficiently high" framerate and with
     * floating-point precision, and treats each image as a measure of
     * irradiance.
     * From this, it simulates a real camera, including motion blur.
     *
     * @TODO: simulate a non-linear camera response curve, shot noise, etc.
     */
    class CameraSimulator {
    public:
        CameraSimulator(double exposure_time_ms)
            : exposure_time_(ze::secToNanosec(exposure_time_ms / 1000.0)) {
            buffer_.reset(new ImageBuffer(exposure_time_));
        }

        bool imageCallback(
            const Image& img, Time time, const ImagePtr& camera_image
        );

    private:
        ImageBuffer::Ptr buffer_;
        const Duration exposure_time_;
    };

} // namespace event_camera_simulator
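A minimal usage sketch for the CameraSimulator above (not part of this commit): renderFrame() is a hypothetical image source, and the resolution, frame rate and exposure time are illustrative assumptions.

    CameraSimulator camera_sim(10.0); // 10 ms exposure time
    ImagePtr blurred = std::make_shared<Image>(480, 640);
    for (Time t = 0; t < ze::secToNanosec(1.0);
         t += ze::millisecToNanosec(1)) {
        Image frame = renderFrame(t); // hypothetical image source
        if (camera_sim.imageCallback(frame, t, blurred)) {
            // 'blurred' now holds the exposure-averaged (motion-blurred)
            // image for the exposure window ending at time t
        }
    }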

View File

@@ -4,51 +4,47 @@
namespace event_camera_simulator {

    /*
     * The EventSimulator takes as input a sequence of stamped images,
     * assumed to be sampled at a "sufficiently high" framerate,
     * and simulates the principle of operation of an ideal event camera
     * with a constant contrast threshold C.
     * Pixel-wise intensity values are linearly interpolated in time.
     *
     * The pixel-wise voltages are reset with the values from the first image
     * which is passed to the simulator.
     */
    class EventSimulator {
    public:
        struct Config {
            double Cp;
            double Cm;
            double sigma_Cp;
            double sigma_Cm;
            Duration refractory_period_ns;
            bool use_log_image;
            double log_eps;
        };

        using TimestampImage = cv::Mat_<ze::real_t>;

        EventSimulator(const Config& config)
            : config_(config),
              is_initialized_(false),
              current_time_(0) {}

        void init(const Image& img, Time time);
        Events imageCallback(const Image& img, Time time);

    private:
        bool is_initialized_;
        Time current_time_;
        Image ref_values_;
        Image last_img_;
        TimestampImage last_event_timestamp_;
        cv::Size size_;

        Config config_;
    };

} // namespace event_camera_simulator
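A configuration sketch for the EventSimulator above (not part of this commit); the threshold and refractory values are illustrative only:

    EventSimulator::Config config;
    config.Cp = 0.1; // positive contrast threshold
    config.Cm = 0.1; // negative contrast threshold
    config.sigma_Cp = 0; // no threshold noise
    config.sigma_Cm = 0;
    config.refractory_period_ns = 100 * 1000; // 100 us
    config.use_log_image = true;
    config.log_eps = 0.001;

    EventSimulator sim(config);
    // Each imageCallback(img, t) call returns the events generated between
    // the previous and the current frame, sorted by timestamp:
    // Events evs = sim.imageCallback(img, t);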

View File

@@ -1,60 +1,60 @@
#pragma once

#include <esim/esim/camera_simulator.hpp>
#include <esim/esim/event_simulator.hpp>
#include <esim/visualization/publisher_interface.hpp>

namespace event_camera_simulator {

    /* The Simulator forwards the simulated images / depth maps
     * from the data provider to multiple, specialized camera simulators, such
     * as: (i) event camera simulators that simulate events based on sequences
     * of images (ii) camera simulators that simulate real cameras (including
     * motion blur, camera response function, noise, etc.)
     *
     * The Simulator then forwards the simulated data to one or more publishers.
     */
    class Simulator {
    public:
        Simulator(
            size_t num_cameras,
            const EventSimulator::Config& event_sim_config,
            double exposure_time_ms
        )
            : num_cameras_(num_cameras),
              exposure_time_(ze::millisecToNanosec(exposure_time_ms)) {
            for (size_t i = 0; i < num_cameras_; ++i) {
                event_simulators_.push_back(EventSimulator(event_sim_config));
                camera_simulators_.push_back(CameraSimulator(exposure_time_ms));
            }
        }

        ~Simulator();

        void addPublisher(const Publisher::Ptr& publisher) {
            CHECK(publisher);
            publishers_.push_back(std::move(publisher));
        }

        void dataProviderCallback(const SimulatorData& sim_data);

        void publishData(
            const SimulatorData& sim_data,
            const EventsVector& events,
            bool camera_simulator_success,
            const ImagePtrVector& camera_images
        );

    private:
        size_t num_cameras_;
        std::vector<EventSimulator> event_simulators_;
        std::vector<CameraSimulator> camera_simulators_;
        Duration exposure_time_;
        std::vector<Publisher::Ptr> publishers_;
        ImagePtrVector corrupted_camera_images_;
    };

} // namespace event_camera_simulator
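A wiring sketch for the Simulator above (not part of this commit); MyPublisher stands in for any concrete Publisher implementation:

    EventSimulator::Config event_sim_config; // filled in as sketched earlier
    Simulator simulator(
        /* num_cameras = */ 1,
        event_sim_config,
        /* exposure_time_ms = */ 10.0
    );
    simulator.addPublisher(std::make_shared<MyPublisher>());
    // A data provider then drives the pipeline by calling
    // simulator.dataProviderCallback(sim_data) for every new sample.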

View File

@@ -88,7 +88,6 @@ namespace event_camera_simulator {
            << buffer_->getExposureTime() << std::endl;
        exposures_file_.close();
        *camera_image /= denom;
        cv::Mat disp;
        camera_image->convertTo(disp, CV_8U, 255);

View File

@@ -1,119 +1,123 @@
#include <esim/esim/event_simulator.hpp>
#include <glog/logging.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/common/random.hpp>
#include <ze/common/time_conversions.hpp>

namespace event_camera_simulator {

    void EventSimulator::init(const Image& img, Time time) {
        VLOG(1) << "Initialized event camera simulator with sensor size: "
                << img.size();
        VLOG(1) << "and contrast thresholds: C+ = " << config_.Cp
                << " , C- = " << config_.Cm;
        is_initialized_ = true;
        last_img_ = img.clone();
        ref_values_ = img.clone();
        last_event_timestamp_ = TimestampImage::zeros(img.size());
        current_time_ = time;
        size_ = img.size();
    }

    Events EventSimulator::imageCallback(const Image& img, Time time) {
        CHECK_GE(time, 0);

        Image preprocessed_img = img.clone();
        if (config_.use_log_image) {
            LOG_FIRST_N(INFO, 1)
                << "Converting the image to log image with eps = "
                << config_.log_eps << ".";
            cv::log(config_.log_eps + img, preprocessed_img);
        }

        if (!is_initialized_) {
            init(preprocessed_img, time);
            return {};
        }

        // For each pixel, check if new events need to be generated since the
        // last image sample
        static constexpr ImageFloatType tolerance = 1e-6;
        Events events;
        Duration delta_t_ns = time - current_time_;
        CHECK_GT(delta_t_ns, 0u);
        CHECK_EQ(img.size(), size_);

        for (int y = 0; y < size_.height; ++y) {
            for (int x = 0; x < size_.width; ++x) {
                ImageFloatType itdt = preprocessed_img(y, x);
                ImageFloatType it = last_img_(y, x);
                ImageFloatType prev_cross = ref_values_(y, x);

                if (std::fabs(it - itdt) > tolerance) {
                    ImageFloatType pol = (itdt >= it) ? +1.0 : -1.0;
                    ImageFloatType C = (pol > 0) ? config_.Cp : config_.Cm;
                    ImageFloatType sigma_C =
                        (pol > 0) ? config_.sigma_Cp : config_.sigma_Cm;

                    if (sigma_C > 0) {
                        C += ze::sampleNormalDistribution<ImageFloatType>(
                            false,
                            0,
                            sigma_C
                        );
                        constexpr ImageFloatType minimum_contrast_threshold =
                            0.01;
                        C = std::max(minimum_contrast_threshold, C);
                    }

                    ImageFloatType curr_cross = prev_cross;
                    bool all_crossings = false;

                    do {
                        curr_cross += pol * C;

                        if ((pol > 0 && curr_cross > it && curr_cross <= itdt)
                            || (pol < 0 && curr_cross < it
                                && curr_cross >= itdt)) {
                            Duration edt =
                                (curr_cross - it) * delta_t_ns / (itdt - it);
                            Time t = current_time_ + edt;

                            // check that pixel (x,y) is not currently in a
                            // "refractory" state, i.e.
                            // |t - last_timestamp(x,y)| >= refractory_period
                            const Time last_stamp_at_xy =
                                ze::secToNanosec(last_event_timestamp_(y, x));
                            CHECK_GE(t, last_stamp_at_xy);
                            const Duration dt = t - last_stamp_at_xy;

                            if (last_event_timestamp_(y, x) == 0
                                || dt >= config_.refractory_period_ns) {
                                events.push_back(Event(x, y, t, pol > 0));
                                last_event_timestamp_(y, x) =
                                    ze::nanosecToSecTrunc(t);
                            } else {
                                VLOG(3)
                                    << "Dropping event because time since last "
                                       "event ("
                                    << dt << " ns) < refractory period ("
                                    << config_.refractory_period_ns << " ns).";
                            }
                            ref_values_(y, x) = curr_cross;
                        } else {
                            all_crossings = true;
                        }
                    } while (!all_crossings);
                } // end tolerance
            }     // end for each pixel
        }

        // update simvars for next loop
        current_time_ = time;
        last_img_ = preprocessed_img.clone(); // it is now the latest image

        // Sort the events by increasing timestamps, since this is what
        // most event processing algorithms expect
        sort(
            events.begin(),
            events.end(),
            [](const Event& a, const Event& b) -> bool { return a.t < b.t; }
        );

        return events;
    }

} // namespace event_camera_simulator
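A worked example of the crossing loop above (illustrative numbers, not part of this commit):

    // Per pixel: it = 0.0, itdt = 0.25, C = 0.1, delta_t_ns = 10 ms.
    // curr_cross walks 0.1, 0.2, 0.3; the first two crossings fall inside
    // (it, itdt] and produce positive events, the third exits the loop.
    // Event timestamps are linearly interpolated within the frame interval:
    //   edt_1 = (0.1 - 0.0) * 10 ms / (0.25 - 0.0) = 4 ms
    //   edt_2 = (0.2 - 0.0) * 10 ms / (0.25 - 0.0) = 8 ms
    // so the two events are stamped current_time_ + 4 ms and + 8 ms.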

View File

@@ -1,143 +1,160 @@
#include <esim/common/utils.hpp>
#include <esim/esim/simulator.hpp>
#include <ze/common/timer_collection.hpp>

namespace event_camera_simulator {

    DECLARE_TIMER(
        TimerEventSimulator,
        timers_event_simulator_,
        simulate_events,
        visualization
    );

    Simulator::~Simulator() {
        timers_event_simulator_.saveToFile("/tmp", "event_simulator.csv");
    }

    void Simulator::dataProviderCallback(const SimulatorData& sim_data) {
        CHECK_EQ(event_simulators_.size(), num_cameras_);

        bool camera_simulator_success;

        if (sim_data.images_updated) {
            EventsVector events(num_cameras_);
            Time time = sim_data.timestamp;

            // simulate the events and camera images for every sensor in the rig
            {
                auto t = timers_event_simulator_
                             [TimerEventSimulator::simulate_events]
                                 .timeScope();
                for (size_t i = 0; i < num_cameras_; ++i) {
                    events[i] = event_simulators_[i].imageCallback(
                        *sim_data.images[i],
                        time
                    );

                    if (corrupted_camera_images_.size() < num_cameras_) {
                        // allocate memory for the corrupted camera images and
                        // set them to 0
                        corrupted_camera_images_.emplace_back(
                            std::make_shared<Image>(sim_data.images[i]->size())
                        );
                        corrupted_camera_images_[i]->setTo(0.);
                    }

                    camera_simulator_success =
                        camera_simulators_[i].imageCallback(
                            *sim_data.images[i],
                            time,
                            corrupted_camera_images_[i]
                        );
                }
            }

            // publish the simulation data + events
            {
                auto t =
                    timers_event_simulator_[TimerEventSimulator::visualization]
                        .timeScope();
                publishData(
                    sim_data,
                    events,
                    camera_simulator_success,
                    corrupted_camera_images_
                );
            }
        } else {
            {
                // just forward the simulation data to the publisher
                auto t =
                    timers_event_simulator_[TimerEventSimulator::visualization]
                        .timeScope();
                publishData(
                    sim_data,
                    {},
                    camera_simulator_success,
                    corrupted_camera_images_
                );
            }
        }
    }

    void Simulator::publishData(
        const SimulatorData& sim_data,
        const EventsVector& events,
        bool camera_simulator_success,
        const ImagePtrVector& camera_images
    ) {
        if (publishers_.empty()) {
            LOG_FIRST_N(WARNING, 1) << "No publisher available";
            return;
        }

        Time time = sim_data.timestamp;
        const Transformation& T_W_B = sim_data.groundtruth.T_W_B;
        const TransformationVector& T_W_Cs = sim_data.groundtruth.T_W_Cs;
        const ze::CameraRig::Ptr& camera_rig = sim_data.camera_rig;

        // Publish the new data (events, images, depth maps, poses, point
        // clouds, etc.)
        if (!events.empty())
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->eventsCallback(events);

        if (sim_data.poses_updated)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->poseCallback(T_W_B, T_W_Cs, time);

        if (sim_data.twists_updated)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->twistCallback(
                    sim_data.groundtruth.angular_velocities_,
                    sim_data.groundtruth.linear_velocities_,
                    time
                );

        if (sim_data.imu_updated)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->imuCallback(
                    sim_data.specific_force_corrupted,
                    sim_data.angular_velocity_corrupted,
                    time
                );

        if (camera_rig)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->cameraInfoCallback(camera_rig, time);

        if (sim_data.images_updated) {
            for (const Publisher::Ptr& publisher : publishers_) {
                publisher->imageCallback(sim_data.images, time);

                if (camera_simulator_success && time >= exposure_time_) {
                    // the images should be timestamped at mid-exposure
                    const Time mid_exposure_time = time - 0.5 * exposure_time_;
                    publisher->imageCorruptedCallback(
                        camera_images,
                        mid_exposure_time
                    );
                }
            }
        }

        if (sim_data.depthmaps_updated)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->depthmapCallback(sim_data.depthmaps, time);

        if (sim_data.optic_flows_updated)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->opticFlowCallback(sim_data.optic_flows, time);

        if (sim_data.depthmaps_updated && !events.empty()) {
            PointCloudVector pointclouds(num_cameras_);
            for (size_t i = 0; i < num_cameras_; ++i) {
                CHECK(sim_data.depthmaps[i]);
                pointclouds[i] = eventsToPointCloud(
                    events[i],
                    *sim_data.depthmaps[i],
                    camera_rig->atShared(i)
                );
            }
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->pointcloudCallback(pointclouds, time);
        }
    }

} // namespace event_camera_simulator
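A sketch of a minimal consumer for publishData() (not part of this commit), assuming the Publisher interface declares eventsCallback with the argument list used above and provides no-op defaults for the callbacks not overridden here:

    class EventCountPublisher : public Publisher {
    public:
        void eventsCallback(const EventsVector& events) override {
            size_t total = 0;
            for (const Events& evs : events)
                total += evs.size();
            LOG(INFO) << "Received " << total << " events";
        }
    };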

View File

@@ -1,237 +1,225 @@
#include <esim/esim/event_simulator.hpp>
#include <fstream>
#include <opencv2/highgui/highgui.hpp>
#include <ze/common/file_utils.hpp>
#include <ze/common/path_utils.hpp>
#include <ze/common/string_utils.hpp>
#include <ze/common/test_entrypoint.hpp>
#include <ze/common/time_conversions.hpp>
#include <ze/matplotlib/matplotlibcpp.hpp>

#define USE_OPENCV

namespace event_camera_simulator {

    class CSVImageLoader {
    public:
        CSVImageLoader(const std::string& path_to_data_folder)
            : path_to_data_folder_(path_to_data_folder) {
            images_in_str_.open(
                ze::joinPath(path_to_data_folder, "images.csv")
            );
            CHECK(images_in_str_.is_open());
        }

        bool next(int64_t& stamp, Image& img) {
            std::string line;
            if (!getline(images_in_str_, line)) {
                LOG(INFO) << "Finished reading all the images in the folder";
                return false;
            }

            if ('%' != line.at(0) && '#' != line.at(0)) {
                std::vector<std::string> items =
                    ze::splitString(line, delimiter_);
                stamp = std::stoll(items[0]);

                const std::string& path_to_image = ze::joinPath(
                    path_to_data_folder_,
                    "frame",
                    "cam_0",
                    items[1]
                );

                img = cv::imread(path_to_image, 0);
                CHECK(img.data) << "Error loading image: " << path_to_image;

                return true;
            } else {
                return next(stamp, img);
            }
        }

    private:
        std::ifstream images_in_str_;
        const char delimiter_{','};
        std::string path_to_data_folder_;
    };

} // namespace event_camera_simulator

std::string getTestDataDir(const std::string& dataset_name) {
    using namespace ze;

    const char* datapath_dir = std::getenv("ESIM_TEST_DATA_PATH");
    CHECK(datapath_dir != nullptr)
        << "Did you download the esim_test_data repository and set "
        << "the ESIM_TEST_DATA_PATH environment variable?";

    std::string path(datapath_dir);
    CHECK(isDir(path)) << "Folder does not exist: " << path;
    path = path + "/data/" + dataset_name;
    CHECK(isDir(path)) << "Dataset does not exist: " << path;
    return path;
}

TEST(EventSimulator, testImageReconstruction) {
    using namespace event_camera_simulator;

    // Load image sequence from folder
    const std::string path_to_data_folder = getTestDataDir("planar_carpet");
    CSVImageLoader reader(path_to_data_folder);

    EventSimulator::Config event_sim_config;
    event_sim_config.Cp = 0.05;
    event_sim_config.Cm = 0.03;
    event_sim_config.sigma_Cp = 0;
    event_sim_config.sigma_Cm = 0;
    event_sim_config.use_log_image = true;
    event_sim_config.log_eps = 0.001;
    EventSimulator simulator(event_sim_config);

    LOG(INFO) << "Testing event camera simulator with C+ = "
              << event_sim_config.Cp << ", C- = " << event_sim_config.Cm;

    const ImageFloatType max_reconstruction_error =
        std::max(event_sim_config.Cp, event_sim_config.Cm);

    bool is_first_image = true;
    Image I, L, L_reconstructed;
    int64_t stamp;
    while (reader.next(stamp, I)) {
        I.convertTo(I, cv::DataType<ImageFloatType>::type, 1. / 255.);
        cv::log(event_sim_config.log_eps + I, L);

        if (is_first_image) {
            // Initialize reconstructed image from the ground truth image
            L_reconstructed = L.clone();
            is_first_image = false;
        }

        Events events = simulator.imageCallback(I, stamp);

        // Reconstruct next image from previous one using the events in between
        for (const Event& e : events) {
            ImageFloatType pol = e.pol ? 1. : -1.;
            const ImageFloatType C =
                e.pol ? event_sim_config.Cp : event_sim_config.Cm;
            L_reconstructed(e.y, e.x) += pol * C;
        }

        // Check that the reconstruction error is bounded by the contrast
        // thresholds
        for (int y = 0; y < I.rows; ++y) {
            for (int x = 0; x < I.cols; ++x) {
                const ImageFloatType reconstruction_error =
                    std::fabs(L_reconstructed(y, x) - L(y, x));
                VLOG_EVERY_N(2, I.rows * I.cols) << reconstruction_error;
                EXPECT_LE(reconstruction_error, max_reconstruction_error);
            }
        }

#ifdef USE_OPENCV
        const ImageFloatType vmin = std::log(event_sim_config.log_eps);
        const ImageFloatType vmax = std::log(1.0 + event_sim_config.log_eps);
        cv::Mat disp = 255.0 * (L_reconstructed - vmin) / (vmax - vmin);
        disp.convertTo(disp, CV_8U);
        cv::imshow("Reconstructed", disp);
        cv::waitKey(1);
#endif
    }
}

TEST(EventSimulator, testEvolutionReconstructionError) {
    using namespace event_camera_simulator;

    // Load image sequence from folder
    const std::string path_to_data_folder = getTestDataDir("planar_carpet");
    CSVImageLoader reader(path_to_data_folder);

    EventSimulator::Config event_sim_config;
    event_sim_config.Cp = 0.5;
    event_sim_config.Cm = event_sim_config.Cp;
    event_sim_config.sigma_Cp = 0;
    event_sim_config.sigma_Cm = event_sim_config.sigma_Cp;
    event_sim_config.use_log_image = true;
    event_sim_config.log_eps = 0.001;
    EventSimulator simulator(event_sim_config);
    const double contrast_bias = 0.0;

    LOG(INFO) << "Testing event camera simulator with C+ = "
              << event_sim_config.Cp << ", C- = " << event_sim_config.Cm;

    std::vector<ze::real_t> times, mean_errors, stddev_errors;
    bool is_first_image = true;
    Image I, L, L_reconstructed;
    int64_t stamp;
    while (reader.next(stamp, I)) {
        LOG_EVERY_N(INFO, 50) << "t = " << ze::nanosecToSecTrunc(stamp) << " s";
        I.convertTo(I, cv::DataType<ImageFloatType>::type, 1. / 255.);
        cv::log(event_sim_config.log_eps + I, L);

        if (is_first_image) {
            // Initialize reconstructed image from the ground truth image
            L_reconstructed = L.clone();
            is_first_image = false;
        }

        Events events = simulator.imageCallback(I, stamp);

        // Reconstruct next image from previous one using the events in between
        for (const Event& e : events) {
            ImageFloatType pol = e.pol ? 1. : -1.;
            ImageFloatType C =
                e.pol ? event_sim_config.Cp : event_sim_config.Cm;
            C += contrast_bias;
            L_reconstructed(e.y, e.x) += pol * C;
        }

        // Compute the mean and stddev of the reconstruction error over the
        // whole image
        Image error;
        cv::absdiff(L, L_reconstructed, error);
        cv::Scalar mean_error, stddev_error;
        cv::meanStdDev(error, mean_error, stddev_error);
        VLOG(1) << "Mean error: " << mean_error << ", Stddev: " << stddev_error;

        times.push_back(ze::nanosecToSecTrunc(stamp));
        mean_errors.push_back(mean_error[0]);
        stddev_errors.push_back(stddev_error[0]);

#ifdef USE_OPENCV
        const ImageFloatType vmin = std::log(event_sim_config.log_eps);
        const ImageFloatType vmax = std::log(1.0 + event_sim_config.log_eps);
        cv::Mat disp = 255.0 * (L_reconstructed - vmin) / (vmax - vmin);
        disp.convertTo(disp, CV_8U);
        cv::imshow("Reconstructed", disp);
        cv::waitKey(1);
#endif
    }

    // Plot the mean and stddev reconstruction error over time
    ze::plt::plot(times, mean_errors, "r");
    ze::plt::plot(times, stddev_errors, "b--");
    std::stringstream title;
    title << "C = " << event_sim_config.Cp
          << ", sigma = " << event_sim_config.sigma_Cp
          << ", bias = " << contrast_bias;
    ze::plt::title(title.str());
    ze::plt::xlabel("Time (s)");
    ze::plt::ylabel("Mean / Stddev reconstruction error");
    ze::plt::grid(true);
    ze::plt::save("/tmp/evolution_reconstruction_error.pdf");
    ze::plt::show();
}

ZE_UNITTEST_ENTRYPOINT
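Why the EXPECT_LE in testImageReconstruction is expected to hold (a worked example with the thresholds used above):

    // With Cp = 0.05: suppose L(y,x) rises from 0.00 to 0.12 between frames.
    // The simulator emits events at the crossings 0.05 and 0.10, and the test
    // adds Cp for each of them, so L_reconstructed(y,x) goes 0.00 -> 0.10.
    // The residual |0.12 - 0.10| = 0.02 stays below Cp; in general the
    // per-pixel residual is bounded by one threshold, i.e. max(Cp, Cm).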

View File

@@ -1,160 +1,155 @@
#pragma once

#include <memory>
#include <opencv2/core/core.hpp>
#include <ze/cameras/camera_rig.hpp>
#include <ze/common/transformation.hpp>

// FloatType defines the floating point accuracy (single or double precision)
// for the geometric operations (computing rotation matrices, point projection,
// etc.). This should typically be double precision (highest accuracy).
#define FloatType ze::real_t

// ImageFloatType defines the floating point accuracy (single or double
// precision) of the intensity images (and depth images). Single precision
// should be enough there in most cases.
#define ImageFloatType float

namespace event_camera_simulator {

    using Translation = ze::Position;
    using Vector3 = ze::Vector3;
    using Vector4 = ze::Vector4;
    using Vector3i = Eigen::Vector3i;

    using Transformation = ze::Transformation;
    using TransformationVector = ze::TransformationVector;
    using TransformationPtr = std::shared_ptr<Transformation>;

    using Normal = ze::Vector3;
    using CalibrationMatrix = ze::Matrix3;
    using RotationMatrix = ze::Matrix3;
    using HomographyMatrix = ze::Matrix3;

    using AngularVelocity = ze::Vector3;
    using LinearVelocity = ze::Vector3;
    using AngularVelocityVector = std::vector<AngularVelocity>;
    using LinearVelocityVector = std::vector<LinearVelocity>;

    using uint16_t = ze::uint16_t;

    using Time = ze::int64_t;
    using Duration = ze::uint64_t;
    using Image = cv::Mat_<ImageFloatType>;
    using ImagePtr = std::shared_ptr<Image>;
    using Depthmap = cv::Mat_<ImageFloatType>;
    using OpticFlow = cv::Mat_<cv::Vec<ImageFloatType, 2>>;
    using OpticFlowPtr = std::shared_ptr<OpticFlow>;
    using DepthmapPtr = std::shared_ptr<Depthmap>;
    using ImagePtrVector = std::vector<ImagePtr>;
    using DepthmapPtrVector = std::vector<DepthmapPtr>;
    using OpticFlowPtrVector = std::vector<OpticFlowPtr>;

    using Camera = ze::Camera;

    struct Event {
        Event(uint16_t x, uint16_t y, Time t, bool pol)
            : x(x),
              y(y),
              t(t),
              pol(pol) {}

        uint16_t x;
        uint16_t y;
        Time t;
        bool pol;
    };

    using Events = std::vector<Event>;
    using EventsVector = std::vector<Events>;
    using EventsPtr = std::shared_ptr<Events>;

    struct PointXYZRGB {
        PointXYZRGB(
            FloatType x, FloatType y, FloatType z, int red, int green, int blue
        )
            : xyz(x, y, z),
              rgb(red, green, blue) {}

        PointXYZRGB(const Vector3& xyz): xyz(xyz) {}

        PointXYZRGB(const Vector3& xyz, const Vector3i& rgb)
            : xyz(xyz),
              rgb(rgb) {}

        Vector3 xyz;
        Vector3i rgb;
    };

    using PointCloud = std::vector<PointXYZRGB>;
    using PointCloudVector = std::vector<PointCloud>;

    struct SimulatorData {
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW

        //! Nanosecond timestamp.
        Time timestamp;

        //! Camera images.
        ImagePtrVector images;

        //! Depth maps.
        DepthmapPtrVector depthmaps;

        //! Optic flow maps.
        OpticFlowPtrVector optic_flows;

        //! Camera
        ze::CameraRig::Ptr camera_rig;

        //! An accelerometer measures the specific force (incl. gravity),
        //! corrupted by noise and bias.
        Vector3 specific_force_corrupted;

        //! The angular velocity (in the body frame) corrupted by noise and
        //! bias.
        Vector3 angular_velocity_corrupted;

        //! Groundtruth states.
        struct Groundtruth {
            EIGEN_MAKE_ALIGNED_OPERATOR_NEW

            //! Pose of the body (i.e. the IMU) expressed in the world frame.
            Transformation T_W_B;

            //! Accelerometer and gyro bias
            Vector3 acc_bias;
            Vector3 gyr_bias;

            //! Poses of the cameras in the rig expressed in the world frame.
            TransformationVector T_W_Cs;

            //! Linear and angular velocities (i.e. twists) of the cameras in
            //! the rig, expressed in each camera's local coordinate frame.
            LinearVelocityVector linear_velocities_;
            AngularVelocityVector angular_velocities_;

            // dynamic objects
            std::vector<Transformation> T_W_OBJ_;
            std::vector<LinearVelocity> linear_velocity_obj_;
            std::vector<AngularVelocity> angular_velocity_obj_;
        };
        Groundtruth groundtruth;

        // Flags to indicate whether a value has been updated or not
        bool images_updated;
        bool depthmaps_updated;
        bool optic_flows_updated;
        bool twists_updated;
        bool poses_updated;
        bool imu_updated;
    };

} // namespace event_camera_simulator
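For illustration (not part of this commit), an Event is a plain record of pixel position, timestamp and polarity:

    Event ev(10, 20, /* t [ns] = */ 1000, /* pol = */ true);
    Events evs{ev}; // one positive event at pixel (10, 20), t = 1 us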

View File

@@ -3,57 +3,70 @@
#include <esim/common/types.hpp>

namespace ze {
    class Camera;
}

namespace event_camera_simulator {

    inline double degToRad(double deg) {
        return deg * CV_PI / 180.0;
    }

    inline double hfovToFocalLength(double hfov_deg, int W) {
        return 0.5 * static_cast<double>(W)
             / std::tan(0.5 * degToRad(hfov_deg));
    }

    CalibrationMatrix calibrationMatrixFromCamera(const Camera::Ptr& camera);

    PointCloud eventsToPointCloud(
        const Events& events,
        const Depthmap& depthmap,
        const ze::Camera::Ptr& camera
    );

    FloatType maxMagnitudeOpticFlow(const OpticFlowPtr& flow);

    FloatType maxPredictedAbsBrightnessChange(
        const ImagePtr& I, const OpticFlowPtr& flow
    );

    void gaussianBlur(ImagePtr& I, FloatType sigma);

    // Helper class to compute optic flow from a twist vector and depth map
    // Precomputes a lookup table for pixel -> bearing vector correspondences
    // to accelerate the computation
    class OpticFlowHelper {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW

        ZE_POINTER_TYPEDEFS(OpticFlowHelper);

        OpticFlowHelper(const ze::Camera::Ptr& camera);

        void computeFromDepthAndTwist(
            const ze::Vector3& w_WC,
            const ze::Vector3& v_WC,
            const DepthmapPtr& depthmap,
            OpticFlowPtr& flow
        );

        void computeFromDepthCamTwistAndObjDepthAndTwist(
            const ze::Vector3& w_WC,
            const ze::Vector3& v_WC,
            const DepthmapPtr& depthmap,
            const ze::Vector3& r_COBJ,
            const ze::Vector3& w_WOBJ,
            const ze::Vector3& v_WOBJ,
            OpticFlowPtr& flow
        );

    private:
        void precomputePixelToBearingLookupTable();

        ze::Camera::Ptr camera_;

        // Precomputed lookup table from keypoint -> bearing vector
        ze::Bearings bearings_C_;
    };

} // namespace event_camera_simulator
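A usage sketch for OpticFlowHelper (not part of this commit); the camera, twist and depth map are assumed to be supplied by the caller:

    void computeFlowExample(
        const ze::Camera::Ptr& camera,
        const ze::Vector3& w_WC,
        const ze::Vector3& v_WC,
        const DepthmapPtr& depthmap
    ) {
        OpticFlowHelper helper(camera);
        OpticFlowPtr flow =
            std::make_shared<OpticFlow>(camera->height(), camera->width());
        helper.computeFromDepthAndTwist(w_WC, v_WC, depthmap, flow);
        // (*flow)(y, x) now holds the image-plane velocity at pixel (y, x)
    }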

View File

@ -1,190 +1,212 @@
#include <esim/common/utils.hpp> #include <esim/common/utils.hpp>
#include <ze/cameras/camera_models.hpp>
#include <opencv2/imgproc/imgproc.hpp> #include <opencv2/imgproc/imgproc.hpp>
#include <ze/cameras/camera_models.hpp>
namespace event_camera_simulator { namespace event_camera_simulator {
OpticFlowHelper::OpticFlowHelper(const ze::Camera::Ptr& camera) OpticFlowHelper::OpticFlowHelper(const ze::Camera::Ptr& camera)
: camera_(camera) : camera_(camera) {
{ CHECK(camera_);
CHECK(camera_); precomputePixelToBearingLookupTable();
precomputePixelToBearingLookupTable();
}
void OpticFlowHelper::precomputePixelToBearingLookupTable()
{
// points_C is a matrix containing the coordinates of each pixel coordinate in the image plane
ze::Keypoints points_C(2, camera_->width() * camera_->height());
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
points_C.col(x + camera_->width() * y) = ze::Keypoint(x,y);
} }
}
bearings_C_ = camera_->backProjectVectorized(points_C);
bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
}
void OpticFlowHelper::computeFromDepthAndTwist(const ze::Vector3& w_WC, const ze::Vector3& v_WC, void OpticFlowHelper::precomputePixelToBearingLookupTable() {
const DepthmapPtr& depthmap, OpticFlowPtr& flow) // points_C is a matrix containing the coordinates of each pixel
{ // coordinate in the image plane
CHECK(depthmap); ze::Keypoints points_C(2, camera_->width() * camera_->height());
CHECK_EQ(depthmap->rows, camera_->height()); for (int y = 0; y < camera_->height(); ++y)
CHECK_EQ(depthmap->cols, camera_->width()); for (int x = 0; x < camera_->width(); ++x)
CHECK(depthmap->isContinuous()); points_C.col(x + camera_->width() * y) = ze::Keypoint(x, y);
bearings_C_ = camera_->backProjectVectorized(points_C);
const ze::Vector3 w_CW = -w_WC; // rotation speed of the world wrt the camera bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
const ze::Vector3 v_CW = -v_WC; // speed of the world wrt the camera
const ze::Matrix33 R_CW = ze::skewSymmetric(w_CW);
Eigen::Map<const Eigen::Matrix<ImageFloatType, 1, Eigen::Dynamic, Eigen::RowMajor>> depths(depthmap->ptr<ImageFloatType>(), 1, depthmap->rows * depthmap->cols);
ze::Positions Xs = bearings_C_;
Xs.array().rowwise() *= depths.cast<FloatType>().array();
ze::Matrix6X dproject_dX =
camera_->dProject_dLandmarkVectorized(Xs);
for(int y=0; y<camera_->height(); ++y)
{
for(int x=0; x<camera_->width(); ++x)
{
const Vector3 X = Xs.col(x + camera_->width() * y);
ze::Matrix31 dXdt = R_CW * X + v_CW;
ze::Vector2 flow_vec
= Eigen::Map<ze::Matrix23>(dproject_dX.col(x + camera_->width() * y).data()) * dXdt;
(*flow)(y,x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
} }
}
}
void OpticFlowHelper::computeFromDepthCamTwistAndObjDepthAndTwist(const ze::Vector3& w_WC, const ze::Vector3& v_WC, const DepthmapPtr& depthmap, void OpticFlowHelper::computeFromDepthAndTwist(
const ze::Vector3& r_COBJ, const ze::Vector3& w_WOBJ, const ze::Vector3& v_WOBJ, OpticFlowPtr& flow) const ze::Vector3& w_WC,
{ const ze::Vector3& v_WC,
CHECK(depthmap); const DepthmapPtr& depthmap,
CHECK_EQ(depthmap->rows, camera_->height()); OpticFlowPtr& flow
CHECK_EQ(depthmap->cols, camera_->width()); ) {
CHECK(depthmap->isContinuous()); CHECK(depthmap);
CHECK_EQ(depthmap->rows, camera_->height());
CHECK_EQ(depthmap->cols, camera_->width());
CHECK(depthmap->isContinuous());
const ze::Matrix33 w_WC_tilde = ze::skewSymmetric(w_WC); const ze::Vector3 w_CW =
const ze::Matrix33 w_WOBJ_tilde = ze::skewSymmetric(w_WOBJ); -w_WC; // rotation speed of the world wrt the camera
const ze::Vector3 v_CW = -v_WC; // speed of the world wrt the camera
const ze::Matrix33 R_CW = ze::skewSymmetric(w_CW);
Eigen::Map<const Eigen::Matrix<ImageFloatType, 1, Eigen::Dynamic, Eigen::RowMajor>> depths(depthmap->ptr<ImageFloatType>(), 1, depthmap->rows * depthmap->cols); Eigen::Map<
ze::Positions Xs = bearings_C_; const Eigen::
Xs.array().rowwise() *= depths.cast<FloatType>().array(); Matrix<ImageFloatType, 1, Eigen::Dynamic, Eigen::RowMajor>>
depths(
depthmap->ptr<ImageFloatType>(),
1,
depthmap->rows * depthmap->cols
);
ze::Positions Xs = bearings_C_;
Xs.array().rowwise() *= depths.cast<FloatType>().array();
ze::Matrix6X dproject_dX = ze::Matrix6X dproject_dX = camera_->dProject_dLandmarkVectorized(Xs);
camera_->dProject_dLandmarkVectorized(Xs);
for(int y=0; y<camera_->height(); ++y) for (int y = 0; y < camera_->height(); ++y) {
{ for (int x = 0; x < camera_->width(); ++x) {
for(int x=0; x<camera_->width(); ++x) const Vector3 X = Xs.col(x + camera_->width() * y);
{ ze::Matrix31 dXdt = R_CW * X + v_CW;
const Vector3 r_CX = Xs.col(x + camera_->width() * y); ze::Vector2 flow_vec =
const Vector3 r_OBJX = r_CX - r_COBJ; Eigen::Map<ze::Matrix23>(
dproject_dX.col(x + camera_->width() * y).data()
)
* dXdt;
                (*flow)(y, x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
            }
        }
    }

    void OpticFlowHelper::computeFromDepthCamTwistAndObjDepthAndTwist(
        const ze::Vector3& w_WC,
        const ze::Vector3& v_WC,
        const DepthmapPtr& depthmap,
        const ze::Vector3& r_COBJ,
        const ze::Vector3& w_WOBJ,
        const ze::Vector3& v_WOBJ,
        OpticFlowPtr& flow
    ) {
        CHECK(depthmap);
        CHECK_EQ(depthmap->rows, camera_->height());
        CHECK_EQ(depthmap->cols, camera_->width());
        CHECK(depthmap->isContinuous());

        const ze::Matrix33 w_WC_tilde = ze::skewSymmetric(w_WC);
        const ze::Matrix33 w_WOBJ_tilde = ze::skewSymmetric(w_WOBJ);

        Eigen::Map<
            const Eigen::
                Matrix<ImageFloatType, 1, Eigen::Dynamic, Eigen::RowMajor>>
            depths(
                depthmap->ptr<ImageFloatType>(),
                1,
                depthmap->rows * depthmap->cols
            );
        ze::Positions Xs = bearings_C_;
        Xs.array().rowwise() *= depths.cast<FloatType>().array();

        ze::Matrix6X dproject_dX = camera_->dProject_dLandmarkVectorized(Xs);

        for (int y = 0; y < camera_->height(); ++y) {
            for (int x = 0; x < camera_->width(); ++x) {
                const Vector3 r_CX = Xs.col(x + camera_->width() * y);
                const Vector3 r_OBJX = r_CX - r_COBJ;

                ze::Matrix31 dXdt =
                    v_WOBJ - v_WC - w_WC_tilde * r_CX + w_WOBJ_tilde * r_OBJX;
                ze::Vector2 flow_vec =
                    Eigen::Map<ze::Matrix23>(
                        dproject_dX.col(x + camera_->width() * y).data()
                    )
                    * dXdt;
                (*flow)(y, x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
            }
        }
    }

    FloatType maxMagnitudeOpticFlow(const OpticFlowPtr& flow) {
        CHECK(flow);
        FloatType max_squared_magnitude = 0;
        for (int y = 0; y < flow->rows; ++y) {
            for (int x = 0; x < flow->cols; ++x) {
                const FloatType squared_magnitude =
                    cv::norm((*flow)(y, x), cv::NORM_L2SQR);
                if (squared_magnitude > max_squared_magnitude)
                    max_squared_magnitude = squared_magnitude;
            }
        }
        return std::sqrt(max_squared_magnitude);
    }

    FloatType maxPredictedAbsBrightnessChange(
        const ImagePtr& I, const OpticFlowPtr& flow
    ) {
        const size_t height = I->rows;
        const size_t width = I->cols;

        Image Ix, Iy; // horizontal/vertical gradients of I
        // the factor 1/8 accounts for the scaling introduced by the Sobel
        // filter mask:
        // cv::Sobel(*I, Ix, cv::DataType<ImageFloatType>::type, 1, 0, 3, 1./8.);
        // cv::Sobel(*I, Iy, cv::DataType<ImageFloatType>::type, 0, 1, 3, 1./8.);

        // the factor 1/32 accounts for the scaling introduced by the Scharr
        // filter mask
        cv::Scharr(*I, Ix, cv::DataType<ImageFloatType>::type, 1, 0, 1. / 32.);
        cv::Scharr(*I, Iy, cv::DataType<ImageFloatType>::type, 0, 1, 1. / 32.);

        Image Lx, Ly; // horizontal/vertical gradients of log(I).
                      // d(logI)/dx = 1/I * dI/dx
        static const ImageFloatType eps =
            1e-3; // small additive term to avoid problems at I=0
        cv::divide(Ix, *I + eps, Lx);
        cv::divide(Iy, *I + eps, Ly);

        Image dLdt(height, width);
        for (int y = 0; y < height; ++y) {
            for (int x = 0; x < width; ++x) {
                // dL/dt ~= - <nablaL, flow>
                const ImageFloatType dLdt_at_xy =
                    Lx(y, x) * (*flow)(y, x)[0]
                    + Ly(y, x) * (*flow)(y, x)[1]; // "-" sign ignored since we
                                                   // are interested in the
                                                   // absolute value...
                dLdt(y, x) = std::fabs(dLdt_at_xy);
            }
        }
        double min_dLdt, max_dLdt;
        int min_idx, max_idx;
        cv::minMaxIdx(dLdt, &min_dLdt, &max_dLdt, &min_idx, &max_idx);

        return static_cast<FloatType>(max_dLdt);
    }

    void gaussianBlur(ImagePtr& I, FloatType sigma) {
        cv::GaussianBlur(*I, *I, cv::Size(15, 15), sigma, sigma);
    }

    CalibrationMatrix calibrationMatrixFromCamera(const Camera::Ptr& camera) {
        CHECK(camera);
        const ze::VectorX params = camera->projectionParameters();
        CalibrationMatrix K;
        K << params(0), 0, params(2), 0, params(1), params(3), 0, 0, 1;
        return K;
    }

    PointCloud eventsToPointCloud(
        const Events& events,
        const Depthmap& depthmap,
        const ze::Camera::Ptr& camera
    ) {
        PointCloud pcl_camera;
        for (const Event& ev : events) {
            Vector3 X_c = camera->backProject(ze::Keypoint(ev.x, ev.y));
            X_c[0] /= X_c[2];
            X_c[1] /= X_c[2];
            X_c[2] = 1.;

            const ImageFloatType z = depthmap(ev.y, ev.x);
            Vector3 P_c = z * X_c;

            Vector3i rgb;
            static const Vector3i red(255, 0, 0);
            static const Vector3i blue(0, 0, 255);
            rgb = (ev.pol) ? blue : red;

            PointXYZRGB P_c_intensity(P_c, rgb);
            pcl_camera.push_back(P_c_intensity);
        }
        return pcl_camera;
    }

} // namespace event_camera_simulator
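For reference, the inner loop of computeFromDepthCamTwistAndObjDepthAndTwist above evaluates the classical motion-field relation. Written out in the code's notation (the equation is my reconstruction from the code, not text from the commit):

    \dot{\mathbf{u}} = \mathrm{J}_{\pi}(\mathbf{r}_{CX})\, \dot{\mathbf{r}}_{CX},
    \qquad
    \dot{\mathbf{r}}_{CX} = \mathbf{v}_{WOBJ} - \mathbf{v}_{WC}
        - \hat{\boldsymbol{\omega}}_{WC}\, \mathbf{r}_{CX}
        + \hat{\boldsymbol{\omega}}_{WOBJ}\, \mathbf{r}_{OBJX}

where J_pi is the 2x3 per-pixel block of dproject_dX (the projection Jacobian) and the hats denote the skew-symmetric matrices built by ze::skewSymmetric; dXdt in the loop is exactly the right-hand side.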

@@ -1,213 +1,252 @@
#include <esim/common/utils.hpp>
#include <ze/cameras/camera_rig.hpp>
#include <ze/common/file_utils.hpp>
#include <ze/common/path_utils.hpp>
#include <ze/common/random.hpp>
#include <ze/common/string_utils.hpp>
#include <ze/common/test_entrypoint.hpp>

std::string getTestDataDir(const std::string& dataset_name) {
    using namespace ze;

    const char* datapath_dir = std::getenv("ESIM_TEST_DATA_PATH");
    CHECK(datapath_dir != nullptr)
        << "Did you download the esim_test_data repository and set "
        << "the ESIM_TEST_DATA_PATH environment variable?";

    std::string path(datapath_dir);
    CHECK(isDir(path)) << "Folder does not exist: " << path;
    path = path + "/data/" + dataset_name;
    CHECK(isDir(path)) << "Dataset does not exist: " << path;
    return path;
}

namespace event_camera_simulator {

    void computeOpticFlowFiniteDifference(
        const ze::Camera::Ptr& camera,
        const ze::Vector3& angular_velocity,
        const ze::Vector3& linear_velocity,
        const DepthmapPtr& depth,
        OpticFlowPtr& flow
    ) {
        CHECK(flow);
        CHECK_EQ(flow->rows, camera->height());
        CHECK_EQ(flow->cols, camera->width());

        const FloatType dt = 0.001;

        for (int y = 0; y < flow->rows; ++y) {
            for (int x = 0; x < flow->cols; ++x) {
                ze::Keypoint u_t(x, y);
                ze::Bearing f = camera->backProject(u_t);
                const FloatType z = static_cast<FloatType>((*depth)(y, x));
                ze::Position Xc_t =
                    z * ze::Position(f[0] / f[2], f[1] / f[2], 1.);

                ze::Transformation::Rotation dR =
                    ze::Transformation::Rotation::exp(-angular_velocity * dt);
                ze::Transformation::Position dT = -linear_velocity * dt;

                // Transform Xc(t) to Xc(t+dt)
                ze::Transformation T_tdt_t;
                T_tdt_t.getRotation() = dR;
                T_tdt_t.getPosition() = dT;
                VLOG(5) << T_tdt_t;

                ze::Position Xc_t_dt = T_tdt_t.transform(Xc_t);

                // Project Xc(t+dt) in the image plane
                ze::Keypoint u_t_dt = camera->project(Xc_t_dt);

                VLOG(5) << u_t;
                VLOG(5) << u_t_dt;

                ze::Vector2 flow_vec = (u_t_dt - u_t) / dt;
                (*flow)(y, x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
            }
        }
    }

    void computeOpticFlowFiniteDifference(
        const ze::Camera::Ptr& camera,
        const ze::Vector3& angular_velocity,
        const ze::Vector3& linear_velocity,
        const DepthmapPtr& depth,
        const ze::Vector3& r_COBJ,
        const ze::Vector3& angular_velocity_obj,
        const ze::Vector3& linear_velocity_obj,
        OpticFlowPtr& flow
    ) {
        CHECK(flow);
        CHECK_EQ(flow->rows, camera->height());
        CHECK_EQ(flow->cols, camera->width());

        const FloatType dt = 0.001;

        for (int y = 0; y < flow->rows; ++y) {
            for (int x = 0; x < flow->cols; ++x) {
                ze::Keypoint u_t(x, y);
                ze::Bearing f = camera->backProject(u_t);
                const FloatType z = static_cast<FloatType>((*depth)(y, x));
                ze::Position Xc_t =
                    z * ze::Position(f[0] / f[2], f[1] / f[2], 1.);
                ze::Position r_OBJX = Xc_t - r_COBJ;
                ze::Matrix33 w_WOBJ_tilde =
                    ze::skewSymmetric(angular_velocity_obj);

                ze::Transformation::Rotation dR =
                    ze::Transformation::Rotation::exp(-angular_velocity * dt);
                ze::Transformation::Position dT = linear_velocity_obj * dt
                    - linear_velocity * dt
                    + w_WOBJ_tilde * r_OBJX * dt;

                // Transform Xc(t) to Xc(t+dt)
                ze::Transformation T_tdt_t;
                T_tdt_t.getRotation() = dR;
                T_tdt_t.getPosition() = dT;
                VLOG(5) << T_tdt_t;

                ze::Position Xc_t_dt = T_tdt_t.transform(Xc_t);

                // Project Xc(t+dt) in the image plane
                ze::Keypoint u_t_dt = camera->project(Xc_t_dt);

                VLOG(5) << u_t;
                VLOG(5) << u_t_dt;

                ze::Vector2 flow_vec = (u_t_dt - u_t) / dt;
                (*flow)(y, x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
            }
        }
    }

} // namespace event_camera_simulator

TEST(Utils, testOpticFlowComputation) {
    using namespace event_camera_simulator;

    // Load camera calib from folder
    const std::string path_to_data_folder = getTestDataDir("camera_calibs");
    ze::CameraRig::Ptr camera_rig = ze::cameraRigFromYaml(
        ze::joinPath(path_to_data_folder, "pinhole_mono.yaml")
    );

    CHECK(camera_rig);

    const ze::Camera::Ptr camera = camera_rig->atShared(0);
    cv::Size sensor_size(camera->width(), camera->height());

    OpticFlowPtr flow_analytic = std::make_shared<OpticFlow>(sensor_size);

    // Sample random depth map
    const ImageFloatType z_mean = 5.0;
    const ImageFloatType z_stddev = 0.5;
    DepthmapPtr depth = std::make_shared<Depthmap>(sensor_size);
    for (int y = 0; y < sensor_size.height; ++y) {
        for (int x = 0; x < sensor_size.width; ++x) {
            (*depth)(y, x) =
                ze::sampleNormalDistribution(true, z_mean, z_stddev);
        }
    }

    // Sample random linear and angular velocity
    ze::Vector3 angular_velocity, linear_velocity;
    angular_velocity.setRandom();
    linear_velocity.setRandom();

    LOG(INFO) << "w = " << angular_velocity;
    LOG(INFO) << "v = " << linear_velocity;

    // Compute optic flow on the image plane according
    // to the sampled angular/linear velocity
    OpticFlowHelper optic_flow_helper(camera);
    optic_flow_helper.computeFromDepthAndTwist(
        angular_velocity,
        linear_velocity,
        depth,
        flow_analytic
    );

    OpticFlowPtr flow_finite_diff = std::make_shared<OpticFlow>(sensor_size);

    computeOpticFlowFiniteDifference(
        camera,
        angular_velocity,
        linear_velocity,
        depth,
        flow_finite_diff
    );

    // Check that the analytical flow and finite-difference flow match
    for (int y = 0; y < sensor_size.height; ++y) {
        for (int x = 0; x < sensor_size.width; ++x) {
            EXPECT_NEAR(
                (*flow_analytic)(y, x)[0],
                (*flow_finite_diff)(y, x)[0],
                0.1
            );
            EXPECT_NEAR(
                (*flow_analytic)(y, x)[1],
                (*flow_finite_diff)(y, x)[1],
                0.1
            );
        }
    }

    /**********************************************/
    /* repeat optic flow test for dynamic objects */
    /**********************************************/

    // sample random obj position and linear and angular velocity
    ze::Vector3 r_COBJ;
    r_COBJ.setRandom();
    r_COBJ(2) = ze::sampleNormalDistribution(
        true,
        z_mean,
        z_stddev
    ); // assume object is in front of camera

    ze::Vector3 angular_velocity_obj, linear_velocity_obj;
    angular_velocity_obj.setRandom();
    linear_velocity_obj.setRandom();

    // Compute optic flow on the image plane according
    // to the sampled angular/linear velocity
    optic_flow_helper.computeFromDepthCamTwistAndObjDepthAndTwist(
        angular_velocity,
        linear_velocity,
        depth,
        r_COBJ,
        angular_velocity_obj,
        linear_velocity_obj,
        flow_analytic
    );

    computeOpticFlowFiniteDifference(
        camera,
        angular_velocity,
        linear_velocity,
        depth,
        r_COBJ,
        angular_velocity_obj,
        linear_velocity_obj,
        flow_finite_diff
    );

    // Check that the analytical flow and finite-difference flow match
    for (int y = 0; y < sensor_size.height; ++y) {
        for (int x = 0; x < sensor_size.width; ++x) {
            EXPECT_NEAR(
                (*flow_analytic)(y, x)[0],
                (*flow_finite_diff)(y, x)[0],
                0.1
            );
            EXPECT_NEAR(
                (*flow_analytic)(y, x)[1],
                (*flow_finite_diff)(y, x)[1],
                0.1
            );
        }
    }
}

ZE_UNITTEST_ENTRYPOINT
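A numerical note on the finite-difference reference above: it uses a one-sided step (dt = 0.001 s), whose truncation error is O(dt). If the 0.1 px/s tolerance ever becomes tight, a central difference is the usual remedy. A minimal sketch under the same ze camera API; the two transforms for the +/- dt/2 motions are hypothetical inputs, not code from this commit:

    // Hypothetical central-difference variant (not in the codebase): project
    // the point at t - dt/2 and t + dt/2 instead of t and t + dt, so the
    // truncation error drops from O(dt) to O(dt^2).
    ze::Vector2 centralDifferenceFlow(
        const ze::Camera::Ptr& camera,
        const ze::Transformation& T_forward,  // camera motion over +dt/2
        const ze::Transformation& T_backward, // camera motion over -dt/2
        const ze::Position& Xc_t,
        FloatType dt
    ) {
        const ze::Keypoint u_plus = camera->project(T_forward.transform(Xc_t));
        const ze::Keypoint u_minus =
            camera->project(T_backward.transform(Xc_t));
        return (u_plus - u_minus) / dt; // symmetric difference over one dt
    }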

@@ -1,74 +1,72 @@
#pragma once

#include <atomic>
#include <esim/common/types.hpp>
#include <functional>
#include <memory>
#include <ze/common/macros.hpp>
#include <ze/common/noncopyable.hpp>
#include <ze/common/signal_handler.hpp>

// fwd
namespace cv {
    class Mat;
}

namespace event_camera_simulator {

    using Callback = std::function<void(const SimulatorData& sim_data)>;

    enum class DataProviderType { RendererOnline, Folder, Rosbag };

    //! A data provider registers to a data source and triggers callbacks when
    //! new data is available.
    class DataProviderBase : ze::Noncopyable {
      public:
        ZE_POINTER_TYPEDEFS(DataProviderBase);

        DataProviderBase() = delete;
        DataProviderBase(DataProviderType type);
        virtual ~DataProviderBase() = default;

        //! Process all callbacks. Waits until callback is processed.
        void spin();

        //! Read next data field and process callback. Returns false when
        //! dataset finished.
        virtual bool spinOnce() = 0;

        //! False if there is no more data to process or there was a shutdown
        //! signal.
        virtual bool ok() const = 0;

        //! Pause data provider.
        virtual void pause();

        //! Stop data provider.
        virtual void shutdown();

        //! Register callback function to call when new message is available.
        void registerCallback(const Callback& callback);

        //! Returns the number of cameras in the rig
        virtual size_t numCameras() const = 0;

        //! Returns the camera rig
        ze::CameraRig::Ptr getCameraRig() {
            return camera_rig_;
        }

      protected:
        DataProviderType type_;
        Callback callback_;
        volatile bool running_ = true;
        ze::CameraRig::Ptr camera_rig_;

      private:
        ze::SimpleSigtermHandler
            signal_handler_; //!< Sets running_ to false when Ctrl-C is pressed.
    };

} // namespace event_camera_simulator
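Since DataProviderBase leaves only spinOnce(), ok() and numCameras() pure virtual, a new data source is small to write. A sketch of a hypothetical minimal provider against the protected members above (illustrative only, not part of this commit; DataProviderType::Folder is reused to avoid inventing a new enum value):

    using namespace event_camera_simulator;

    // Hypothetical provider that emits a fixed number of frames.
    class DummyProvider : public DataProviderBase {
      public:
        DummyProvider() : DataProviderBase(DataProviderType::Folder) {}

        bool spinOnce() override {
            if (count_ >= 10)
                return false;         // dataset finished
            ++count_;
            if (callback_)
                callback_(sim_data_); // hand data to the registered consumer
            return true;
        }

        bool ok() const override {
            return running_ && count_ < 10;
        }

        size_t numCameras() const override {
            return 1;
        }

      private:
        int count_ = 0;
        SimulatorData sim_data_;
    };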

@@ -1,10 +1,10 @@
#pragma once

#include <esim/data_provider/data_provider_base.hpp>
#include <gflags/gflags.h>

namespace event_camera_simulator {

    DataProviderBase::Ptr loadDataProviderFromGflags();

} // namespace event_camera_simulator

@@ -1,44 +1,41 @@
#pragma once

#include <esim/data_provider/data_provider_base.hpp>
#include <fstream>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include <ze/cameras/camera_rig.hpp>
#include <ze/common/macros.hpp>
#include <ze/common/types.hpp>

namespace event_camera_simulator {

    class DataProviderFromFolder : public DataProviderBase {
      public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW

        DataProviderFromFolder(const std::string& path_to_data_folder);

        virtual ~DataProviderFromFolder() = default;

        virtual bool spinOnce() override;

        virtual bool ok() const override;

        size_t numCameras() const override;

      private:
        int64_t getTimeStamp(const std::string& ts_str) const;

        std::string path_to_data_folder_;
        std::ifstream images_in_str_;
        const char delimiter_{','};
        const size_t num_tokens_in_line_ = 2; // stamp, image
        bool finished_parsing_;

        SimulatorData sim_data_;
    };

} // namespace event_camera_simulator

@@ -1,75 +1,77 @@
#pragma once

#include <esim/common/utils.hpp>
#include <esim/data_provider/data_provider_base.hpp>
#include <esim/rendering/renderer_base.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include <ze/cameras/camera_rig.hpp>
#include <ze/common/macros.hpp>
#include <ze/common/types.hpp>
#include <ze/vi_simulation/imu_simulator.hpp>
#include <ze/vi_simulation/trajectory_simulator.hpp>

namespace event_camera_simulator {

    /**
     * Online data provider intended to simulate a camera rig composed of
     * multiple cameras rigidly attached together, along with an Inertial
     * Measurement Unit (IMU).
     *
     * The camera rig follows a camera trajectory in 3D.
     */
    class DataProviderOnlineMoving3DCameraRig : public DataProviderBase {
      public:
        DataProviderOnlineMoving3DCameraRig(
            ze::real_t simulation_minimum_framerate,
            ze::real_t simulation_imu_rate,
            int simulation_adaptive_sampling_method,
            ze::real_t simulation_adaptive_sampling_lambda
        );

        virtual ~DataProviderOnlineMoving3DCameraRig();

        virtual bool spinOnce() override;

        virtual bool ok() const override;

        size_t numCameras() const override;

      private:
        void updateGroundtruth();
        void sampleImu();
        void sampleFrame();

        void setImuUpdated();
        void setFrameUpdated();
        void setAllUpdated();

        std::vector<Renderer::Ptr> renderers_;
        std::vector<OpticFlowHelper::Ptr> optic_flow_helpers_;
        ze::TrajectorySimulator::Ptr trajectory_;
        ze::ImuSimulator::Ptr imu_;
        SimulatorData sim_data_;

        ze::real_t t_;
        ze::real_t last_t_frame_; // time of the last sampled frame, used to
                                  // guarantee the minimum frame rate
        ze::real_t last_t_imu_;   // time of the last IMU sample, used to
                                  // guarantee the IMU rate
        ze::real_t next_t_flow_;  // next sampling time, chosen to guarantee
                                  // that the max pixel displacement since the
                                  // last frame is bounded
        ze::real_t dt_imu_;
        ze::real_t dt_frame_;

        ze::real_t simulation_minimum_framerate_;
        ze::real_t simulation_imu_rate_;
        int simulation_adaptive_sampling_method_;
        ze::real_t simulation_adaptive_sampling_lambda_;

        // dynamic objects
        std::vector<ze::TrajectorySimulator::Ptr> trajectory_dyn_obj_;
    };

} // namespace event_camera_simulator
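The time bookkeeping above is what drives spinOnce(): the next sample is whichever deadline expires first. A sketch of that decision, assuming the members keep the semantics documented above (hypothetical helper, not code from the repository; needs <algorithm> for std::min):

    // Earliest of: next IMU deadline, next minimum-framerate deadline, and
    // the flow-bounded deadline computed by the adaptive sampling scheme.
    ze::real_t nextSampleTime(
        ze::real_t last_t_imu, ze::real_t dt_imu,
        ze::real_t last_t_frame, ze::real_t dt_frame,
        ze::real_t next_t_flow
    ) {
        const ze::real_t next_t_imu = last_t_imu + dt_imu;
        const ze::real_t next_t_frame =
            std::min(last_t_frame + dt_frame, next_t_flow);
        return std::min(next_t_imu, next_t_frame);
    }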

@@ -1,54 +1,56 @@
#pragma once

#include <esim/data_provider/data_provider_base.hpp>
#include <esim/rendering/simple_renderer_base.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include <ze/common/macros.hpp>
#include <ze/common/types.hpp>

namespace event_camera_simulator {

    /**
     * Simple online data provider, intended to simulate a single event camera
     * based on images + optic flow maps provided by a rendering engine.
     *
     * This data provider does NOT simulate a camera trajectory or an IMU.
     */
    class DataProviderOnlineSimple : public DataProviderBase {
      public:
        DataProviderOnlineSimple(
            ze::real_t simulation_minimum_framerate,
            int simulation_adaptive_sampling_method,
            ze::real_t simulation_adaptive_sampling_lambda
        );

        virtual ~DataProviderOnlineSimple();

        virtual bool spinOnce() override;

        virtual bool ok() const override;

        size_t numCameras() const override;

      private:
        bool sampleFrame();
        void setFrameUpdated();

        SimpleRenderer::Ptr renderer_;
        SimulatorData sim_data_;

        ze::real_t t_;
        ze::real_t last_t_frame_;    // time of the last sampled frame, used to
                                     // guarantee the minimum frame rate
        ze::real_t next_t_adaptive_; // next sampling time, chosen to guarantee
                                     // that the adaptive sampling scheme is
                                     // respected
        ze::real_t dt_frame_;

        ze::real_t simulation_minimum_framerate_;
        int simulation_adaptive_sampling_method_;
        ze::real_t simulation_adaptive_sampling_lambda_;
    };

} // namespace event_camera_simulator

@@ -1,57 +1,57 @@
// This file was adapted from the ze_oss project:
// https://github.com/zurich-eye/ze_oss/blob/master/ze_data_provider/include/ze/data_provider/data_provider_rosbag.hpp
// Copyright (C) ETH Zurich, Wyss Zurich, Zurich Eye - All Rights Reserved

#pragma once

#include <esim/data_provider/data_provider_base.hpp>
#include <gflags/gflags.h>
#include <map>
#include <memory>
#include <rosbag/bag.h>
#include <rosbag/view.h>
#include <sensor_msgs/Image.h>
#include <string>
#include <vector>

namespace event_camera_simulator {

    class DataProviderRosbag : public DataProviderBase {
      public:
        DataProviderRosbag(
            const std::string& bag_filename,
            const std::map<std::string, size_t>& camera_topics
        );

        virtual ~DataProviderRosbag() = default;

        virtual bool spinOnce() override;

        virtual bool ok() const override;

        virtual size_t numCameras() const;

        size_t size() const;

      private:
        void loadRosbag(const std::string& bag_filename);

        void initBagView(const std::vector<std::string>& topics);

        inline bool cameraSpin(
            const sensor_msgs::ImageConstPtr m_img,
            const rosbag::MessageInstance& m
        );

        std::unique_ptr<rosbag::Bag> bag_;
        std::unique_ptr<rosbag::View> bag_view_;
        rosbag::View::iterator bag_view_it_;
        int n_processed_images_ = 0;

        // subscribed topics:
        std::map<std::string, size_t>
            img_topic_camidx_map_; // camera_topic --> camera_id

        SimulatorData sim_data_;
    };

} // namespace event_camera_simulator
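For context, driving this provider only requires the topic-to-camera-index map taken by the constructor. A minimal usage sketch; the bag path and topic name are placeholder values:

    // Map each image topic to its camera index in the rig.
    std::map<std::string, size_t> camera_topics;
    camera_topics["/cam0/image_raw"] = 0;

    event_camera_simulator::DataProviderRosbag provider(
        "dataset.bag",
        camera_topics
    );
    // A callback should be registered first (see registerCallback); then:
    while (provider.ok())
        provider.spinOnce(); // one image message per call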

@@ -1,13 +1,13 @@
#pragma once

#include <esim/rendering/renderer_base.hpp>
#include <esim/rendering/simple_renderer_base.hpp>
#include <gflags/gflags.h>

namespace event_camera_simulator {

    bool loadPreprocessedImage(const std::string& path_to_img, cv::Mat* img);
    Renderer::Ptr loadRendererFromGflags();
    SimpleRenderer::Ptr loadSimpleRendererFromGflags();

} // namespace event_camera_simulator

@@ -2,32 +2,25 @@
namespace event_camera_simulator {

    DataProviderBase::DataProviderBase(DataProviderType type)
        : type_(type),
          signal_handler_(running_) {}

    void DataProviderBase::spin() {
        while (ok())
            spinOnce();
    }

    void DataProviderBase::pause() {
        running_ = false;
    }

    void DataProviderBase::shutdown() {
        running_ = false;
    }

    void DataProviderBase::registerCallback(const Callback& callback) {
        callback_ = callback;
    }

} // namespace event_camera_simulator
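This is the whole control flow of a provider: a consumer registers one callback, then pumps the loop. A hedged usage sketch (the lambda body is illustrative; 'provider' is any concrete DataProviderBase subclass):

    provider->registerCallback(
        [](const event_camera_simulator::SimulatorData& sim_data) {
            // e.g. feed images / IMU samples to the event simulator here
            VLOG(2) << "received data stamped " << sim_data.timestamp;
        }
    );
    provider->spin(); // returns once ok() is false (end of data or Ctrl-C)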

@@ -1,38 +1,50 @@
#include <esim/data_provider/data_provider_base.hpp>
#include <esim/data_provider/data_provider_factory.hpp>
#include <esim/data_provider/data_provider_from_folder.hpp>
#include <esim/data_provider/data_provider_online_render.hpp>
#include <esim/data_provider/data_provider_online_simple.hpp>
#include <esim/data_provider/data_provider_rosbag.hpp>
#include <ze/common/logging.hpp>

DEFINE_int32(data_source, 0, " 0: Online renderer");

DEFINE_double(
    simulation_minimum_framerate,
    30.0,
    "Minimum frame rate, in Hz. "
    "Especially useful when the event rate is low, to guarantee "
    "that frames are still published at a minimum framerate."
);

DEFINE_double(
    simulation_imu_rate, 1000.0, "Fixed IMU sampling frequency, in Hz"
);

DEFINE_int32(
    simulation_adaptive_sampling_method,
    0,
    "Method to use for adaptive sampling. "
    "0: based on predicted absolute brightness change. "
    "1: based on optic flow."
);

DEFINE_double(
    simulation_adaptive_sampling_lambda,
    0.5,
    "Parameter that controls the behavior of the adaptive sampling method. "
    "The meaning of this value depends on the adaptive sampling method used: "
    "...based on predicted absolute brightness change: deltaT = lambda / "
    "max(|dL/dt|); "
    "...based on optic flow: deltaT = lambda / max(||du/dt||) where du/dt "
    "denotes the 2D optic flow field."
);

DEFINE_string(path_to_data_folder, "", "Path to folder containing the data.");

// Parameters for the DataProviderRosbag
DEFINE_string(
    bag_filename, "dataset.bag", "Name of bagfile from which to read."
);
DEFINE_string(topic_cam0, "/cam0/image_raw", "");
DEFINE_string(topic_cam1, "/cam1/image_raw", "");
DEFINE_string(topic_cam2, "/cam2/image_raw", "");
@@ -41,59 +53,64 @@ DEFINE_uint64(num_cams, 1, "Number of normal cameras to read from rosbag.");
namespace event_camera_simulator {

    DataProviderBase::Ptr loadDataProviderFromGflags() {
        // Create data provider.
        DataProviderBase::Ptr data_provider;
        switch (FLAGS_data_source) {
            case 0: // Online Renderer for Moving 3D Camera Rig with IMU
            {
                data_provider.reset(new DataProviderOnlineMoving3DCameraRig(
                    FLAGS_simulation_minimum_framerate,
                    FLAGS_simulation_imu_rate,
                    FLAGS_simulation_adaptive_sampling_method,
                    FLAGS_simulation_adaptive_sampling_lambda
                ));
                break;
            }
            case 1: // Online Renderer Simple
            {
                data_provider.reset(new DataProviderOnlineSimple(
                    FLAGS_simulation_minimum_framerate,
                    FLAGS_simulation_adaptive_sampling_method,
                    FLAGS_simulation_adaptive_sampling_lambda
                ));
                break;
            }
            case 2: // Read data from a folder
            {
                data_provider.reset(
                    new DataProviderFromFolder(FLAGS_path_to_data_folder)
                );
                break;
            }
            case 3: // Read data (images) from a rosbag
            {
                CHECK_LE(FLAGS_num_cams, 4u);
                CHECK_EQ(FLAGS_num_cams, 1u)
                    << "Only one camera is supported currently";

                // Fill camera topics.
                std::map<std::string, size_t> cam_topics;
                if (FLAGS_num_cams >= 1)
                    cam_topics[FLAGS_topic_cam0] = 0;
                if (FLAGS_num_cams >= 2)
                    cam_topics[FLAGS_topic_cam1] = 1;
                if (FLAGS_num_cams >= 3)
                    cam_topics[FLAGS_topic_cam2] = 2;
                if (FLAGS_num_cams >= 4)
                    cam_topics[FLAGS_topic_cam3] = 3;
                data_provider.reset(
                    new DataProviderRosbag(FLAGS_bag_filename, cam_topics)
                );
                break;
            }
            default: {
                LOG(FATAL) << "Data source not known.";
                break;
            }
        }

        return data_provider;
    }

} // namespace event_camera_simulator
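To make the two adaptive-sampling formulas from the flag descriptions concrete, this is the timestep rule they describe, using the helpers from esim/common/utils seen earlier in this commit; the wrapper function itself is hypothetical:

    // method 0: deltaT = lambda / max(|dL/dt|)   (brightness-change bound)
    // method 1: deltaT = lambda / max(||du/dt||) (optic-flow bound)
    // Real code should also guard against a zero maximum.
    ze::real_t adaptiveDeltaT(
        int method, ze::real_t lambda,
        const ImagePtr& image, const OpticFlowPtr& flow
    ) {
        if (method == 0)
            return lambda / maxPredictedAbsBrightnessChange(image, flow);
        return lambda / maxMagnitudeOpticFlow(flow);
    }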

@@ -1,92 +1,105 @@
#include <esim/data_provider/data_provider_from_folder.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <ze/cameras/camera_impl.hpp>
#include <ze/common/file_utils.hpp>

namespace event_camera_simulator {

    DataProviderFromFolder::DataProviderFromFolder(
        const std::string& path_to_data_folder
    )
        : DataProviderBase(DataProviderType::Folder),
          path_to_data_folder_(path_to_data_folder),
          finished_parsing_(false) {
        // Load CSV image file
        images_in_str_.open(ze::joinPath(path_to_data_folder, "images.csv"));
        CHECK(images_in_str_.is_open());

        // Create dummy camera rig
        // these intrinsic values are filled with garbage (width = height = 1)
        // since the actual values are not known
        ze::CameraVector cameras;
        cameras.emplace_back(ze::createEquidistantCameraShared(
            1,
            1,
            1.0,
            1.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0
        ));
        ze::TransformationVector extrinsics;
        extrinsics.push_back(ze::Transformation());
        camera_rig_ =
            std::make_shared<ze::CameraRig>(extrinsics, cameras, "camera");

        // Allocate memory for image data
        sim_data_.images.emplace_back(ImagePtr(new Image(
            cv::Size(camera_rig_->at(0).width(), camera_rig_->at(0).height())
        )));

        sim_data_.camera_rig = camera_rig_;
        sim_data_.images_updated = true;
        sim_data_.depthmaps_updated = false;
        sim_data_.optic_flows_updated = false;
        sim_data_.twists_updated = false;
        sim_data_.poses_updated = false;
        sim_data_.imu_updated = false;
    }

    int64_t DataProviderFromFolder::getTimeStamp(const std::string& ts_str
    ) const {
        return std::stoll(ts_str);
    }

    size_t DataProviderFromFolder::numCameras() const {
        return camera_rig_->size();
    }

    bool DataProviderFromFolder::spinOnce() {
        std::string line;
        if (!getline(images_in_str_, line)) {
            VLOG(1) << "Finished parsing images.csv file";
            finished_parsing_ = true;
            return false;
        }

        if ('%' != line.at(0) && '#' != line.at(0)) {
            std::vector<std::string> items = ze::splitString(line, delimiter_);
            CHECK_GE(items.size(), num_tokens_in_line_);
            int64_t stamp = getTimeStamp(items[0]);

            const std::string& path_to_image =
                ze::joinPath(path_to_data_folder_, items[1]);
            cv::Mat image = cv::imread(path_to_image, 0);
            CHECK(image.data)
                << "Could not load image from file: " << path_to_image;
            VLOG(3) << "Read image from file: " << path_to_image;
            image.convertTo(
                *sim_data_.images[0],
                cv::DataType<ImageFloatType>::type,
                1. / 255.
            );

            if (callback_) {
                sim_data_.timestamp = static_cast<Time>(stamp);
                callback_(sim_data_);
            }
        }

        return true;
    }

    bool DataProviderFromFolder::ok() const {
        if (!running_ || finished_parsing_) {
            VLOG(1) << "Data Provider was paused/terminated.";
            return false;
        }
        return true;
    }

} // namespace event_camera_simulator
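Given the parser above (comma delimiter, lines starting with '#' or '%' skipped, a timestamp token followed by an image path relative to the data folder), a compatible images.csv plausibly looks like the following; the stamps and file names are invented for illustration:

    # timestamp, relative_image_path  (illustrative values)
    0,frames/frame_00000000.png
    1000000,frames/frame_00000001.png
    2000000,frames/frame_00000002.png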

@@ -1,420 +1,450 @@
#include <esim/common/utils.hpp>
#include <esim/data_provider/data_provider_online_render.hpp>
#include <esim/data_provider/renderer_factory.hpp>
#include <esim/trajectory/imu_factory.hpp>
#include <esim/trajectory/trajectory_factory.hpp>
#include <ze/cameras/camera_rig.hpp>
#include <ze/common/time_conversions.hpp>
#include <ze/common/timer_collection.hpp>

DECLARE_TIMER(
    TimerDataProvider,
    timers_data_provider_,
    render,
    optic_flow,
    sample_frame,
    sample_imu
);

DEFINE_double(
    simulation_post_gaussian_blur_sigma,
    0,
    "If sigma > 0, Gaussian blur the rendered images "
    "with a Gaussian filter standard deviation sigma."
);

namespace event_camera_simulator {

    DataProviderOnlineMoving3DCameraRig::DataProviderOnlineMoving3DCameraRig(
        ze::real_t simulation_minimum_framerate,
        ze::real_t simulation_imu_rate,
        int simulation_adaptive_sampling_method,
        ze::real_t simulation_adaptive_sampling_lambda
    )
        : DataProviderBase(DataProviderType::RendererOnline),
          simulation_minimum_framerate_(simulation_minimum_framerate),
          simulation_imu_rate_(simulation_imu_rate),
          simulation_adaptive_sampling_method_(
              simulation_adaptive_sampling_method
          ),
          simulation_adaptive_sampling_lambda_(
              simulation_adaptive_sampling_lambda
          ),
          dt_imu_(1. / simulation_imu_rate),
          dt_frame_(1. / simulation_minimum_framerate) {
        CHECK(
            simulation_adaptive_sampling_method == 0
            || simulation_adaptive_sampling_method == 1
        );

        std::tie(trajectory_, trajectory_dyn_obj_) =
            loadTrajectorySimulatorFromGflags();
        imu_ = loadImuSimulatorFromGflags(trajectory_);
        camera_rig_ = ze::cameraRigFromGflags();

        // Compute Field-of-Views for information
        const ze::Camera::Ptr camera = camera_rig_->atShared(0);
        const int width = camera->width();
        const int height = camera->height();

        const ze::real_t horizontal_fov =
            180.0 / CV_PI
            * std::acos(camera->backProject(ze::Keypoint(0, height / 2))
                            .dot(camera->backProject(
                                ze::Keypoint(width - 1, height / 2)
                            )));

        const ze::real_t vertical_fov =
            180.0 / CV_PI
            * std::acos(camera->backProject(ze::Keypoint(width / 2, 0))
                            .dot(camera->backProject(
                                ze::Keypoint(width / 2, height - 1)
                            )));

        const ze::real_t diagonal_fov =
            180.0 / CV_PI
            * std::acos(camera->backProject(ze::Keypoint(0, 0))
                            .dot(camera->backProject(
                                ze::Keypoint(width - 1, height - 1)
                            )));

        LOG(INFO) << "Horizontal FOV: " << horizontal_fov << " deg";
        LOG(INFO) << "Vertical FOV: " << vertical_fov << " deg";
        LOG(INFO) << "Diagonal FOV: " << diagonal_fov << " deg";

        for (size_t i = 0; i < camera_rig_->size(); ++i) {
            renderers_.push_back(std::move(loadRendererFromGflags()));
            renderers_[i]->setCamera(camera_rig_->atShared(i));

            optic_flow_helpers_.emplace_back(
                std::make_shared<OpticFlowHelper>(camera_rig_->atShared(i))
            );
        }

        const size_t num_cameras = camera_rig_->size();
        sim_data_.groundtruth.T_W_Cs.resize(num_cameras);
        sim_data_.groundtruth.angular_velocities_.resize(num_cameras);
        sim_data_.groundtruth.linear_velocities_.resize(num_cameras);
        for (size_t i = 0; i < num_cameras; ++i) {
            const cv::Size size = cv::Size(
                camera_rig_->at(i).width(),
                camera_rig_->at(i).height()
            );

            sim_data_.images.emplace_back(ImagePtr(new Image(size)));
            sim_data_.depthmaps.emplace_back(DepthmapPtr(new Depthmap(size)));
            sim_data_.optic_flows.emplace_back(
                OpticFlowPtr(new OpticFlow(size))
            );

            sim_data_.images[i]->setTo(0);
            sim_data_.depthmaps[i]->setTo(0);
            sim_data_.optic_flows[i]->setTo(0);
        }

        for (size_t i = 0; i < trajectory_dyn_obj_.size(); i++) {
            sim_data_.groundtruth.T_W_OBJ_.push_back(Transformation());
            sim_data_.groundtruth.linear_velocity_obj_.push_back(
                LinearVelocity()
            );
            sim_data_.groundtruth.angular_velocity_obj_.push_back(
                AngularVelocity()
            );
        }

        sim_data_.camera_rig = camera_rig_;

        t_ = trajectory_->start();

        // At the initial time, we sample everything (IMU + frames)
        sampleImu();
        sampleFrame();
        setAllUpdated();
        if (callback_)
            callback_(sim_data_);
    }

    DataProviderOnlineMoving3DCameraRig::~DataProviderOnlineMoving3DCameraRig(
    ) {
        timers_data_provider_.saveToFile(
            "/tmp",
            "data_provider_online_render.csv"
        );
    }

    size_t DataProviderOnlineMoving3DCameraRig::numCameras() const {
        if (camera_rig_)
            return camera_rig_->size();
        else
            return 0;
    }

    void DataProviderOnlineMoving3DCameraRig::updateGroundtruth() {
        const Transformation T_W_B = trajectory_->T_W_B(t_);
        sim_data_.groundtruth.T_W_B = T_W_B;

        const AngularVelocity omega_B = trajectory_->angularVelocity_B(t_);
        const LinearVelocity v_B_W = trajectory_->velocity_W(t_);
        for (size_t i = 0; i < camera_rig_->size(); ++i) {
            sim_data_.groundtruth.T_W_Cs[i] =
                sim_data_.groundtruth.T_W_B * camera_rig_->T_B_C(i);

            const LinearVelocity v_W = v_B_W
                + T_W_B.getRotation().rotate(
                    ze::skewSymmetric(omega_B)
                    * camera_rig_->T_B_C(i).getPosition()
                );

            const AngularVelocity omega_C =
                camera_rig_->T_C_B(i).getRotation().rotate(omega_B);
            const LinearVelocity v_C = (camera_rig_->T_C_B(i) * T_W_B.inverse())
                                           .getRotation()
                                           .rotate(v_W);

            sim_data_.groundtruth.angular_velocities_[i] = omega_C;
            sim_data_.groundtruth.linear_velocities_[i] = v_C;
        }

        // update poses of dynamic objects
        for (size_t i = 0; i < trajectory_dyn_obj_.size(); i++) {
            sim_data_.groundtruth.T_W_OBJ_[i] =
                trajectory_dyn_obj_[i]->T_W_B(t_);
            sim_data_.groundtruth.linear_velocity_obj_[i] =
                trajectory_dyn_obj_[i]->velocity_W(t_);
            sim_data_.groundtruth.angular_velocity_obj_[i] =
                sim_data_.groundtruth.T_W_OBJ_[i].getRotation().rotate(
                    trajectory_dyn_obj_[i]->angularVelocity_B(t_)
                );
        }
    }

    void DataProviderOnlineMoving3DCameraRig::sampleImu() {
        // Sample new IMU (+ poses, twists, etc.) values,
        // Fill in the appropriate data in sim_data
        auto t =
            timers_data_provider_[::TimerDataProvider::sample_imu].timeScope();

        if (t_ > trajectory_->end())
            return;

        updateGroundtruth();
        sim_data_.specific_force_corrupted = imu_->specificForceCorrupted(t_);
        sim_data_.angular_velocity_corrupted =
            imu_->angularVelocityCorrupted(t_);
        sim_data_.groundtruth.acc_bias = imu_->bias()->accelerometer(t_);
        sim_data_.groundtruth.gyr_bias = imu_->bias()->gyroscope(t_);

        last_t_imu_ = t_;
    }

    void DataProviderOnlineMoving3DCameraRig::sampleFrame() {
        // Sample (i.e. render) a new frame (+ depth map),
        // Fill in the appropriate data in sim_data
        // Compute the optic flow and compute the next latest sampling time in
        // order to guarantee that the displacement is bounded by
        // simulation_max_displacement_successive_frames
        auto t = timers_data_provider_[::TimerDataProvider::sample_frame]
                     .timeScope();

        if (t_ > trajectory_->end())
            return;

        updateGroundtruth();

        {
            auto t =
                timers_data_provider_[::TimerDataProvider::render].timeScope();
            for (size_t i = 0; i < camera_rig_->size(); ++i) {
                CHECK(renderers_[i]);

                // if the renderer provides a function to compute the optic
                // flow (for example, the OpenGL renderer which implements
                // optic flow computation efficiently using a shader), use
                // that. otherwise, compute the optic flow ourselves using the
                // renderer depthmap
                if (renderers_[i]->canComputeOpticFlow()) {
                    renderers_[i]->renderWithFlow(
                        sim_data_.groundtruth.T_W_B * camera_rig_->T_B_C(i),
                        sim_data_.groundtruth.linear_velocities_[i],
                        sim_data_.groundtruth.angular_velocities_[i],
                        sim_data_.groundtruth.T_W_OBJ_,
                        sim_data_.groundtruth.linear_velocity_obj_,
                        sim_data_.groundtruth.angular_velocity_obj_,
                        sim_data_.images[i],
                        sim_data_.depthmaps[i],
                        sim_data_.optic_flows[i]
                    );
                } else {
                    renderers_[i]->render(
                        sim_data_.groundtruth.T_W_B * camera_rig_->T_B_C(i),
                        sim_data_.groundtruth.T_W_OBJ_,
                        sim_data_.images[i],
                        sim_data_.depthmaps[i]
                    );
                }

                // Optionally blur the rendered images slightly
                if (FLAGS_simulation_post_gaussian_blur_sigma > 0) {
                    gaussianBlur(
                        sim_data_.images[i],
                        FLAGS_simulation_post_gaussian_blur_sigma
                    );
                }
            }
        }

        {
            auto t = timers_data_provider_[::TimerDataProvider::optic_flow]
                         .timeScope();
            for (size_t i = 0; i < camera_rig_->size(); ++i) {
                CHECK(optic_flow_helpers_[i]);
                if (!renderers_[i]->canComputeOpticFlow()) {
                    optic_flow_helpers_[i]->computeFromDepthAndTwist(
                        sim_data_.groundtruth.angular_velocities_[i],
                        sim_data_.groundtruth.linear_velocities_[i],
                        sim_data_.depthmaps[i],
                        sim_data_.optic_flows[i]
                    );
                }
            }
        }

        // Adaptive sampling scheme based on predicted brightness change
        if (simulation_adaptive_sampling_method_ == 0) {
            // Predict brightness change based on image gradient and optic flow
            std::vector<FloatType> max_dLdts;
            for (size_t i = 0; i < camera_rig_->size(); ++i) {
                max_dLdts.push_back(maxPredictedAbsBrightnessChange(
                    sim_data_.images[i],
                    sim_data_.optic_flows[i]
                ));
            }
            const FloatType max_dLdt =
                *std::max_element(max_dLdts.begin(), max_dLdts.end());

            VLOG(1) << "max(|dLdt|) = " << max_dLdt << " logDN/s";

            // Compute next sampling time
            // t_{k+1} = t_k + delta_t where
            // delta_t = lambda / max(|dL/dt|)
            const ze::real_t delta_t =
                simulation_adaptive_sampling_lambda_ / max_dLdt;
            VLOG(1) << "deltaT = " << 1000.0 * delta_t << " ms";

            next_t_flow_ = t_ + delta_t;
        }

        // Adaptive sampling scheme based on optic flow
        else {
            std::vector<FloatType> max_flow_magnitudes;
            for (size_t i = 0; i < camera_rig_->size(); ++i) {
                max_flow_magnitudes.push_back(
                    maxMagnitudeOpticFlow(sim_data_.optic_flows[i])
                );
            }
            const FloatType max_flow_magnitude = *std::max_element(
                max_flow_magnitudes.begin(),
                max_flow_magnitudes.end()
            );

            VLOG(1) << "max(||optic_flow||) = " << max_flow_magnitude
                    << " px/s";

            // Compute next sampling time
            // t_{k+1} = t_k + delta_t where
            // delta_t = lambda / max(||optic_flow||)
            const ze::real_t delta_t =
                simulation_adaptive_sampling_lambda_ / max_flow_magnitude;
            VLOG(1) << "deltaT = " << 1000.0 * delta_t << " ms";

            next_t_flow_ = t_ + delta_t;
        }

        last_t_frame_ = t_;
    }

    void DataProviderOnlineMoving3DCameraRig::setImuUpdated() {
        // Set all the IMU-related flags to true, and all the frame-related
        // flags to false
sim_data_.imu_updated = true; // Compute the optic flow and compute the next latest sampling time in
sim_data_.twists_updated = true; // order to guarantee that the displacement is bounded by
sim_data_.poses_updated = true; // simulation_max_displacement_successive_frames
sim_data_.images_updated = false; auto t =
sim_data_.depthmaps_updated = false; timers_data_provider_[::TimerDataProvider::sample_frame].timeScope(
sim_data_.optic_flows_updated = false; );
}
void DataProviderOnlineMoving3DCameraRig::setFrameUpdated() if (t_ > trajectory_->end())
{ return;
// Set all the frame-related flags to true, and all the IMU-related flags to false
sim_data_.imu_updated = false;
sim_data_.twists_updated = true;
sim_data_.poses_updated = true;
sim_data_.images_updated = true;
sim_data_.depthmaps_updated = true;
sim_data_.optic_flows_updated = true;
}
void DataProviderOnlineMoving3DCameraRig::setAllUpdated() updateGroundtruth();
{
// Set all the flags to true to indicated everything has been changed
sim_data_.imu_updated = true;
sim_data_.twists_updated = true;
sim_data_.poses_updated = true;
sim_data_.images_updated = true;
sim_data_.depthmaps_updated = true;
sim_data_.optic_flows_updated = true;
}
bool DataProviderOnlineMoving3DCameraRig::spinOnce() {
{ auto t =
/* At what time do we need to sample "something" (and what "something"?) timers_data_provider_[::TimerDataProvider::render].timeScope();
We choose the next sampling time by considering the following constraints:
1. The IMU sampling rate must be constant (typically, from 200 Hz to 1 kHz) for (size_t i = 0; i < camera_rig_->size(); ++i) {
2. The frame sampling rate must be greater than a minimum value (typically, from 20 Hz to 100 Hz) CHECK(renderers_[i]);
3. The pixel displacement between two successive frames must be lower than a threshold.
* If the next sample needs to be an IMU sample, we just sample a new IMU value, without regenerating a frame, // if the renderer provides a function to compute the optic
and transmit only the new IMU (+ poses, twists, IMU bias) to the publisher by setting the approriate "data_changed" flags in the // flow (for example, the OpenGL renderer which implements
sim_data structure. // optic flow computation efficiently using a shader), use that.
// otherwise, compute the optic flow ourselves using the
// renderer depthmap
if (renderers_[i]->canComputeOpticFlow()) {
renderers_[i]->renderWithFlow(
sim_data_.groundtruth.T_W_B * camera_rig_->T_B_C(i),
sim_data_.groundtruth.linear_velocities_[i],
sim_data_.groundtruth.angular_velocities_[i],
sim_data_.groundtruth.T_W_OBJ_,
sim_data_.groundtruth.linear_velocity_obj_,
sim_data_.groundtruth.angular_velocity_obj_,
sim_data_.images[i],
sim_data_.depthmaps[i],
sim_data_.optic_flows[i]
);
} else {
renderers_[i]->render(
sim_data_.groundtruth.T_W_B * camera_rig_->T_B_C(i),
sim_data_.groundtruth.T_W_OBJ_,
sim_data_.images[i],
sim_data_.depthmaps[i]
);
}
* If the next sample needs to be a frame sample, we render a new frame (+ depth map + optic flow), but not a new IMU value. // Optionally blur the rendered images slightly
This can happen either because if (FLAGS_simulation_post_gaussian_blur_sigma > 0) {
(i) a new frame must be rendered in order to guarantee that the displacement between frames is bounded, or gaussianBlur(
(ii) the frame rate should be higher than a minimum (used-defined) value. sim_data_.images[i],
FLAGS_simulation_post_gaussian_blur_sigma
);
}
}
}
At the beginning of time (t_ = trajectory_->start()), we sample everything (IMU + frame). {
*/ auto t = timers_data_provider_[::TimerDataProvider::optic_flow]
.timeScope();
for (size_t i = 0; i < camera_rig_->size(); ++i) {
CHECK(optic_flow_helpers_[i]);
if (!renderers_[i]->canComputeOpticFlow()) {
optic_flow_helpers_[i]->computeFromDepthAndTwist(
sim_data_.groundtruth.angular_velocities_[i],
sim_data_.groundtruth.linear_velocities_[i],
sim_data_.depthmaps[i],
sim_data_.optic_flows[i]
);
}
}
}
const ze::real_t next_t_imu = last_t_imu_ + dt_imu_; // Adaptive sampling scheme based on predicted brightness change
const ze::real_t next_t_frame = last_t_frame_ + dt_frame_; if (simulation_adaptive_sampling_method_ == 0) {
// Predict brightness change based on image gradient and optic flow
std::vector<FloatType> max_dLdts;
for (size_t i = 0; i < camera_rig_->size(); ++i) {
max_dLdts.push_back(maxPredictedAbsBrightnessChange(
sim_data_.images[i],
sim_data_.optic_flows[i]
));
}
const FloatType max_dLdt =
*std::max_element(max_dLdts.begin(), max_dLdts.end());
VLOG(2) << "t = " << t_; VLOG(1) << "max(|dLdt|) = " << max_dLdt << " logDN/s";
VLOG(2) << "next_t_imu = " << next_t_imu;
VLOG(2) << "next_t_frame = " << next_t_frame;
VLOG(2) << "next_t_flow = " << next_t_flow_;
if(next_t_imu < next_t_frame && next_t_imu < next_t_flow_) // Compute next sampling time
{ // t_{k+1} = t_k + delta_t where
VLOG(2) << "Sample IMU"; // delta_t = lambda / max(|dL/dt|)
t_ = next_t_imu; const ze::real_t delta_t =
sampleImu(); simulation_adaptive_sampling_lambda_ / max_dLdt;
setImuUpdated(); VLOG(1) << "deltaT = " << 1000.0 * delta_t << " ms";
}
else if(next_t_flow_ < next_t_imu && next_t_flow_ < next_t_frame)
{
VLOG(2) << "Sample frame (because of optic flow)";
t_ = next_t_flow_;
sampleFrame();
setFrameUpdated();
}
else if(next_t_frame < next_t_imu && next_t_frame < next_t_flow_)
{
VLOG(2) << "Sample frame (because of minimum framerate)";
t_ = next_t_frame;
sampleFrame();
setFrameUpdated();
}
else
{
VLOG(2) << "Sample IMU and frame";
t_ = next_t_frame;
// In that case, we sample everything
sampleImu();
sampleFrame();
setAllUpdated();
}
if(t_ > trajectory_->end()) next_t_flow_ = t_ + delta_t;
{ }
running_ = false;
return false;
}
if(callback_) // Adaptive sampling scheme based on optic flow
{ else {
sim_data_.timestamp = static_cast<Time>(ze::secToNanosec(t_)); std::vector<FloatType> max_flow_magnitudes;
callback_(sim_data_); for (size_t i = 0; i < camera_rig_->size(); ++i) {
} max_flow_magnitudes.push_back(
else maxMagnitudeOpticFlow(sim_data_.optic_flows[i])
{ );
LOG_FIRST_N(WARNING, 1) << "No camera callback registered but measurements available."; }
} const FloatType max_flow_magnitude = *std::max_element(
return true; max_flow_magnitudes.begin(),
} max_flow_magnitudes.end()
);
bool DataProviderOnlineMoving3DCameraRig::ok() const VLOG(1) << "max(||optic_flow||) = " << max_flow_magnitude
{ << " px/s";
if (!running_)
{ // Compute next sampling time
VLOG(1) << "Data Provider was paused/terminated."; // t_{k+1} = t_k + delta_t where
return false; // delta_t = lambda / max(||optic_flow||)
} const ze::real_t delta_t =
return true; simulation_adaptive_sampling_lambda_ / max_flow_magnitude;
} VLOG(1) << "deltaT = " << 1000.0 * delta_t << " ms";
next_t_flow_ = t_ + delta_t;
}
last_t_frame_ = t_;
}
void DataProviderOnlineMoving3DCameraRig::setImuUpdated() {
// Set all the IMU-related flags to true, and all the frame-related
// flags to false
sim_data_.imu_updated = true;
sim_data_.twists_updated = true;
sim_data_.poses_updated = true;
sim_data_.images_updated = false;
sim_data_.depthmaps_updated = false;
sim_data_.optic_flows_updated = false;
}
void DataProviderOnlineMoving3DCameraRig::setFrameUpdated() {
// Set all the frame-related flags to true, and all the IMU-related
// flags to false
sim_data_.imu_updated = false;
sim_data_.twists_updated = true;
sim_data_.poses_updated = true;
sim_data_.images_updated = true;
sim_data_.depthmaps_updated = true;
sim_data_.optic_flows_updated = true;
}
void DataProviderOnlineMoving3DCameraRig::setAllUpdated() {
// Set all the flags to true to indicate that everything has changed
sim_data_.imu_updated = true;
sim_data_.twists_updated = true;
sim_data_.poses_updated = true;
sim_data_.images_updated = true;
sim_data_.depthmaps_updated = true;
sim_data_.optic_flows_updated = true;
}
bool DataProviderOnlineMoving3DCameraRig::spinOnce() {
/* At what time do we need to sample "something" (and what "something"?)
We choose the next sampling time by considering the following
constraints:
1. The IMU sampling rate must be constant (typically, from 200 Hz to
1 kHz)
2. The frame sampling rate must be greater than a minimum value
(typically, from 20 Hz to 100 Hz)
3. The pixel displacement between two successive frames must be lower
than a threshold.
* If the next sample needs to be an IMU sample, we just sample a new
IMU value, without regenerating a frame, and transmit only the new
IMU (+ poses, twists, IMU bias) to the publisher by setting the
appropriate "data_changed" flags in the sim_data structure.
* If the next sample needs to be a frame sample, we render a new
frame (+ depth map + optic flow), but not a new IMU value. This can
happen either because (i) a new frame must be rendered in order to
guarantee that the displacement between frames is bounded, or (ii)
the frame rate should be higher than a minimum (user-defined) value.
At the beginning of time (t_ = trajectory_->start()), we sample
everything (IMU + frame).
*/
const ze::real_t next_t_imu = last_t_imu_ + dt_imu_;
const ze::real_t next_t_frame = last_t_frame_ + dt_frame_;
VLOG(2) << "t = " << t_;
VLOG(2) << "next_t_imu = " << next_t_imu;
VLOG(2) << "next_t_frame = " << next_t_frame;
VLOG(2) << "next_t_flow = " << next_t_flow_;
if (next_t_imu < next_t_frame && next_t_imu < next_t_flow_) {
VLOG(2) << "Sample IMU";
t_ = next_t_imu;
sampleImu();
setImuUpdated();
} else if (next_t_flow_ < next_t_imu && next_t_flow_ < next_t_frame) {
VLOG(2) << "Sample frame (because of optic flow)";
t_ = next_t_flow_;
sampleFrame();
setFrameUpdated();
} else if (next_t_frame < next_t_imu && next_t_frame < next_t_flow_) {
VLOG(2) << "Sample frame (because of minimum framerate)";
t_ = next_t_frame;
sampleFrame();
setFrameUpdated();
} else {
VLOG(2) << "Sample IMU and frame";
t_ = next_t_frame;
// In that case, we sample everything
sampleImu();
sampleFrame();
setAllUpdated();
}
if (t_ > trajectory_->end()) {
running_ = false;
return false;
}
if (callback_) {
sim_data_.timestamp = static_cast<Time>(ze::secToNanosec(t_));
callback_(sim_data_);
} else {
LOG_FIRST_N(WARNING, 1)
<< "No camera callback registered but measurements available.";
}
return true;
}
bool DataProviderOnlineMoving3DCameraRig::ok() const {
if (!running_) {
VLOG(1) << "Data Provider was paused/terminated.";
return false;
}
return true;
}
} // namespace event_camera_simulator
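The two comments above compress the scheduler into two rules: the adaptive step delta_t = lambda / max(|dL/dt|) (or lambda / max(||optic_flow||)), and spinOnce() advancing to whichever of next_t_imu, next_t_frame and next_t_flow comes first. A minimal standalone sketch with invented numbers (nothing below is taken from the simulator itself):

#include <algorithm>
#include <iostream>

int main() {
    // Hypothetical values, for illustration only.
    const double lambda = 0.1;     // simulation_adaptive_sampling_lambda_
    const double max_dLdt = 500.0; // max |dL/dt| over the image, in logDN/s

    // delta_t = lambda / max(|dL/dt|): the faster the brightness changes
    // anywhere in the image, the sooner the next frame is rendered.
    const double delta_t = lambda / max_dLdt;                // 0.0002 s
    std::cout << "deltaT = " << 1000.0 * delta_t << " ms\n"; // 0.2 ms

    // spinOnce() then jumps to the earliest pending event, exactly as in
    // the if/else cascade above.
    const double next_t_imu = 0.005, next_t_frame = 0.02;
    const double next_t_flow = delta_t;
    std::cout << "next event at t = "
              << std::min({next_t_imu, next_t_frame, next_t_flow}) << " s\n";
    return 0;
}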
View File
@ -1,177 +1,177 @@
#include <esim/common/utils.hpp>
#include <esim/data_provider/data_provider_online_simple.hpp>
#include <esim/data_provider/renderer_factory.hpp>
#include <ze/common/time_conversions.hpp>

DECLARE_double(simulation_post_gaussian_blur_sigma);

namespace event_camera_simulator {

DataProviderOnlineSimple::DataProviderOnlineSimple(
    ze::real_t simulation_minimum_framerate,
    int simulation_adaptive_sampling_method,
    ze::real_t simulation_adaptive_sampling_lambda
)
    : DataProviderBase(DataProviderType::RendererOnline),
      simulation_minimum_framerate_(simulation_minimum_framerate),
      simulation_adaptive_sampling_method_(simulation_adaptive_sampling_method),
      simulation_adaptive_sampling_lambda_(simulation_adaptive_sampling_lambda),
      dt_frame_(1. / simulation_minimum_framerate) {
    CHECK(
        simulation_adaptive_sampling_method == 0
        || simulation_adaptive_sampling_method == 1
    );

    renderer_ = loadSimpleRendererFromGflags();

    const size_t num_cameras = 1u;

    for (size_t i = 0; i < num_cameras; ++i) {
        const cv::Size size =
            cv::Size(renderer_->getWidth(), renderer_->getHeight());

        sim_data_.images.emplace_back(ImagePtr(new Image(size)));
        sim_data_.optic_flows.emplace_back(OpticFlowPtr(new OpticFlow(size)));

        sim_data_.images[i]->setTo(0);
        sim_data_.optic_flows[i]->setTo(0);
    }

    // At the initial time, we sample a frame + optic flow map
    t_ = 0.;
    sampleFrame();
    setFrameUpdated();
    if (callback_)
        callback_(sim_data_);
}

DataProviderOnlineSimple::~DataProviderOnlineSimple() {}

size_t DataProviderOnlineSimple::numCameras() const {
    return 1u;
}

bool DataProviderOnlineSimple::sampleFrame() {
    // Sample (i.e. render) a new frame (+ optic flow map),
    // Fill in the appropriate data in sim_data.
    // Compute the optic flow, and the latest admissible next sampling
    // time, in order to guarantee that the displacement is bounded by
    // simulation_max_displacement_successive_frames
    CHECK(renderer_);
    bool is_finished = renderer_->render(
        ze::secToNanosec(t_),
        sim_data_.images[0],
        sim_data_.optic_flows[0]
    );

    if (is_finished)
        return true;

    // Optionally blur the rendered images slightly
    if (FLAGS_simulation_post_gaussian_blur_sigma > 0) {
        gaussianBlur(
            sim_data_.images[0],
            FLAGS_simulation_post_gaussian_blur_sigma
        );
    }

    // Adaptive sampling scheme based on predicted brightness change
    if (simulation_adaptive_sampling_method_ == 0) {
        // Predict brightness change based on image gradient and optic flow
        const FloatType max_dLdt = maxPredictedAbsBrightnessChange(
            sim_data_.images[0],
            sim_data_.optic_flows[0]
        );
        VLOG(1) << "max(|dLdt|) = " << max_dLdt << " logDN/s";

        // Compute next sampling time
        // t_{k+1} = t_k + delta_t where
        // delta_t = lambda / max(|dL/dt|)
        const ze::real_t delta_t =
            simulation_adaptive_sampling_lambda_ / max_dLdt;
        VLOG(1) << "deltaT = " << 1000.0 * delta_t << " ms";

        next_t_adaptive_ = t_ + delta_t;
    }
    // Adaptive sampling scheme based on optic flow
    else {
        const FloatType max_flow_magnitude =
            maxMagnitudeOpticFlow(sim_data_.optic_flows[0]);
        VLOG(1) << "max(||optic_flow||) = " << max_flow_magnitude << " px/s";

        // Compute next sampling time
        // t_{k+1} = t_k + delta_t where
        // delta_t = lambda / max(||optic_flow||)
        const ze::real_t delta_t =
            simulation_adaptive_sampling_lambda_ / max_flow_magnitude;
        VLOG(1) << "deltaT = " << 1000.0 * delta_t << " ms";

        next_t_adaptive_ = t_ + delta_t;
    }
    last_t_frame_ = t_;
    return false;
}

void DataProviderOnlineSimple::setFrameUpdated() {
    // Set all the frame-related flags to true, and all the IMU-related
    // flags to false
    sim_data_.imu_updated = false;
    sim_data_.twists_updated = false;
    sim_data_.poses_updated = false;
    sim_data_.images_updated = true;
    sim_data_.depthmaps_updated = false;
    sim_data_.optic_flows_updated = true;
}

bool DataProviderOnlineSimple::spinOnce() {
    const ze::real_t next_t_frame = last_t_frame_ + dt_frame_;
    VLOG(2) << "t = " << t_;
    VLOG(2) << "next_t_frame = " << next_t_frame;
    VLOG(2) << "next_t_flow = " << next_t_adaptive_;

    if (next_t_adaptive_ < next_t_frame) {
        VLOG(2) << "Sample frame (because of optic flow)";
        t_ = next_t_adaptive_;
    } else {
        VLOG(2) << "Sample frame (because of minimum framerate)";
        t_ = next_t_frame;
    }
    bool is_finished = sampleFrame();
    setFrameUpdated();

    if (is_finished) {
        running_ = false;
        return false;
    }

    if (callback_) {
        sim_data_.timestamp = static_cast<Time>(ze::secToNanosec(t_));
        callback_(sim_data_);
    } else {
        LOG_FIRST_N(WARNING, 1)
            << "No camera callback registered but measurements available.";
    }
    return true;
}

bool DataProviderOnlineSimple::ok() const {
    if (!running_) {
        VLOG(1) << "Data Provider was paused/terminated.";
        return false;
    }
    return true;
}

} // namespace event_camera_simulator
View File
@ -1,242 +1,236 @@
#include <cv_bridge/cv_bridge.h>
#include <esim/data_provider/data_provider_rosbag.hpp>
#include <rosbag/query.h>
#include <sensor_msgs/image_encodings.h>
#include <ze/common/logging.hpp>
#include <ze/common/path_utils.hpp>
#include <ze/common/string_utils.hpp>
#include <ze/common/time_conversions.hpp>

DEFINE_int32(
    data_source_stop_after_n_frames, -1, "How many frames should be processed?"
);
DEFINE_double(data_source_start_time_s, 0.0, "Start time in seconds");
DEFINE_double(data_source_stop_time_s, 0.0, "Stop time in seconds");

namespace event_camera_simulator {

DataProviderRosbag::DataProviderRosbag(
    const std::string& bag_filename,
    const std::map<std::string, size_t>& img_topic_camidx_map
)
    : DataProviderBase(DataProviderType::Rosbag),
      img_topic_camidx_map_(img_topic_camidx_map) {
    loadRosbag(bag_filename);

    std::vector<std::string> topics;
    for (auto it : img_topic_camidx_map_) {
        VLOG(1) << "Subscribing to: " << it.first;
        topics.push_back(it.first);
        sim_data_.images.emplace_back(ImagePtr(new Image()));
    }

    initBagView(topics);
}

void DataProviderRosbag::loadRosbag(const std::string& bag_filename) {
    CHECK(ze::fileExists(bag_filename))
        << "File does not exist: " << bag_filename;
    VLOG(1) << "Opening rosbag: " << bag_filename << " ...";
    bag_.reset(new rosbag::Bag);
    try {
        bag_->open(bag_filename, rosbag::bagmode::Read);
    } catch (const std::exception& e) {
        LOG(FATAL) << "Could not open rosbag " << bag_filename << ": "
                   << e.what();
    }
}

void DataProviderRosbag::initBagView(const std::vector<std::string>& topics) {
    bag_view_.reset(new rosbag::View(*bag_, rosbag::TopicQuery(topics)));

    if (FLAGS_data_source_start_time_s != 0.0
        || FLAGS_data_source_stop_time_s != 0.0) {
        CHECK_GE(FLAGS_data_source_start_time_s, 0);
        CHECK_GE(FLAGS_data_source_stop_time_s, 0);

        // Retrieve begin and end times from the bag file (given the topic
        // query).
        const ros::Time absolute_time_offset = bag_view_->getBeginTime();
        VLOG(2) << "Bag begin time: " << absolute_time_offset;
        const ros::Time absolute_end_time = bag_view_->getEndTime();
        VLOG(2) << "Bag end time: " << absolute_end_time;
        if (absolute_end_time < absolute_time_offset) {
            LOG(FATAL) << "Invalid bag end time: " << absolute_end_time
                       << ". Check that the bag file is properly indexed"
                       << " by running 'rosbag reindex file.bag'.";
        }

        // Compute start and stop time.
        LOG(INFO) << "Starting rosbag at time: "
                  << FLAGS_data_source_start_time_s;
        const ros::Duration data_source_start_time(
            FLAGS_data_source_start_time_s
        );
        const ros::Time absolute_start_time =
            data_source_start_time.isZero()
                ? absolute_time_offset
                : absolute_time_offset + data_source_start_time;
        const ros::Duration data_source_stop_time(
            FLAGS_data_source_stop_time_s
        );
        const ros::Time absolute_stop_time =
            data_source_stop_time.isZero()
                ? absolute_end_time
                : absolute_time_offset + data_source_stop_time;

        // Ensure that the provided stop time is valid.
        // When a bag file is corrupted / invalid the bag end time
        // cannot be retrieved. Run rosbag info to check if the bag file
        // is properly indexed.
        if (absolute_stop_time < absolute_start_time) {
            LOG(ERROR) << "Provided stop time is less than bag begin time. "
                       << "Please make sure to provide a valid stop time and "
                       << "check that the bag file is properly indexed "
                       << "by running 'rosbag reindex file.bag'.";
        } else if (absolute_stop_time > absolute_end_time) {
            LOG(ERROR) << "Provided stop time is greater than bag end time. "
                       << "Please make sure to provide a valid stop time and "
                       << "check that the bag file is properly indexed "
                       << "by running 'rosbag reindex file.bag'.";
        } else {
            VLOG(1) << "Absolute start time set to " << absolute_start_time;
            VLOG(1) << "Absolute stop time set to " << absolute_stop_time;
        }

        // Reset the bag View
        CHECK_GT(absolute_stop_time, absolute_start_time);
        CHECK_LE(absolute_stop_time, absolute_end_time);
        bag_view_.reset(new rosbag::View(
            *bag_,
            rosbag::TopicQuery(topics),
            absolute_start_time,
            absolute_stop_time
        ));
    }
    bag_view_it_ = bag_view_->begin();

    // Ensure that topics exist.
    // The connection info only contains topics that are available in the
    // bag. If a topic is requested that is not available, it does not show
    // up in the info.
    std::vector<const rosbag::ConnectionInfo*> connection_infos =
        bag_view_->getConnections();
    if (topics.size() != connection_infos.size()) {
        LOG(ERROR) << "Successfully connected to "
                   << connection_infos.size() << " topics:";
        for (const rosbag::ConnectionInfo* info : connection_infos)
            LOG(ERROR) << "*) " << info->topic;
        LOG(ERROR) << "Requested " << topics.size() << " topics:";
        for (const std::string& topic : topics)
            LOG(ERROR) << "*) " << topic;
        LOG(FATAL) << "Not all requested topics found in bagfile. "
                   << "Is topic_cam0, topic_imu0, etc. set correctly? "
                   << "Maybe removing/adding a slash as prefix solves the problem.";
    }
}

size_t DataProviderRosbag::numCameras() const {
    return img_topic_camidx_map_.size();
}

bool DataProviderRosbag::spinOnce() {
    if (bag_view_it_ != bag_view_->end()) {
        const rosbag::MessageInstance m = *bag_view_it_;

        // Camera Messages:
        const sensor_msgs::ImageConstPtr m_img =
            m.instantiate<sensor_msgs::Image>();
        if (m_img && callback_) {
            if (!cameraSpin(m_img, m))
                return false;
        } else {
            LOG_FIRST_N(WARNING, 1)
                << "No camera callback registered but measurements available";
        }

        ++bag_view_it_;
        return true;
    }
    return false;
}

bool DataProviderRosbag::cameraSpin(
    sensor_msgs::ImageConstPtr m_img, const rosbag::MessageInstance& m
) {
    auto it = img_topic_camidx_map_.find(m.getTopic());
    if (it != img_topic_camidx_map_.end()) {
        ++n_processed_images_;
        if (FLAGS_data_source_stop_after_n_frames > 0
            && n_processed_images_
                   > FLAGS_data_source_stop_after_n_frames) {
            LOG(WARNING)
                << "Data source has reached max number of desired frames.";
            running_ = false;
            return false;
        }

        cv_bridge::CvImagePtr cv_ptr;

        try {
            cv_ptr = cv_bridge::toCvCopy(
                m_img,
                sensor_msgs::image_encodings::MONO8
            );
        } catch (cv_bridge::Exception& e) {
            LOG(WARNING) << "cv_bridge exception: " << e.what();
            return false;
        }

        cv_ptr->image.convertTo(
            *(sim_data_.images[0]),
            cv::DataType<ImageFloatType>::type,
            1. / 255.
        );
        sim_data_.timestamp =
            static_cast<Time>(m_img->header.stamp.toNSec());

        sim_data_.imu_updated = false;
        sim_data_.images_updated = true;
        sim_data_.depthmaps_updated = false;
        sim_data_.optic_flows_updated = false;
        sim_data_.twists_updated = false;
        sim_data_.poses_updated = false;

        callback_(sim_data_);
    } else {
        LOG_FIRST_N(WARNING, 1)
            << "Topic in bag that is not subscribed: " << m.getTopic();
    }
    return true;
}

bool DataProviderRosbag::ok() const {
    if (!running_) {
        VLOG(1) << "Data Provider was paused/terminated.";
        return false;
    }
    if (bag_view_it_ == bag_view_->end()) {
        VLOG(1) << "All data processed.";
        return false;
    }
    return true;
}

size_t DataProviderRosbag::size() const {
    CHECK(bag_view_);
    return bag_view_->size();
}

} // namespace event_camera_simulator
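For orientation, a hypothetical usage sketch of this provider (the bag path and topic name are invented; a real caller would also register a data callback through the provider's base-class interface, which this diff does not show):

#include <esim/data_provider/data_provider_rosbag.hpp>

#include <map>
#include <string>

int main() {
    std::map<std::string, size_t> topic_to_cam_idx = {
        {"/cam0/image_raw", 0} // invented topic name
    };
    event_camera_simulator::DataProviderRosbag provider(
        "/tmp/example.bag", // invented bag path
        topic_to_cam_idx
    );
    // Each spin converts one MONO8 frame to a float image in [0, 1] and
    // forwards it; ok() turns false once the bag view is exhausted.
    while (provider.ok())
        provider.spinOnce();
    return 0;
}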
View File
@ -1,199 +1,271 @@
#include <esim/common/utils.hpp>
#include <esim/data_provider/renderer_factory.hpp>
#include <esim/imp_multi_objects_2d/imp_multi_objects_2d_renderer.hpp>
#include <esim/imp_opengl_renderer/opengl_renderer.hpp>
#include <esim/imp_panorama_renderer/panorama_renderer.hpp>
#include <esim/imp_planar_renderer/planar_renderer.hpp>
#include <esim/imp_unrealcv_renderer/unrealcv_renderer.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/cameras/camera_impl.hpp>
#include <ze/cameras/camera_models.hpp>
#include <ze/common/logging.hpp>

DEFINE_int32(
    renderer_type,
    0,
    " 0: Planar renderer, 1: Panorama renderer, 2: OpenGL renderer"
);

DEFINE_string(
    renderer_texture,
    "",
    "Path to image which will be used to texture the plane"
);

DEFINE_double(
    renderer_hfov_cam_source_deg,
    130.0,
    "Horizontal FoV of the source camera (that captured the image on "
    "the plane)"
);

DEFINE_double(
    renderer_preprocess_median_blur,
    0,
    "Kernel size of the preprocessing median blur."
);

DEFINE_double(
    renderer_preprocess_gaussian_blur,
    0,
    "Amount of preprocessing Gaussian blur."
);

DEFINE_double(
    renderer_plane_x,
    0.0,
    "x position of the center of the plane, in world coordinates"
);

DEFINE_double(
    renderer_plane_y,
    0.0,
    "y position of the center of the plane, in world coordinates"
);

DEFINE_double(
    renderer_plane_z,
    -1.0,
    "z position of the center of the plane, in world coordinates"
);

DEFINE_double(
    renderer_plane_qw,
    0.0,
    "w component of the quaternion q_W_P (orientation of the plane "
    "with respect to the world)"
);

DEFINE_double(
    renderer_plane_qx,
    1.0,
    "x component of the quaternion q_W_P (orientation of the plane "
    "with respect to the world)"
);

DEFINE_double(
    renderer_plane_qy,
    0.0,
    "y component of the quaternion q_W_P (orientation of the plane "
    "with respect to the world)"
);

DEFINE_double(
    renderer_plane_qz,
    0.0,
    "z component of the quaternion q_W_P (orientation of the plane "
    "with respect to the world)"
);

DEFINE_double(renderer_z_min, 0.01, "Minimum clipping distance.");

DEFINE_double(renderer_z_max, 10.0, "Maximum clipping distance.");

DEFINE_bool(
    renderer_extend_border,
    false,
    "Whether to extend the borders of the plane to infinity or not."
);

DEFINE_double(
    renderer_panorama_qw,
    0.0,
    "w component of the quaternion q_W_P (orientation of the "
    "panorama with respect to the world)"
);

DEFINE_double(
    renderer_panorama_qx,
    1.0,
    "x component of the quaternion q_W_P (orientation of the "
    "panorama with respect to the world)"
);

DEFINE_double(
    renderer_panorama_qy,
    0.0,
    "y component of the quaternion q_W_P (orientation of the "
    "panorama with respect to the world)"
);

DEFINE_double(
    renderer_panorama_qz,
    0.0,
    "z component of the quaternion q_W_P (orientation of the "
    "panorama with respect to the world)"
);

namespace event_camera_simulator {

bool loadPreprocessedImage(const std::string& path_to_img, cv::Mat* img) {
    CHECK(img);
    VLOG(1) << "Loading texture file from file: " << FLAGS_renderer_texture
            << ".";
    *img = cv::imread(path_to_img, 0);
    if (!img->data) {
        LOG(FATAL) << "Could not open image at: " << FLAGS_renderer_texture
                   << ".";
        return false;
    }

    if (FLAGS_renderer_preprocess_median_blur > 1) {
        VLOG(1) << "Pre-filtering the texture with median filter of size: "
                << FLAGS_renderer_preprocess_median_blur << ".";
        cv::medianBlur(*img, *img, FLAGS_renderer_preprocess_median_blur);
    }

    if (FLAGS_renderer_preprocess_gaussian_blur > 0) {
        VLOG(1) << "Pre-filtering the texture with gaussian filter of size: "
                << FLAGS_renderer_preprocess_gaussian_blur << ".";
        cv::GaussianBlur(
            *img,
            *img,
            cv::Size(21, 21),
            FLAGS_renderer_preprocess_gaussian_blur
        );
    }

    img->convertTo(*img, cv::DataType<ImageFloatType>::type, 1.0 / 255.0);

    return true;
}

Renderer::Ptr loadRendererFromGflags() {
    Renderer::Ptr renderer;

    switch (FLAGS_renderer_type) {
    case 0: // Planar renderer
    {
        cv::Mat img_src;
        if (!loadPreprocessedImage(FLAGS_renderer_texture, &img_src))
            return nullptr;
        double f_src = hfovToFocalLength(
            FLAGS_renderer_hfov_cam_source_deg,
            img_src.cols
        );
        Camera::Ptr cam_src = std::make_shared<ze::PinholeCamera>(
            img_src.cols,
            img_src.rows,
            ze::CameraType::Pinhole,
            (Vector4() << f_src,
             f_src,
             0.5 * img_src.cols,
             0.5 * img_src.rows)
                .finished(),
            ze::VectorX()
        );
        Transformation T_W_P;
        T_W_P.getPosition() = ze::Position(
            FLAGS_renderer_plane_x,
            FLAGS_renderer_plane_y,
            FLAGS_renderer_plane_z
        );
        T_W_P.getRotation() = ze::Quaternion(
            FLAGS_renderer_plane_qw,
            FLAGS_renderer_plane_qx,
            FLAGS_renderer_plane_qy,
            FLAGS_renderer_plane_qz
        );
        renderer.reset(new PlanarRenderer(
            img_src,
            cam_src,
            T_W_P,
            FLAGS_renderer_z_min,
            FLAGS_renderer_z_max,
            FLAGS_renderer_extend_border
        ));
        break;
    }
    case 1: // Panorama renderer
    {
        cv::Mat img_src;
        if (!loadPreprocessedImage(FLAGS_renderer_texture, &img_src))
            return nullptr;
        Transformation::Rotation R_W_P;
        R_W_P = ze::Quaternion(
            FLAGS_renderer_panorama_qw,
            FLAGS_renderer_panorama_qx,
            FLAGS_renderer_panorama_qy,
            FLAGS_renderer_panorama_qz
        );
        renderer.reset(new PanoramaRenderer(img_src, R_W_P));
        break;
    }
    case 2: // OpenGL renderer
    {
        renderer.reset(new OpenGLRenderer());
        break;
    }
    case 3: // UnrealCV renderer
    {
        renderer.reset(new UnrealCvRenderer());
        break;
    }
    default: {
        LOG(FATAL) << "Renderer type is not known.";
        break;
    }
    }

    return renderer;
}

SimpleRenderer::Ptr loadSimpleRendererFromGflags() {
    SimpleRenderer::Ptr renderer;

    switch (FLAGS_renderer_type) {
    case 0: // Multi-objects 2D renderer
    {
        renderer.reset(new MultiObject2DRenderer());
        break;
    }
    default: {
        LOG(FATAL) << "Renderer type is not known.";
        break;
    }
    }

    return renderer;
}

} // namespace event_camera_simulator
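The planar renderer derives the source camera's focal length from its horizontal field of view via hfovToFocalLength, whose definition is not part of this diff. Under the standard pinhole model it presumably reduces to f = (W / 2) / tan(hfov / 2); a sketch of that relation (not the project's actual implementation):

#include <cmath>

double hfovToFocalLengthSketch(double hfov_deg, int width_px) {
    const double hfov_rad = hfov_deg * M_PI / 180.0;
    // Half the image width subtends half the horizontal field of view.
    return 0.5 * static_cast<double>(width_px) / std::tan(0.5 * hfov_rad);
}
// e.g. hfov = 130 deg, W = 640 px  =>  f ~ 149 px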
View File
@ -1,49 +1,55 @@
#pragma once

#include <esim/common/types.hpp>
#include <ze/common/macros.hpp>

namespace event_camera_simulator {

//! Represents a rendering engine that generates images (and other outputs,
//! such as depth maps, or optical flow maps) given a scene and a camera
//! position.
class Renderer {
public:
    ZE_POINTER_TYPEDEFS(Renderer);

    Renderer() {}

    //! Render an image at a given pose.
    virtual void render(
        const Transformation& T_W_C,
        const std::vector<Transformation>& T_W_OBJ,
        const ImagePtr& out_image,
        const DepthmapPtr& out_depthmap
    ) const = 0;

    //! Returns true if the rendering engine can compute optic flow, false
    //! otherwise
    virtual bool canComputeOpticFlow() const = 0;

    //! Render an image + depth map + optic flow map at a given pose,
    //! given the camera linear and angular velocity
    virtual void renderWithFlow(
        const Transformation& T_W_C,
        const LinearVelocity& v_WC,
        const AngularVelocity& w_WC,
        const std::vector<Transformation>& T_W_OBJ,
        const std::vector<LinearVelocity>& linear_velocity_obj,
        const std::vector<AngularVelocity>& angular_velocity_obj,
        const ImagePtr& out_image,
        const DepthmapPtr& out_depthmap,
        const OpticFlowPtr& optic_flow_map
    ) const {}

    //! Sets the camera
    virtual void setCamera(const ze::Camera::Ptr& camera) = 0;

    //! Get the camera rig
    const ze::Camera::Ptr& getCamera() const {
        return camera_;
    }

protected:
    ze::Camera::Ptr camera_;
};

} // namespace event_camera_simulator
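To make the contract concrete, here is a purely illustrative subclass; only the interface comes from the class above, and the constant-fill bodies are invented:

#include <esim/rendering/renderer_base.hpp>

namespace event_camera_simulator {

// Fills the image with mid-gray irradiance and the depth map with a
// constant depth; real renderers rasterize an actual scene instead.
class FlatGrayRenderer : public Renderer {
public:
    void render(
        const Transformation& T_W_C,
        const std::vector<Transformation>& T_W_OBJ,
        const ImagePtr& out_image,
        const DepthmapPtr& out_depthmap
    ) const override {
        out_image->setTo(0.5);
        out_depthmap->setTo(2.0);
    }

    bool canComputeOpticFlow() const override {
        return false; // flow falls back to depth + twist computation
    }

    void setCamera(const ze::Camera::Ptr& camera) override {
        camera_ = camera;
    }
};

} // namespace event_camera_simulator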
View File
@ -1,32 +1,35 @@
#pragma once

#include <esim/common/types.hpp>
#include <ze/common/macros.hpp>

namespace event_camera_simulator {

//! Represents a rendering engine that generates images + optic flow maps.
//! The rendering engine takes care of managing the environment and camera
//! trajectory in the environment
class SimpleRenderer {
public:
    ZE_POINTER_TYPEDEFS(SimpleRenderer);

    SimpleRenderer() {}

    //! Render an image + optic flow map at a given time t.
    //! The rendering engine takes care of generating the camera trajectory,
    //! etc.
    virtual bool render(
        const Time t,
        const ImagePtr& out_image,
        const OpticFlowPtr& optic_flow_map
    ) const = 0;

    //! Get the width of the image plane
    virtual int getWidth() const = 0;

    //! Get the height of the image plane
    virtual int getHeight() const = 0;

protected:
};

} // namespace event_camera_simulator
View File
@ -1,5 +1,3 @@
#include <esim/rendering/renderer_base.hpp>

namespace event_camera_simulator {} // namespace event_camera_simulator
View File
@ -1,5 +1,3 @@
#include <esim/rendering/simple_renderer_base.hpp>

namespace event_camera_simulator {} // namespace event_camera_simulator
View File
@ -1,11 +1,12 @@
#pragma once

#include <gflags/gflags.h>
#include <ze/vi_simulation/imu_simulator.hpp>
#include <ze/vi_simulation/trajectory_simulator.hpp>

namespace event_camera_simulator {

ze::ImuSimulator::Ptr
loadImuSimulatorFromGflags(const ze::TrajectorySimulator::Ptr& trajectory);

} // namespace event_camera_simulator
View File
@ -5,6 +5,9 @@
namespace event_camera_simulator {

std::tuple<
    ze::TrajectorySimulator::Ptr,
    std::vector<ze::TrajectorySimulator::Ptr>>
loadTrajectorySimulatorFromGflags();

} // namespace event_camera_simulator
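A short consumption sketch (C++17 structured bindings; the header path is an assumption based on the include style used elsewhere in this commit):

#include <esim/trajectory/trajectory_factory.hpp>

void exampleLoadTrajectories() { // hypothetical helper, illustration only
    auto [trajectory, dyn_obj_trajectories] =
        event_camera_simulator::loadTrajectorySimulatorFromGflags();
    // 'trajectory' drives the camera rig; each entry of
    // 'dyn_obj_trajectories' drives one dynamic object, as consumed by the
    // moving-camera data provider earlier in this commit.
}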
View File
@ -1,50 +1,54 @@
#include <esim/trajectory/imu_factory.hpp>
#include <ze/common/logging.hpp>

namespace event_camera_simulator {

ze::ImuSimulator::Ptr
loadImuSimulatorFromGflags(const ze::TrajectorySimulator::Ptr& trajectory) {
    ze::ImuSimulator::Ptr imu;

    const ze::real_t gyr_bias_noise_sigma = 0.0000266;
    const ze::real_t acc_bias_noise_sigma = 0.000433;
    const ze::real_t gyr_noise_sigma = 0.000186;
    const ze::real_t acc_noise_sigma = 0.00186;
    const uint32_t imu_bandwidth_hz = 200;
    const ze::real_t gravity_magnitude = 9.81;

    ze::ImuBiasSimulator::Ptr bias;
    try {
        VLOG(1) << "Initialize bias ...";
        bias = std::make_shared<ze::ContinuousBiasSimulator>(
            ze::Vector3::Constant(gyr_bias_noise_sigma),
            ze::Vector3::Constant(acc_bias_noise_sigma),
            trajectory->start(),
            trajectory->end(),
            100
        ); // Using (trajectory->end() - trajectory->start()) *
           // imu_bandwidth_hz samples here results in a huge malloc.
        VLOG(1) << "done.";
    } catch (const std::bad_alloc& e) {
        LOG(FATAL)
            << "Could not create bias because number of samples is too high."
            << " Allocation failed: " << e.what();
    }

    VLOG(1) << "Initialize IMU ...";
    imu = std::make_shared<ze::ImuSimulator>(
        trajectory,
        bias,
        ze::RandomVectorSampler<3>::sigmas(
            ze::Vector3::Constant(acc_noise_sigma)
        ),
        ze::RandomVectorSampler<3>::sigmas(
            ze::Vector3::Constant(gyr_noise_sigma)
        ),
        imu_bandwidth_hz,
        imu_bandwidth_hz,
        gravity_magnitude
    );
    VLOG(1) << "done.";

    return imu;
}

} // namespace event_camera_simulator
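For scale (arithmetic added here, not taken from the source): sampling the bias at imu_bandwidth_hz = 200 over a 100 s trajectory would require (trajectory->end() - trajectory->start()) * imu_bandwidth_hz = 20,000 spline samples, whereas the hard-coded 100 keeps the ContinuousBiasSimulator's allocation small; the std::bad_alloc handler above exists precisely for the case where that sample count grows too large.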
@ -2,235 +2,272 @@
#include <ze/common/csv_trajectory.hpp>
#include <ze/common/logging.hpp>

DEFINE_int32(
    trajectory_type,
    0,
    " 0: Random spline trajectory, 1: Load trajectory from CSV file"
);

DEFINE_double(
    trajectory_length_s,
    100.0,
    "Length of the trajectory, in seconds "
    "(used when the trajectory type is random spline)"
);

DEFINE_int32(trajectory_spline_order, 5, "Spline order of the trajectory");

DEFINE_double(
    trajectory_sampling_frequency_hz,
    5,
    "Sampling frequency of the spline trajectory, i.e. "
    "number of random poses generated per second along the trajectory"
);

DEFINE_int32(
    trajectory_num_spline_segments,
    100,
    "Number of spline segments used for the trajectory"
);

DEFINE_double(
    trajectory_lambda,
    0.1,
    "Smoothing factor for the spline trajectories. "
    "Low value = less smooth, high value = more smooth"
);

DEFINE_double(
    trajectory_multiplier_x, 1.0, "Scaling factor for the x camera axis"
);
DEFINE_double(
    trajectory_multiplier_y, 1.0, "Scaling factor for the y camera axis"
);
DEFINE_double(
    trajectory_multiplier_z, 1.0, "Scaling factor for the z camera axis"
);
DEFINE_double(
    trajectory_multiplier_wx, 1.0, "Scaling factor for the x orientation axis"
);
DEFINE_double(
    trajectory_multiplier_wy, 1.0, "Scaling factor for the y orientation axis"
);
DEFINE_double(
    trajectory_multiplier_wz, 1.0, "Scaling factor for the z orientation axis"
);

DEFINE_double(trajectory_offset_x, 0.0, "Offset for the x camera axis");
DEFINE_double(trajectory_offset_y, 0.0, "Offset for the y camera axis");
DEFINE_double(trajectory_offset_z, 0.0, "Offset for the z camera axis");
DEFINE_double(trajectory_offset_wx, 0.0, "Offset for the x orientation axis");
DEFINE_double(trajectory_offset_wy, 0.0, "Offset for the y orientation axis");
DEFINE_double(trajectory_offset_wz, 0.0, "Offset for the z orientation axis");

DEFINE_string(trajectory_csv_file, "", "CSV file containing the trajectory");

DEFINE_int32(
    trajectory_dynamic_objects_type,
    1,
    " 0: Random spline trajectory, 1: Load trajectory from CSV file"
);

DEFINE_int32(
    trajectory_dynamic_objects_spline_order,
    5,
    "Spline order of the trajectory"
);

DEFINE_int32(
    trajectory_dynamic_objects_num_spline_segments,
    100,
    "Number of spline segments used for the trajectory"
);

DEFINE_double(
    trajectory_dynamic_objects_lambda,
    0.1,
    "Smoothing factor for the spline trajectories. "
    "Low value = less smooth, high value = more smooth"
);

DEFINE_string(
    trajectory_dynamic_objects_csv_dir,
    "",
    "Directory containing the CSV trajectory files for dynamic objects"
);

DEFINE_string(
    trajectory_dynamic_objects_csv_file,
    "",
    "Semicolon-separated list of CSV files, one per dynamic object trajectory"
);

namespace event_camera_simulator {

    std::tuple<
        ze::TrajectorySimulator::Ptr,
        std::vector<ze::TrajectorySimulator::Ptr>>
    loadTrajectorySimulatorFromGflags() {
        ze::TrajectorySimulator::Ptr trajectory;
        std::vector<ze::TrajectorySimulator::Ptr> trajectories_dynamic_objects;

        bool dynamic_object = false;
        size_t p_start, p_end;
        p_start = 0;

        while (1) {
            int trajectory_type;
            if (dynamic_object) {
                trajectory_type = FLAGS_trajectory_dynamic_objects_type;
                if (trajectory_type != 1) {
                    LOG(FATAL) << "Only supporting trajectories of type 1 for "
                                  "dynamic objects!";
                }
            } else {
                trajectory_type = FLAGS_trajectory_type;
            }

            // set the CSV path for dynamic objects (the file list is
            // semicolon-separated; see the sketch after this file)
            std::string csv_path;
            bool should_break = false;
            if (dynamic_object) {
                if ((p_end = FLAGS_trajectory_dynamic_objects_csv_file.find(
                         ";",
                         p_start
                     ))
                    != std::string::npos) {
                    csv_path = FLAGS_trajectory_dynamic_objects_csv_dir + "/"
                             + FLAGS_trajectory_dynamic_objects_csv_file.substr(
                                   p_start,
                                   p_end - p_start
                               );
                    p_start = p_end + 1;
                } else {
                    csv_path = FLAGS_trajectory_dynamic_objects_csv_dir + "/"
                             + FLAGS_trajectory_dynamic_objects_csv_file.substr(
                                   p_start,
                                   p_end - p_start
                               );
                    should_break = true;
                }
            } else {
                csv_path = FLAGS_trajectory_csv_file;
            }

            switch (trajectory_type) {
            case 0: // Random spline
            {
                std::shared_ptr<ze::BSplinePoseMinimalRotationVector> pbs =
                    std::make_shared<ze::BSplinePoseMinimalRotationVector>(
                        FLAGS_trajectory_spline_order
                    );

                ze::MatrixX poses;
                ze::VectorX times = ze::VectorX::LinSpaced(
                    FLAGS_trajectory_sampling_frequency_hz
                        * FLAGS_trajectory_length_s,
                    0,
                    FLAGS_trajectory_length_s
                );
                poses.resize(6, times.size());
                poses.setRandom();

                poses.row(0) *= FLAGS_trajectory_multiplier_x;
                poses.row(1) *= FLAGS_trajectory_multiplier_y;
                poses.row(2) *= FLAGS_trajectory_multiplier_z;
                poses.row(3) *= FLAGS_trajectory_multiplier_wx;
                poses.row(4) *= FLAGS_trajectory_multiplier_wy;
                poses.row(5) *= FLAGS_trajectory_multiplier_wz;

                poses.row(0).array() += FLAGS_trajectory_offset_x;
                poses.row(1).array() += FLAGS_trajectory_offset_y;
                poses.row(2).array() += FLAGS_trajectory_offset_z;
                poses.row(3).array() += FLAGS_trajectory_offset_wx;
                poses.row(4).array() += FLAGS_trajectory_offset_wy;
                poses.row(5).array() += FLAGS_trajectory_offset_wz;

                pbs->initPoseSpline3(
                    times,
                    poses,
                    FLAGS_trajectory_num_spline_segments,
                    FLAGS_trajectory_lambda
                );
                trajectory.reset(new ze::SplineTrajectorySimulator(pbs));
                break;
            }
            case 1: // Load from CSV file
            {
                // Create trajectory:
                ze::PoseSeries pose_series;
                LOG(INFO) << "Reading trajectory from CSV file: " << csv_path;
                pose_series.load(csv_path);

                ze::StampedTransformationVector poses =
                    pose_series.getStampedTransformationVector();

                // Set start time of trajectory to zero.
                const int64_t offset = poses.at(0).first;
                for (ze::StampedTransformation& it : poses)
                    it.first -= offset;

                LOG(INFO) << "Initializing spline from trajectory (this may "
                             "take some time)...";

                int spline_order, num_spline_segments;
                double lambda;
                if (dynamic_object) {
                    spline_order =
                        FLAGS_trajectory_dynamic_objects_spline_order;
                    num_spline_segments =
                        FLAGS_trajectory_dynamic_objects_num_spline_segments;
                    lambda = FLAGS_trajectory_dynamic_objects_lambda;
                } else {
                    spline_order = FLAGS_trajectory_spline_order;
                    num_spline_segments = FLAGS_trajectory_num_spline_segments;
                    lambda = FLAGS_trajectory_lambda;
                }

                std::shared_ptr<ze::BSplinePoseMinimalRotationVector> bs =
                    std::make_shared<ze::BSplinePoseMinimalRotationVector>(
                        spline_order
                    );
                bs->initPoseSplinePoses(poses, num_spline_segments, lambda);

                if (dynamic_object) {
                    trajectories_dynamic_objects.push_back(
                        std::make_shared<ze::SplineTrajectorySimulator>(bs)
                    );
                } else {
                    trajectory =
                        std::make_shared<ze::SplineTrajectorySimulator>(bs);
                }

                LOG(INFO) << "Done!";
                break;
            }
            default: {
                LOG(FATAL) << "Trajectory type is not known.";
                break;
            }
            }

            if (!dynamic_object) {
                if (!FLAGS_trajectory_dynamic_objects_csv_dir.empty()
                    && !FLAGS_trajectory_dynamic_objects_csv_file.empty()) {
                    dynamic_object = true;
                } else {
                    break;
                }
            }

            if (should_break)
                break;
        }

        return std::make_tuple(trajectory, trajectories_dynamic_objects);
    }

} // namespace event_camera_simulator
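Since the loop above parses FLAGS_trajectory_dynamic_objects_csv_file as a semicolon-separated list, here is a minimal standalone sketch of the resulting per-object paths (not part of the project; directory and file names are illustrative):

    #include <iostream>
    #include <string>

    int main() {
        const std::string dir = "/tmp/objects";             // stands in for the csv_dir flag
        const std::string files = "car.csv;pedestrian.csv"; // stands in for the csv_file flag
        size_t p_start = 0, p_end;
        while (true) {
            const bool last =
                (p_end = files.find(';', p_start)) == std::string::npos;
            // substr clamps to the end of the string when p_end is npos
            std::cout << dir + "/" + files.substr(p_start, p_end - p_start)
                      << std::endl;
            if (last)
                break;
            p_start = p_end + 1;
        }
        return 0; // prints /tmp/objects/car.csv and /tmp/objects/pedestrian.csv
    }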
@ -1,77 +1,75 @@
#pragma once

#include <boost/algorithm/string/predicate.hpp>
#include <boost/asio.hpp>
#include <boost/format.hpp>
#include <chrono>
#include <functional>
#include <iostream>
#include <istream>
#include <opencv2/core/core.hpp>
#include <ostream>
#include <string>
#include <thread>

namespace event_camera_simulator {

    using boost::asio::ip::tcp;

    struct CameraData {
        uint32_t id;
        double_t pitch;
        double_t yaw;
        double_t roll;
        double_t x;
        double_t y;
        double_t z;
    };

    class UnrealCvClient {
    public:
        UnrealCvClient(std::string host, std::string port);
        ~UnrealCvClient();

        void saveImage(uint32_t camid, std::string path);
        cv::Mat getImage(uint32_t camid);
        cv::Mat getDepth(uint32_t camid);
        void setCamera(const CameraData& camera);
        void setCameraFOV(float hfov_deg);
        void setCameraSize(int width, int height);

    protected:
        void sendCommand(std::string command);

        template <typename Result>
        Result sendCommand(
            std::string command,
            std::function<Result(std::istream&, uint32_t)> mapf
        );

        void send(std::string msg, uint32_t counter);

        template <typename Result>
        Result receive(std::function<Result(std::istream&, uint32_t)> parser);

        // must stand before socket_ because of C++ initialization order
        boost::asio::io_service io_service_;
        tcp::socket socket_;
        mutable uint32_t counter_;
        uint32_t delay_;
        boost::format angular_format_;

    private:
        void sleep(uint32_t delay);
        void handleError(boost::system::error_code ec);
        std::string istreamToString(std::istream& stream, uint32_t size);
        void parse_npy_header(
            unsigned char* buffer,
            size_t& word_size,
            std::vector<size_t>& shape,
            bool& fortran_order
        );
    };

} // namespace event_camera_simulator
@ -1,368 +1,389 @@
#include <cmath>
#include <esim/unrealcv_bridge/unrealcv_bridge.hpp>
#include <glog/logging.h>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <regex>

namespace event_camera_simulator {

    using boost::asio::ip::tcp;

    // from:
    // https://www.boost.org/doc/libs/1_47_0/doc/html/boost_asio/reference/connect/overload6.html
    struct unrealcv_server_connect_condition {
        template <typename Iterator>
        Iterator
        operator()(const boost::system::error_code& ec, Iterator next) {
            if (ec)
                LOG(ERROR) << ec.message();
            LOG(INFO) << "Trying: " << next->endpoint();
            return next;
        }
    };

    UnrealCvClient::UnrealCvClient(std::string host, std::string port)
        : io_service_(),
          socket_(io_service_),
          counter_(0),
          delay_(30) {
        tcp::resolver resolver(io_service_);
        tcp::resolver::query query(host, port);
        tcp::resolver::iterator endpoint_iterator = resolver.resolve(query);

        boost::system::error_code ec;
        boost::asio::connect(
            socket_,
            endpoint_iterator,
            unrealcv_server_connect_condition(),
            ec
        );
        if (ec) {
            LOG(FATAL) << "Could not connect to UnrealCV server";
            return;
        }
        sleep(500); // long sleep to initiate the connection

        // receive the first "we are connected" string
        receive<std::string>(
            [this](std::istream& stream, uint32_t size) -> std::string {
                return this->istreamToString(stream, size);
            }
        );

        sleep(delay_);
        sendCommand("vrun r.AmbientOcclusionLevels 0");
        sendCommand("vrun r.LensFlareQuality 0");
        sendCommand("vrun r.DefaultFeature.AntiAliasing 2");
        sendCommand("vrun r.DefaultFeature.MotionBlur 0");
        sendCommand("vrun r.PostProcessAAQuality 6");
    }

    UnrealCvClient::~UnrealCvClient() {
        socket_.close();
    }

    void UnrealCvClient::saveImage(uint32_t camid, std::string path) {
        std::string req =
            (boost::format("vget /camera/%i/lit %s") % camid % path).str();
        sendCommand(req);
    }

    cv::Mat UnrealCvClient::getImage(uint32_t camid) {
        std::string req =
            (boost::format("vget /camera/%i/lit png") % camid).str();

        return sendCommand<cv::Mat>(
            req,
            [](std::istream& stream, uint32_t size) {
                std::vector<char> data(size);
                stream.read(data.data(), size);
                cv::Mat matrixPng = cv::imdecode(cv::Mat(data), 1);
                return matrixPng.clone();
            }
        );
    }

    cv::Mat UnrealCvClient::getDepth(uint32_t camid) {
        std::string req =
            (boost::format("vget /camera/%i/depth npy") % camid).str();

        return sendCommand<cv::Mat>(
            req,
            [this](std::istream& stream, uint32_t size) {
                std::vector<char> data(size);
                stream.read(data.data(), size);
                unsigned char* buffer = (unsigned char*) data.data();

                /*
                 * Gather data from the header
                 */
                std::vector<size_t>
                    img_dims; // if appending, the shape of existing + new data
                size_t word_size;
                bool fortran_order;
                parse_npy_header(buffer, word_size, img_dims, fortran_order);

                // https://docs.scipy.org/doc/numpy/neps/npy-format.html
                std::string npy_magic(reinterpret_cast<char*>(buffer), 6);
                uint8_t major_version = *reinterpret_cast<uint8_t*>(buffer + 6);
                uint8_t minor_version = *reinterpret_cast<uint8_t*>(buffer + 7);
                uint16_t header_str_len =
                    *reinterpret_cast<uint16_t*>(buffer + 8);
                std::string header(
                    reinterpret_cast<char*>(buffer + 9),
                    header_str_len
                );

                uint16_t header_total_len = 10 + header_str_len;
                uint32_t data_length = data.size() - header_total_len;
                uint32_t num_pixel = img_dims.at(0) * img_dims.at(1);

                /*
                 * Ensure that the data is okay
                 */
                if (!(major_version == 1 && minor_version == 0
                      && npy_magic.find("NUMPY") != std::string::npos)) {
                    throw std::runtime_error("Npy header data not supported");
                }

                if (!(data_length == (num_pixel * sizeof(float_t)))) {
                    throw std::runtime_error(
                        "Npy array data shape does not match the image size"
                    );
                }

                /*
                 * Read and convert the data
                 */
                cv::Mat matrix_f32 =
                    cv::Mat(
                        img_dims.at(0),
                        img_dims.at(1),
                        CV_32F,
                        buffer + header_total_len
                    ).clone();

                return matrix_f32;
            }
        );
    }

    void UnrealCvClient::setCamera(const CameraData& camera) {
        std::string cam_pose_s =
            (boost::format("vset /camera/%i/pose %.5f %.5f %.5f %.5f %.5f %.5f")
             % camera.id % camera.x % camera.y % camera.z % camera.pitch
             % camera.yaw % camera.roll)
                .str();
        sendCommand(cam_pose_s);
    }

    void UnrealCvClient::setCameraSize(int width, int height) {
        VLOG(1) << "Setting the camera size to: " << width << "x" << height;
        std::string req_size =
            (boost::format("vrun r.setres %dx%d") % width % height).str();
        sendCommand(req_size);
    }

    void UnrealCvClient::setCameraFOV(float hfov_deg) {
        VLOG(1) << "Setting the camera horizontal field of view to: "
                << hfov_deg << " deg";
        const int cam_id = 0;
        std::string req_fov =
            (boost::format("vset /camera/%i/horizontal_fieldofview %.5f")
             % cam_id % hfov_deg)
                .str();
        sendCommand(req_fov);
    }

    void UnrealCvClient::sendCommand(std::string command) {
        if (!(boost::starts_with(command, "vset")
              || boost::starts_with(command, "vrun"))) {
            throw std::runtime_error(
                "invalid command: command must start with vset or vrun"
            );
        }

        uint32_t tmpc = counter_++;
        VLOG(1) << "SET:" << tmpc << " " << command;
        send(command, tmpc);
        sleep(delay_);

        std::string result_prefix = std::to_string(tmpc) + ":";

        /*
         * is a set command: we never expect anything other than "ok",
         * so we do not use mapf
         */
        std::string result = receive<std::string>(
            [this](std::istream& stream, uint32_t size) -> std::string {
                return this->istreamToString(stream, size);
            }
        );

        if (!boost::starts_with(result, result_prefix + "ok")) {
            throw std::runtime_error("response identifier is incorrect");
        } else {
            VLOG(1) << "GET:" << tmpc << " " << "ok";
        }

        sleep(delay_);
    }

    template <typename Result>
    Result UnrealCvClient::sendCommand(
        std::string command,
        std::function<Result(std::istream&, uint32_t)> mapf
    ) {
        if (!(boost::starts_with(command, "vget"))) {
            throw std::runtime_error(
                "invalid command: command must start with vget"
            );
        }

        uint32_t tmpc = counter_++;
        VLOG(1) << "SET:" << tmpc << " " << command;
        send(command, tmpc);
        sleep(delay_);

        std::string result_prefix = std::to_string(tmpc) + ":";

        /*
         * is a get command: we expect a response that can
         * be a string or binary data
         */
        Result result = receive<Result>(
            [this, result_prefix, mapf](
                std::istream& stream,
                uint32_t size
            ) -> Result {
                std::string prefix =
                    istreamToString(stream, result_prefix.size());
                size -= result_prefix.size();

                if (!boost::starts_with(prefix, result_prefix))
                    throw std::runtime_error(
                        "response identifier is incorrect"
                    );

                return mapf(stream, size);
            }
        );

        sleep(delay_);
        return result;
    }

    void UnrealCvClient::send(std::string msg, uint32_t counter) {
        std::string out = std::to_string(counter) + ":" + msg;

        uint32_t magic = 0x9E2B83C1;
        uint32_t size = out.length();

        boost::asio::streambuf request;
        std::ostream request_stream(&request);
        boost::system::error_code ec;

        request_stream.write((char*) &magic, sizeof(magic));
        request_stream.write((char*) &size, sizeof(size));
        request_stream << out;

        // Send the request.
        boost::asio::write(
            socket_,
            request,
            boost::asio::transfer_exactly(
                request.size() + sizeof(magic) + sizeof(size)
            ),
            ec
        );
    }
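    // Editor's note (a sketch derived from send()/receive() here, not part
    // of the original file): every message on the wire is framed as
    //
    //   [uint32 magic = 0x9E2B83C1][uint32 size]["<counter>:<payload>"]
    //
    // so, for example, counter 12 with payload "vget /camera/0/lit png"
    // goes out as the magic number, the length 25, and the 25 bytes
    // "12:vget /camera/0/lit png".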
    template <typename Result>
    Result UnrealCvClient::receive(
        std::function<Result(std::istream&, uint32_t)> parser
    ) {
        boost::system::error_code ec;
        boost::asio::streambuf response;

        // first read the 8 byte header
        boost::asio::read(
            socket_,
            response,
            boost::asio::transfer_exactly(8),
            ec
        );
        handleError(ec);

        uint32_t magic;
        uint32_t size;

        // Check that response is OK.
        std::istream response_stream(&response);
        response_stream.read((char*) &magic, sizeof(magic));
        response_stream.read((char*) &size, sizeof(size));

        boost::asio::read(
            socket_,
            response,
            boost::asio::transfer_exactly(size),
            ec
        );
        handleError(ec);

        Result res = parser(response_stream, size);
        return res;
    }

    void UnrealCvClient::handleError(boost::system::error_code ec) {
        if (ec == boost::asio::error::eof)
            throw boost::system::system_error(ec); // Connection closed by peer.
        else if (ec)
            throw boost::system::system_error(ec); // Some other error.
    }

    void UnrealCvClient::sleep(uint32_t delay) {
        std::this_thread::sleep_for(std::chrono::milliseconds(delay));
    }

    std::string
    UnrealCvClient::istreamToString(std::istream& stream, uint32_t size) {
        char buffer[size];
        stream.read(buffer, size);

        std::stringstream out;
        out << buffer;

        std::string result = out.str();
        return result;
    }

    // from cnpy: https://github.com/rogersce/cnpy/blob/master/cnpy.cpp
    void UnrealCvClient::parse_npy_header(
        unsigned char* buffer,
        size_t& word_size,
        std::vector<size_t>& shape,
        bool& fortran_order
    ) {
        // std::string magic_string(buffer, 6);
        uint8_t major_version = *reinterpret_cast<uint8_t*>(buffer + 6);
        uint8_t minor_version = *reinterpret_cast<uint8_t*>(buffer + 7);
        uint16_t header_len = *reinterpret_cast<uint16_t*>(buffer + 8);
        std::string header(reinterpret_cast<char*>(buffer + 9), header_len);

        size_t loc1, loc2;

        // fortran order
        loc1 = header.find("fortran_order") + 16;
        fortran_order = (header.substr(loc1, 4) == "True" ? true : false);

        // shape
        loc1 = header.find("(");
        loc2 = header.find(")");

        std::regex num_regex("[0-9][0-9]*");
        std::smatch sm;
        shape.clear();

        std::string str_shape = header.substr(loc1 + 1, loc2 - loc1 - 1);
        while (std::regex_search(str_shape, sm, num_regex)) {
            shape.push_back(std::stoi(sm[0].str()));
            str_shape = sm.suffix().str();
        }

        // endian, word size, data type
        // byte order code | stands for not applicable.
        // not sure when this applies except for byte array
        loc1 = header.find("descr") + 9;
        bool littleEndian =
            (header[loc1] == '<' || header[loc1] == '|' ? true : false);
        assert(littleEndian);

        // char type = header[loc1+1];
        // assert(type == map_type(T));

        std::string str_ws = header.substr(loc1 + 2);
        loc2 = str_ws.find("'");
        word_size = atoi(str_ws.substr(0, loc2).c_str());
    }

} // namespace event_camera_simulator
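For readers unfamiliar with the .npy layout that parse_npy_header and getDepth rely on, a typical version 1.0 header looks like this (illustrative values; the format itself is documented at the NumPy link above):

    \x93NUMPY \x01 \x00 <header_len: uint16, little-endian>
    {'descr': '<f4', 'fortran_order': False, 'shape': (480, 640), }

For this header the parser yields word_size = 4, fortran_order = false and shape = {480, 640}; getDepth() then checks that the payload holds exactly 480 * 640 * sizeof(float_t) bytes before wrapping it in a CV_32F cv::Mat.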
@ -5,31 +5,25 @@ using boost::asio::ip::tcp;
using namespace std;

int main() {
    event_camera_simulator::UnrealCvClient client("localhost", "9000");

    cv::namedWindow("Image", cv::WINDOW_AUTOSIZE);
    cv::namedWindow("Depthmap", cv::WINDOW_AUTOSIZE);

    for (double y = 0.0; y < 100.0; y += 10.0) {
        event_camera_simulator::CameraData test =
            {0, 0.0, 0.0, 0.0, 0.0, y, 100.0};

        client.setCamera(test);
        cv::Mat img = client.getImage(0);
        cv::imshow("Image", img);

        cv::Mat depthmap = client.getDepth(0);
        cv::normalize(depthmap, depthmap, 0, 255, cv::NORM_MINMAX, CV_8U);
        cv::imshow("Depthmap", depthmap);

        cv::waitKey(10);
    }
    return 0;
}
@ -2,44 +2,68 @@
#include <esim/common/types.hpp>
#include <esim/visualization/publisher_interface.hpp>

#include <fstream>

namespace event_camera_simulator {

    class AdaptiveSamplingBenchmarkPublisher : public Publisher {
    public:
        using PixelLocation = std::pair<int, int>;
        using PixelLocations = std::vector<PixelLocation>;

        AdaptiveSamplingBenchmarkPublisher(
            const std::string& benchmark_folder,
            const std::string& pixels_to_record_filename
        );

        ~AdaptiveSamplingBenchmarkPublisher();

        virtual void
        imageCallback(const ImagePtrVector& images, Time t) override;
        virtual void eventsCallback(const EventsVector& events) override;
        virtual void opticFlowCallback(
            const OpticFlowPtrVector& optic_flows, Time t
        ) override;

        virtual void imageCorruptedCallback(
            const ImagePtrVector& corrupted_images, Time t
        ) override {}

        virtual void
        depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) override {}

        virtual void poseCallback(
            const Transformation& T_W_B,
            const TransformationVector& T_W_Cs,
            Time t
        ) override {}

        virtual void twistCallback(
            const AngularVelocityVector& ws,
            const LinearVelocityVector& vs,
            Time t
        ) override {}

        virtual void
        imuCallback(const Vector3& acc, const Vector3& gyr, Time t) override {}

        virtual void cameraInfoCallback(
            const ze::CameraRig::Ptr& camera_rig, Time t
        ) override {}

        virtual void pointcloudCallback(
            const PointCloudVector& pointclouds, Time t
        ) override {}

        static Publisher::Ptr createFromGflags();

    private:
        std::ofstream events_file_;
        std::ofstream images_file_;
        std::ofstream pixel_intensities_file_;
        std::ofstream optic_flows_file_;
        size_t image_index_;
        PixelLocations pixels_to_record_;
    };

} // namespace event_camera_simulator
@ -5,25 +5,47 @@
namespace event_camera_simulator {

    class Publisher {
    public:
        ZE_POINTER_TYPEDEFS(Publisher);

        Publisher() = default;
        virtual ~Publisher() = default;

        virtual void imageCallback(const ImagePtrVector& images, Time t) {}

        virtual void
        imageCorruptedCallback(const ImagePtrVector& corrupted_images, Time t) {}

        virtual void
        depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) {}

        virtual void
        opticFlowCallback(const OpticFlowPtrVector& optic_flows, Time t) {}

        virtual void eventsCallback(const EventsVector& events) {}

        virtual void poseCallback(
            const Transformation& T_W_B,
            const TransformationVector& T_W_Cs,
            Time t
        ) {}

        virtual void twistCallback(
            const AngularVelocityVector& ws,
            const LinearVelocityVector& vs,
            Time t
        ) {}

        virtual void
        imuCallback(const Vector3& acc, const Vector3& gyr, Time t) {}

        virtual void
        cameraInfoCallback(const ze::CameraRig::Ptr& camera_rig, Time t) {}

        virtual void
        pointcloudCallback(const PointCloudVector& pointclouds, Time t) {}
    };

} // namespace event_camera_simulator
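Because every callback has an empty default implementation, a sink only overrides what it needs. A minimal sketch of a custom publisher (not part of the project; it assumes EventsVector is a vector of per-camera event vectors, as its use elsewhere in this diff suggests):

    class EventCountPublisher : public Publisher {
    public:
        virtual void eventsCallback(const EventsVector& events) override {
            size_t n = 0;
            for (const auto& camera_events : events)
                n += camera_events.size();
            LOG(INFO) << "received " << n << " events";
        }
    };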
@ -2,58 +2,75 @@
#include <esim/common/types.hpp>
#include <esim/visualization/publisher_interface.hpp>
#include <geometry_msgs/TransformStamped.h>
#include <image_transport/image_transport.h>
#include <ros/ros.h>
#include <tf/tf.h>
#include <tf/transform_broadcaster.h>

namespace event_camera_simulator {

    class RosPublisher : public Publisher {
    public:
        RosPublisher(size_t num_cameras);
        ~RosPublisher();

        virtual void
        imageCallback(const ImagePtrVector& images, Time t) override;
        virtual void imageCorruptedCallback(
            const ImagePtrVector& corrupted_images, Time t
        ) override;
        virtual void
        depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) override;
        virtual void opticFlowCallback(
            const OpticFlowPtrVector& optic_flows, Time t
        ) override;
        virtual void eventsCallback(const EventsVector& events) override;
        virtual void poseCallback(
            const Transformation& T_W_B,
            const TransformationVector& T_W_Cs,
            Time t
        ) override;
        virtual void twistCallback(
            const AngularVelocityVector& ws,
            const LinearVelocityVector& vs,
            Time t
        ) override;
        virtual void
        imuCallback(const Vector3& acc, const Vector3& gyr, Time t) override;
        virtual void cameraInfoCallback(
            const ze::CameraRig::Ptr& camera_rig, Time t
        ) override;
        virtual void pointcloudCallback(
            const PointCloudVector& pointclouds, Time t
        ) override;

    private:
        size_t num_cameras_;
        std::vector<cv::Size> sensor_sizes_;
        std::shared_ptr<ros::NodeHandle> nh_;
        std::shared_ptr<image_transport::ImageTransport> it_;
        std::vector<std::shared_ptr<ros::Publisher>> event_pub_;
        std::shared_ptr<ros::Publisher> pose_pub_;
        std::shared_ptr<ros::Publisher> imu_pub_;
        std::vector<std::shared_ptr<ros::Publisher>> pointcloud_pub_;
        std::vector<std::shared_ptr<ros::Publisher>> camera_info_pub_;
        std::vector<std::shared_ptr<image_transport::Publisher>> image_pub_;
        std::vector<std::shared_ptr<image_transport::Publisher>>
            image_corrupted_pub_;
        std::vector<std::shared_ptr<image_transport::Publisher>> depthmap_pub_;
        std::vector<std::shared_ptr<ros::Publisher>> optic_flow_pub_;
        std::vector<std::shared_ptr<ros::Publisher>> twist_pub_;
        std::shared_ptr<tf::TransformBroadcaster> tf_broadcaster_;

        Time last_published_camera_info_time_;
        Time last_published_image_time_;
        Time last_published_corrupted_image_time_;
        Time last_published_depthmap_time_;
        Time last_published_optic_flow_time_;
        Time last_published_pointcloud_time_;
    };

} // namespace event_camera_simulator
@ -1,54 +1,66 @@
#pragma once

#include <dvs_msgs/EventArray.h>
#include <esim/common/types.hpp>
#include <esim_msgs/OpticFlow.h>
#include <geometry_msgs/PoseStamped.h>
#include <geometry_msgs/TwistStamped.h>
#include <pcl/point_types.h>
#include <pcl_ros/point_cloud.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/Imu.h>

namespace event_camera_simulator {

    inline std::string getTopicName(int i, const std::string& suffix) {
        std::stringstream ss;
        ss << "cam" << i << "/" << suffix;
        return ss.str();
    }

    inline std::string
    getTopicName(const std::string& prefix, int i, const std::string& suffix) {
        std::stringstream ss;
        ss << prefix << "/" << getTopicName(i, suffix);
        return ss.str();
    }

    inline ros::Time toRosTime(Time t) {
        ros::Time ros_time;
        ros_time.fromNSec(t);
        return ros_time;
    }

    void pointCloudToMsg(
        const PointCloud& pointcloud,
        const std::string& frame_id,
        Time t,
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr& msg
    );

    void imageToMsg(const Image& image, Time t, sensor_msgs::ImagePtr& msg);

    void
    depthmapToMsg(const Depthmap& depthmap, Time t, sensor_msgs::ImagePtr& msg);

    void
    opticFlowToMsg(const OpticFlow& flow, Time t, esim_msgs::OpticFlowPtr& msg);

    void eventsToMsg(
        const Events& events,
        int width,
        int height,
        dvs_msgs::EventArrayPtr& msg
    );

    sensor_msgs::Imu imuToMsg(const Vector3& acc, const Vector3& gyr, Time t);

    geometry_msgs::TwistStamped
    twistToMsg(const AngularVelocity& w, const LinearVelocity& v, Time t);

    void cameraToMsg(
        const ze::Camera::Ptr& camera, Time t, sensor_msgs::CameraInfoPtr& msg
    );

} // namespace event_camera_simulator
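As a quick illustration of the two getTopicName overloads above (prefix and suffix values are illustrative):

    getTopicName(0, "image_raw");         // -> "cam0/image_raw"
    getTopicName("esim", 0, "image_raw"); // -> "esim/cam0/image_raw"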
@ -6,40 +6,56 @@
namespace event_camera_simulator {

    class RosbagWriter : public Publisher {
    public:
        RosbagWriter(const std::string& path_to_output_bag, size_t num_cameras);
        ~RosbagWriter();

        virtual void
        imageCallback(const ImagePtrVector& images, Time t) override;
        virtual void imageCorruptedCallback(
            const ImagePtrVector& corrupted_images, Time t
        ) override;
        virtual void
        depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) override;
        virtual void opticFlowCallback(
            const OpticFlowPtrVector& optic_flows, Time t
        ) override;
        virtual void eventsCallback(const EventsVector& events) override;
        virtual void poseCallback(
            const Transformation& T_W_B,
            const TransformationVector& T_W_Cs,
            Time t
        ) override;
        virtual void twistCallback(
            const AngularVelocityVector& ws,
            const LinearVelocityVector& vs,
            Time t
        ) override;
        virtual void
        imuCallback(const Vector3& acc, const Vector3& gyr, Time t) override;
        virtual void cameraInfoCallback(
            const ze::CameraRig::Ptr& camera_rig, Time t
        ) override;
        virtual void pointcloudCallback(
            const PointCloudVector& pointclouds, Time t
        ) override;

        static Publisher::Ptr createBagWriterFromGflags(size_t num_cameras);

    private:
        size_t num_cameras_;
        std::vector<cv::Size> sensor_sizes_;
        rosbag::Bag bag_;
        const std::string topic_name_prefix_ = "";

        Time last_published_camera_info_time_;
        Time last_published_image_time_;
        Time last_published_corrupted_image_time_;
        Time last_published_depthmap_time_;
        Time last_published_optic_flow_time_;
        Time last_published_pointcloud_time_;
    };

} // namespace event_camera_simulator
@ -2,44 +2,66 @@
#include <esim/common/types.hpp>
#include <esim/visualization/publisher_interface.hpp>

#include <fstream>

namespace event_camera_simulator {

    class SyntheticOpticFlowPublisher : public Publisher {
    public:
        SyntheticOpticFlowPublisher(const std::string& output_folder);

        ~SyntheticOpticFlowPublisher();

        virtual void
        imageCallback(const ImagePtrVector& images, Time t) override {
            CHECK_EQ(images.size(), 1);
            if (sensor_size_.width == 0 || sensor_size_.height == 0)
                sensor_size_ = images[0]->size();
        }

        virtual void eventsCallback(const EventsVector& events) override;

        virtual void opticFlowCallback(
            const OpticFlowPtrVector& optic_flows, Time t
        ) override {}

        virtual void imageCorruptedCallback(
            const ImagePtrVector& corrupted_images, Time t
        ) override {}

        virtual void
        depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) override {}

        virtual void poseCallback(
            const Transformation& T_W_B,
            const TransformationVector& T_W_Cs,
            Time t
        ) override {}

        virtual void twistCallback(
            const AngularVelocityVector& ws,
            const LinearVelocityVector& vs,
            Time t
        ) override {}

        virtual void
        imuCallback(const Vector3& acc, const Vector3& gyr, Time t) override {}

        virtual void cameraInfoCallback(
            const ze::CameraRig::Ptr& camera_rig, Time t
        ) override {}

        virtual void pointcloudCallback(
            const PointCloudVector& pointclouds, Time t
        ) override {}

        static Publisher::Ptr createFromGflags();

    private:
        std::string output_folder_;
        cv::Size sensor_size_;
        std::ofstream events_file_;
        Events events_; // buffer containing all the events since the beginning
    };

} // namespace event_camera_simulator
@ -1,135 +1,151 @@
#include <esim/common/utils.hpp>
#include <esim/visualization/adaptive_sampling_benchmark_publisher.hpp>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <opencv2/highgui/highgui.hpp>
#include <ze/common/file_utils.hpp>
#include <ze/common/path_utils.hpp>
#include <ze/common/time_conversions.hpp>

DEFINE_string(
    adaptive_sampling_benchmark_folder,
    "",
    "Folder in which to output the results."
);

DEFINE_string(
    adaptive_sampling_benchmark_pixels_to_record_file,
    "",
    "File containing the pixel locations to record."
);

namespace event_camera_simulator {

    AdaptiveSamplingBenchmarkPublisher::AdaptiveSamplingBenchmarkPublisher(
        const std::string& benchmark_folder,
        const std::string& pixels_to_record_filename
    )
        : image_index_(0) {
        ze::openOutputFileStream(
            ze::joinPath(benchmark_folder, "events.txt"),
            &events_file_
        );

        ze::openOutputFileStream(
            ze::joinPath(benchmark_folder, "images.txt"),
            &images_file_
        );

        ze::openOutputFileStream(
            ze::joinPath(benchmark_folder, "pixel_intensities.txt"),
            &pixel_intensities_file_
        );

        ze::openOutputFileStream(
            ze::joinPath(benchmark_folder, "optic_flows.txt"),
            &optic_flows_file_
        );

        // Load and parse the file containing the list of pixel locations
        // whose intensity values should be recorded
        std::ifstream pixels_to_record_file;
        if (pixels_to_record_filename != "") {
            ze::openFileStream(
                pixels_to_record_filename,
                &pixels_to_record_file
            );

            int x, y;
            LOG(INFO) << "Pixels that will be recorded: ";
            while (pixels_to_record_file >> x >> y) {
                LOG(INFO) << x << " , " << y;
                pixels_to_record_.push_back(PixelLocation(x, y));
            }
        }
    }
Publisher::Ptr AdaptiveSamplingBenchmarkPublisher::createFromGflags() Publisher::Ptr AdaptiveSamplingBenchmarkPublisher::createFromGflags() {
{ if (FLAGS_adaptive_sampling_benchmark_folder == "") {
if(FLAGS_adaptive_sampling_benchmark_folder == "") LOG(WARNING) << "Empty benchmark folder string: will not write "
{ "benchmark files";
LOG(WARNING) << "Empty benchmark folder string: will not write benchmark files"; return nullptr;
return nullptr; }
}
return std::make_shared<AdaptiveSamplingBenchmarkPublisher>(FLAGS_adaptive_sampling_benchmark_folder, return std::make_shared<AdaptiveSamplingBenchmarkPublisher>(
FLAGS_adaptive_sampling_benchmark_pixels_to_record_file); FLAGS_adaptive_sampling_benchmark_folder,
} FLAGS_adaptive_sampling_benchmark_pixels_to_record_file
);
}
AdaptiveSamplingBenchmarkPublisher::~AdaptiveSamplingBenchmarkPublisher() AdaptiveSamplingBenchmarkPublisher::~AdaptiveSamplingBenchmarkPublisher() {
{ // finish writing files
// finish writing files events_file_.close();
events_file_.close(); images_file_.close();
images_file_.close(); pixel_intensities_file_.close();
pixel_intensities_file_.close(); optic_flows_file_.close();
optic_flows_file_.close(); }
}
void AdaptiveSamplingBenchmarkPublisher::imageCallback(const ImagePtrVector& images, Time t) void AdaptiveSamplingBenchmarkPublisher::imageCallback(
{ const ImagePtrVector& images, Time t
CHECK_EQ(images.size(), 1); ) {
images_file_ << t << std::endl; CHECK_EQ(images.size(), 1);
images_file_ << t << std::endl;
ImagePtr img = images[0]; ImagePtr img = images[0];
cv::Mat img_8bit; cv::Mat img_8bit;
img->convertTo(img_8bit, CV_8U, 255); img->convertTo(img_8bit, CV_8U, 255);
if(image_index_ == 0) if (image_index_ == 0) {
{ static const std::vector<int> compression_params = {
static const std::vector<int> compression_params = {cv::IMWRITE_PNG_COMPRESSION, 0}; cv::IMWRITE_PNG_COMPRESSION,
0};
std::stringstream ss; std::stringstream ss;
ss << ze::joinPath(FLAGS_adaptive_sampling_benchmark_folder, "image_"); ss << ze::joinPath(
ss << image_index_ << ".png"; FLAGS_adaptive_sampling_benchmark_folder,
"image_"
);
ss << image_index_ << ".png";
LOG(INFO) << ss.str(); LOG(INFO) << ss.str();
cv::imwrite(ss.str(), img_8bit, compression_params); cv::imwrite(ss.str(), img_8bit, compression_params);
} }
for(const PixelLocation& pixel_loc : pixels_to_record_) for (const PixelLocation& pixel_loc : pixels_to_record_) {
{ // write line in the form "x y I(x,y)"
// write line in the form "x y I(x,y)" const int x = pixel_loc.first;
const int x = pixel_loc.first; const int y = pixel_loc.second;
const int y = pixel_loc.second; pixel_intensities_file_ << x << " " << y << " "
pixel_intensities_file_ << x << " " << (*images[0])(y, x) << std::endl;
<< y << " " }
<< (*images[0])(y,x) << std::endl;
}
image_index_++; image_index_++;
} }
void AdaptiveSamplingBenchmarkPublisher::opticFlowCallback(const OpticFlowPtrVector& optic_flows, Time t) void AdaptiveSamplingBenchmarkPublisher::opticFlowCallback(
{ const OpticFlowPtrVector& optic_flows, Time t
CHECK_EQ(optic_flows.size(), 1); ) {
for(const PixelLocation& pixel_loc : pixels_to_record_) CHECK_EQ(optic_flows.size(), 1);
{ for (const PixelLocation& pixel_loc : pixels_to_record_) {
// write line in the form "x y optic_flow(x,y)[0] optic_flow(x,y)[1]" // write line in the form "x y optic_flow(x,y)[0]
const int x = pixel_loc.first; // optic_flow(x,y)[1]"
const int y = pixel_loc.second; const int x = pixel_loc.first;
optic_flows_file_ << x << " " const int y = pixel_loc.second;
<< y << " " optic_flows_file_ << x << " " << y << " "
<< (*optic_flows[0])(y,x)[0] << " " << (*optic_flows[0])(y, x)[0] << " "
<< (*optic_flows[0])(y,x)[1] << (*optic_flows[0])(y, x)[1] << std::endl;
<< std::endl; }
} }
}
void AdaptiveSamplingBenchmarkPublisher::eventsCallback(
const EventsVector& events
) {
CHECK_EQ(events.size(), 1);
void AdaptiveSamplingBenchmarkPublisher::eventsCallback(const EventsVector& events) for (const Event& e : events[0]) {
{ events_file_ << e.t << " " << e.x << " " << e.y << " "
CHECK_EQ(events.size(), 1); << (e.pol ? 1 : 0) << std::endl;
}
for(const Event& e : events[0]) }
{
events_file_ << e.t << " " << e.x << " " << e.y << " " << (e.pol? 1 : 0) << std::endl;
}
}
} // namespace event_camera_simulator } // namespace event_camera_simulator
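For reference, a minimal sketch of a consumer for the "t x y pol" text format written by eventsCallback() above (one event per line). The EventRecord struct and the file path are illustrative, not part of ESIM:

#include <fstream>
#include <iostream>
#include <vector>

// Hypothetical record type mirroring the "t x y pol" lines.
struct EventRecord {
    long long t; // timestamp (nanoseconds in ESIM's convention)
    int x, y;    // pixel coordinates
    int pol;     // polarity: 1 (positive) or 0 (negative)
};

int main() {
    std::ifstream in("events.txt"); // example path
    std::vector<EventRecord> records;
    EventRecord r;
    // operator>> splits on whitespace, matching the writer above
    while (in >> r.t >> r.x >> r.y >> r.pol)
        records.push_back(r);
    std::cout << "read " << records.size() << " events\n";
    return 0;
}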
@@ -1,400 +1,417 @@
#include <esim/common/utils.hpp>
#include <esim/visualization/ros_publisher.hpp>
#include <esim/visualization/ros_utils.hpp>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <minkindr_conversions/kindr_msg.h>
#include <minkindr_conversions/kindr_tf.h>
#include <ze/common/time_conversions.hpp>

DEFINE_double(
    ros_publisher_camera_info_rate,
    0,
    "Camera info (maximum) publish rate, in Hz"
);

DEFINE_double(ros_publisher_frame_rate, 30, "(Maximum) frame rate, in Hz");

DEFINE_double(
    ros_publisher_depth_rate, 0, "(Maximum) depthmap publish rate, in Hz"
);

DEFINE_double(
    ros_publisher_pointcloud_rate,
    0,
    "(Maximum) point cloud publish rate, in Hz"
);

DEFINE_double(
    ros_publisher_optic_flow_rate,
    0,
    "(Maximum) optic flow map publish rate, in Hz"
);

namespace event_camera_simulator {

    RosPublisher::RosPublisher(size_t num_cameras) {
        CHECK_GE(num_cameras, 1);
        num_cameras_ = num_cameras;
        sensor_sizes_ = std::vector<cv::Size>(num_cameras_);

        // Initialize ROS if it was not initialized before.
        if (!ros::isInitialized()) {
            VLOG(1) << "Initializing ROS";
            int argc = 0;
            ros::init(
                argc,
                nullptr,
                std::string("ros_publisher"),
                ros::init_options::NoSigintHandler
            );
        }

        // Create node and subscribe.
        nh_.reset(new ros::NodeHandle(""));
        it_.reset(new image_transport::ImageTransport(*nh_));

        // Setup ROS publishers for images, events, poses, depth maps, camera
        // info, etc.
        for (size_t i = 0; i < num_cameras_; ++i) {
            event_pub_.emplace_back(
                new ros::Publisher(nh_->advertise<dvs_msgs::EventArray>(
                    getTopicName(i, "events"),
                    0
                ))
            );
            image_pub_.emplace_back(new image_transport::Publisher(
                it_->advertise(getTopicName(i, "image_raw"), 0)
            ));
            image_corrupted_pub_.emplace_back(new image_transport::Publisher(
                it_->advertise(getTopicName(i, "image_corrupted"), 0)
            ));
            depthmap_pub_.emplace_back(new image_transport::Publisher(
                it_->advertise(getTopicName(i, "depthmap"), 0)
            ));
            optic_flow_pub_.emplace_back(
                new ros::Publisher(nh_->advertise<esim_msgs::OpticFlow>(
                    getTopicName(i, "optic_flow"),
                    0
                ))
            );
            camera_info_pub_.emplace_back(
                new ros::Publisher(nh_->advertise<sensor_msgs::CameraInfo>(
                    getTopicName(i, "camera_info"),
                    0
                ))
            );
            twist_pub_.emplace_back(
                new ros::Publisher(nh_->advertise<geometry_msgs::TwistStamped>(
                    getTopicName(i, "twist"),
                    0
                ))
            );
            pointcloud_pub_.emplace_back(new ros::Publisher(
                nh_->advertise<pcl::PointCloud<pcl::PointXYZ>>(
                    getTopicName(i, "pointcloud"),
                    0
                )
            ));
        }

        pose_pub_.reset(new ros::Publisher(
            nh_->advertise<geometry_msgs::PoseStamped>("pose", 0)
        ));
        imu_pub_.reset(
            new ros::Publisher(nh_->advertise<sensor_msgs::Imu>("imu", 0))
        );
        tf_broadcaster_.reset(new tf::TransformBroadcaster());

        last_published_camera_info_time_ = 0;
        last_published_image_time_ = 0;
        last_published_corrupted_image_time_ = 0;
        last_published_depthmap_time_ = 0;
        last_published_optic_flow_time_ = 0;
        last_published_pointcloud_time_ = 0;
    }

    RosPublisher::~RosPublisher() {
        for (size_t i = 0; i < num_cameras_; ++i) {
            event_pub_[i]->shutdown();
            image_pub_[i]->shutdown();
            image_corrupted_pub_[i]->shutdown();
            depthmap_pub_[i]->shutdown();
            optic_flow_pub_[i]->shutdown();
            camera_info_pub_[i]->shutdown();
            twist_pub_[i]->shutdown();
            pointcloud_pub_[i]->shutdown();
        }
        pose_pub_->shutdown();
        ros::shutdown();
    }

    void RosPublisher::pointcloudCallback(
        const PointCloudVector& pointclouds, Time t
    ) {
        CHECK_EQ(pointcloud_pub_.size(), num_cameras_);
        CHECK_EQ(pointclouds.size(), num_cameras_);
        for (size_t i = 0; i < num_cameras_; ++i) {
            const PointCloud& pcl_camera = pointclouds[i];
            CHECK(pointcloud_pub_[i]);
            if (pointcloud_pub_[i]->getNumSubscribers() == 0)
                continue;
            Duration min_time_interval_between_published_pointclouds_ =
                ze::secToNanosec(1.0 / FLAGS_ros_publisher_pointcloud_rate);
            if (last_published_pointcloud_time_ > 0
                && t - last_published_pointcloud_time_
                       < min_time_interval_between_published_pointclouds_) {
                return;
            }

            pcl::PointCloud<pcl::PointXYZRGB>::Ptr
                msg(new pcl::PointCloud<pcl::PointXYZRGB>);
            std::stringstream ss;
            ss << "cam" << i;
            pointCloudToMsg(pointclouds[i], ss.str(), t, msg);
            pointcloud_pub_[i]->publish(msg);
        }
        last_published_pointcloud_time_ = t;
    }

    void RosPublisher::imageCallback(const ImagePtrVector& images, Time t) {
        CHECK_EQ(image_pub_.size(), num_cameras_);

        for (size_t i = 0; i < num_cameras_; ++i) {
            sensor_sizes_[i] = images[i]->size();

            CHECK(image_pub_[i]);
            if (image_pub_[i]->getNumSubscribers() == 0)
                continue;

            static const Duration min_time_interval_between_published_images_ =
                ze::secToNanosec(1.0 / FLAGS_ros_publisher_frame_rate);
            if (last_published_image_time_ > 0
                && t - last_published_image_time_
                       < min_time_interval_between_published_images_) {
                return;
            }

            if (images[i]) {
                sensor_msgs::ImagePtr msg;
                imageToMsg(*images[i], t, msg);
                image_pub_[i]->publish(msg);
            }
        }
        last_published_image_time_ = t;
    }

    void RosPublisher::imageCorruptedCallback(
        const ImagePtrVector& corrupted_images, Time t
    ) {
        CHECK_EQ(image_corrupted_pub_.size(), num_cameras_);

        for (size_t i = 0; i < num_cameras_; ++i) {
            CHECK(image_corrupted_pub_[i]);
            if (image_corrupted_pub_[i]->getNumSubscribers() == 0)
                continue;

            static const Duration min_time_interval_between_published_images_ =
                ze::secToNanosec(1.0 / FLAGS_ros_publisher_frame_rate);
            if (last_published_corrupted_image_time_ > 0
                && t - last_published_corrupted_image_time_
                       < min_time_interval_between_published_images_) {
                return;
            }

            if (corrupted_images[i]) {
                sensor_msgs::ImagePtr msg;
                imageToMsg(*corrupted_images[i], t, msg);
                image_corrupted_pub_[i]->publish(msg);
            }
        }
        last_published_corrupted_image_time_ = t;
    }

    void
    RosPublisher::depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) {
        CHECK_EQ(depthmap_pub_.size(), num_cameras_);

        for (size_t i = 0; i < num_cameras_; ++i) {
            CHECK(depthmap_pub_[i]);
            if (depthmap_pub_[i]->getNumSubscribers() == 0)
                continue;

            static const Duration
                min_time_interval_between_published_depthmaps_ =
                    ze::secToNanosec(1.0 / FLAGS_ros_publisher_depth_rate);
            if (last_published_depthmap_time_ > 0
                && t - last_published_depthmap_time_
                       < min_time_interval_between_published_depthmaps_) {
                return;
            }

            if (depthmaps[i]) {
                sensor_msgs::ImagePtr msg;
                depthmapToMsg(*depthmaps[i], t, msg);
                depthmap_pub_[i]->publish(msg);
            }
        }
        last_published_depthmap_time_ = t;
    }

    void RosPublisher::opticFlowCallback(
        const OpticFlowPtrVector& optic_flows, Time t
    ) {
        CHECK_EQ(optic_flow_pub_.size(), num_cameras_);

        for (size_t i = 0; i < num_cameras_; ++i) {
            CHECK(optic_flow_pub_[i]);
            if (optic_flow_pub_[i]->getNumSubscribers() == 0)
                continue;

            static const Duration
                min_time_interval_between_published_optic_flows_ =
                    (FLAGS_ros_publisher_optic_flow_rate > 0)
                        ? ze::secToNanosec(
                              1.0 / FLAGS_ros_publisher_optic_flow_rate
                          )
                        : 0;
            if (min_time_interval_between_published_optic_flows_ > 0
                && last_published_optic_flow_time_ > 0
                && t - last_published_optic_flow_time_
                       < min_time_interval_between_published_optic_flows_) {
                return;
            }

            if (optic_flows[i]) {
                esim_msgs::OpticFlow::Ptr msg;
                msg.reset(new esim_msgs::OpticFlow);
                opticFlowToMsg(*optic_flows[i], t, msg);
                optic_flow_pub_[i]->publish(msg);
            }
        }
        last_published_optic_flow_time_ = t;
    }

    void RosPublisher::eventsCallback(const EventsVector& events) {
        CHECK_EQ(event_pub_.size(), num_cameras_);

        for (size_t i = 0; i < num_cameras_; ++i) {
            if (sensor_sizes_[i].width == 0 || sensor_sizes_[i].height == 0)
                continue;

            if (events[i].empty())
                continue;

            CHECK(event_pub_[i]);
            if (event_pub_[i]->getNumSubscribers() == 0)
                continue;

            dvs_msgs::EventArrayPtr msg;
            msg.reset(new dvs_msgs::EventArray);
            eventsToMsg(
                events[i],
                sensor_sizes_[i].width,
                sensor_sizes_[i].height,
                msg
            );
            event_pub_[i]->publish(msg);
        }
    }

    void RosPublisher::poseCallback(
        const Transformation& T_W_B, const TransformationVector& T_W_Cs, Time t
    ) {
        if (T_W_Cs.size() != num_cameras_) {
            LOG(WARNING) << "Number of poses is different from the number of "
                            "cameras. Will not output poses.";
            return;
        }

        // Publish to tf
        tf::StampedTransform bt;
        bt.child_frame_id_ = "body";
        bt.frame_id_ = "map";
        bt.stamp_ = toRosTime(t);
        tf::transformKindrToTF(T_W_B, &bt);
        tf_broadcaster_->sendTransform(bt);

        for (size_t i = 0; i < num_cameras_; ++i) {
            std::stringstream ss;
            ss << "cam" << i;
            tf::StampedTransform bt;
            bt.child_frame_id_ = ss.str();
            bt.frame_id_ = "map";
            bt.stamp_ = toRosTime(t);
            tf::transformKindrToTF(T_W_Cs[i], &bt);
            tf_broadcaster_->sendTransform(bt);
        }

        // Publish pose message
        geometry_msgs::PoseStamped pose_stamped_msg;
        tf::poseStampedKindrToMsg(
            T_W_B,
            toRosTime(t),
            "map",
            &pose_stamped_msg
        );
        pose_pub_->publish(pose_stamped_msg);
    }

    void RosPublisher::twistCallback(
        const AngularVelocityVector& ws, const LinearVelocityVector& vs, Time t
    ) {
        if (ws.size() != num_cameras_ || vs.size() != num_cameras_) {
            LOG(WARNING) << "Number of twists is different from the number of "
                            "cameras. Will not output twists.";
            return;
        }
        CHECK_EQ(ws.size(), num_cameras_);
        CHECK_EQ(vs.size(), num_cameras_);
        CHECK_EQ(twist_pub_.size(), num_cameras_);

        for (size_t i = 0; i < num_cameras_; ++i) {
            CHECK(twist_pub_[i]);
            if (twist_pub_[i]->getNumSubscribers() == 0)
                continue;

            const geometry_msgs::TwistStamped msg = twistToMsg(ws[i], vs[i], t);
            twist_pub_[i]->publish(msg);
        }
    }

    void
    RosPublisher::imuCallback(const Vector3& acc, const Vector3& gyr, Time t) {
        if (imu_pub_->getNumSubscribers() == 0)
            return;

        const sensor_msgs::Imu msg = imuToMsg(acc, gyr, t);
        imu_pub_->publish(msg);
    }

    void RosPublisher::cameraInfoCallback(
        const ze::CameraRig::Ptr& camera_rig, Time t
    ) {
        CHECK(camera_rig);
        CHECK_EQ(camera_rig->size(), num_cameras_);

        static const Duration min_time_interval_between_published_camera_info_ =
            ze::secToNanosec(1.0 / FLAGS_ros_publisher_camera_info_rate);
        if (last_published_camera_info_time_ > 0
            && t - last_published_camera_info_time_
                   < min_time_interval_between_published_camera_info_) {
            return;
        }

        for (size_t i = 0; i < num_cameras_; ++i) {
            CHECK(camera_info_pub_[i]);
            if (camera_info_pub_[i]->getNumSubscribers() == 0)
                continue;

            sensor_msgs::CameraInfoPtr msg;
            msg.reset(new sensor_msgs::CameraInfo);
            cameraToMsg(camera_rig->atShared(i), t, msg);
            camera_info_pub_[i]->publish(msg);
        }

        last_published_camera_info_time_ = t;
    }

} // namespace event_camera_simulator
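The publish-rate guards above all follow the same pattern: convert a gflags rate in Hz into a minimum inter-message interval in nanoseconds, then skip publishing when a message arrives too soon after the last one. A minimal standalone sketch of that logic (names are illustrative; Time and Duration stand in for ESIM's nanosecond typedefs, and a rate of 0 is taken to mean "unthrottled", as in the optic flow case above):

#include <cstdint>

using Time = int64_t;     // nanoseconds
using Duration = int64_t; // nanoseconds

// Returns true if a message stamped 't' should be published, given the
// time of the last published message and a maximum rate in Hz.
bool shouldPublish(Time t, Time last_published, double max_rate_hz) {
    if (max_rate_hz <= 0)
        return true; // no rate limit configured
    const Duration min_interval =
        static_cast<Duration>(1e9 / max_rate_hz); // secToNanosec(1 / rate)
    return last_published == 0 || t - last_published >= min_interval;
}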
@@ -1,174 +1,169 @@
#include <cv_bridge/cv_bridge.h>
#include <esim/common/utils.hpp>
#include <esim/visualization/ros_utils.hpp>
#include <pcl_conversions/pcl_conversions.h>

namespace event_camera_simulator {

    void pointCloudToMsg(
        const PointCloud& pointcloud,
        const std::string& frame_id,
        Time t,
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr& msg
    ) {
        CHECK(msg);
        msg->header.frame_id = frame_id;
        msg->height = pointcloud.size();
        msg->width = 1;
        for (auto& p_c : pointcloud) {
            pcl::PointXYZRGB p;
            p.x = p_c.xyz(0);
            p.y = p_c.xyz(1);
            p.z = p_c.xyz(2);
            p.r = p_c.rgb(0);
            p.g = p_c.rgb(1);
            p.b = p_c.rgb(2);
            msg->points.push_back(p);
        }

        pcl_conversions::toPCL(toRosTime(t), msg->header.stamp);
    }

    void imageToMsg(const Image& image, Time t, sensor_msgs::ImagePtr& msg) {
        cv_bridge::CvImage cv_image;
        image.convertTo(cv_image.image, CV_8U, 255.0);
        cv_image.encoding = "mono8";
        cv_image.header.stamp = toRosTime(t);
        msg = cv_image.toImageMsg();
    }

    void depthmapToMsg(
        const Depthmap& depthmap, Time t, sensor_msgs::ImagePtr& msg
    ) {
        cv_bridge::CvImage cv_depthmap;
        depthmap.convertTo(cv_depthmap.image, CV_32FC1);
        cv_depthmap.encoding = "32FC1";
        cv_depthmap.header.stamp = toRosTime(t);
        msg = cv_depthmap.toImageMsg();
    }

    void opticFlowToMsg(
        const OpticFlow& flow, Time t, esim_msgs::OpticFlowPtr& msg
    ) {
        CHECK(msg);
        msg->header.stamp = toRosTime(t);

        const int height = flow.rows;
        const int width = flow.cols;
        msg->height = height;
        msg->width = width;

        msg->flow_x.resize(height * width);
        msg->flow_y.resize(height * width);
        for (int y = 0; y < height; ++y) {
            for (int x = 0; x < width; ++x) {
                msg->flow_x[x + y * width] = static_cast<float>(flow(y, x)[0]);
                msg->flow_y[x + y * width] = static_cast<float>(flow(y, x)[1]);
            }
        }
    }

    void eventsToMsg(
        const Events& events,
        int width,
        int height,
        dvs_msgs::EventArrayPtr& msg
    ) {
        CHECK(msg);
        std::vector<dvs_msgs::Event> events_list;
        for (const Event& e : events) {
            dvs_msgs::Event ev;
            ev.x = e.x;
            ev.y = e.y;
            ev.ts = toRosTime(e.t);
            ev.polarity = e.pol;

            events_list.push_back(ev);
        }

        msg->events = events_list;
        msg->height = height;
        msg->width = width;
        msg->header.stamp = events_list.back().ts;
    }

    sensor_msgs::Imu imuToMsg(const Vector3& acc, const Vector3& gyr, Time t) {
        sensor_msgs::Imu imu;
        imu.header.stamp = toRosTime(t);

        imu.linear_acceleration.x = acc(0);
        imu.linear_acceleration.y = acc(1);
        imu.linear_acceleration.z = acc(2);

        imu.angular_velocity.x = gyr(0);
        imu.angular_velocity.y = gyr(1);
        imu.angular_velocity.z = gyr(2);

        return imu;
    }

    geometry_msgs::TwistStamped
    twistToMsg(const AngularVelocity& w, const LinearVelocity& v, Time t) {
        geometry_msgs::TwistStamped twist;
        twist.header.stamp = toRosTime(t);

        twist.twist.angular.x = w(0);
        twist.twist.angular.y = w(1);
        twist.twist.angular.z = w(2);

        twist.twist.linear.x = v(0);
        twist.twist.linear.y = v(1);
        twist.twist.linear.z = v(2);

        return twist;
    }

    void cameraToMsg(
        const ze::Camera::Ptr& camera, Time t, sensor_msgs::CameraInfoPtr& msg
    ) {
        CHECK(msg);
        msg->width = camera->width();
        msg->height = camera->height();
        msg->header.stamp = toRosTime(t);

        CalibrationMatrix K = calibrationMatrixFromCamera(camera);
        boost::array<double, 9> K_vec;
        std::vector<double> D_vec;
        for (int i = 0; i < 3; ++i)
            for (int j = 0; j < 3; ++j)
                K_vec[j + i * 3] = static_cast<double>(K(i, j));

        switch (camera->type()) {
        case ze::CameraType::PinholeRadialTangential:
        case ze::CameraType::Pinhole:
            msg->distortion_model = "plumb_bob";
            break;
        case ze::CameraType::PinholeEquidistant:
            msg->distortion_model = "equidistant";
            break;
        case ze::CameraType::PinholeFov:
            msg->distortion_model = "fov";
            break;
        default:
            LOG(WARNING) << "Unknown camera distortion model";
            msg->distortion_model = "";
            break;
        }

        for (int j = 0; j < camera->distortionParameters().rows(); ++j) {
            D_vec.push_back(static_cast<double>(camera->distortionParameters()(j
            ))); // @TODO: use the distortion params from the camera
        }

        msg->K = K_vec;
        msg->D = D_vec;
        msg->P = {K(0, 0), 0, K(0, 2), 0, 0, K(1, 1), K(1, 2), 0, 0, 0, 1, 0};
        msg->R = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    }

} // namespace event_camera_simulator
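cameraToMsg() flattens the 3x3 calibration matrix K row-major into the 9-element K field of sensor_msgs::CameraInfo, and forms the 3x4 projection matrix P by appending a zero fourth column (the monocular case). A small numeric sketch of that layout; the intrinsic values are made up for illustration:

#include <array>

int main() {
    // Example pinhole intrinsics: fx, fy, cx, cy (illustrative values)
    const double fx = 200.0, fy = 200.0, cx = 120.0, cy = 90.0;

    // Row-major 3x3 K, as written into CameraInfo::K above
    const std::array<double, 9> K = {fx, 0, cx, 0, fy, cy, 0, 0, 1};

    // 3x4 P: K with an appended zero translation column
    const std::array<double, 12> P =
        {fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1, 0};

    (void) K;
    (void) P;
    return 0;
}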
@@ -1,13 +1,12 @@
#include <esim/common/utils.hpp>
#include <esim/visualization/ros_utils.hpp>
#include <esim/visualization/rosbag_writer.hpp>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <minkindr_conversions/kindr_msg.h>
#include <minkindr_conversions/kindr_tf.h>
#include <tf/tfMessage.h>
#include <ze/common/time_conversions.hpp>

DECLARE_double(ros_publisher_camera_info_rate);
DECLARE_double(ros_publisher_frame_rate);
DECLARE_double(ros_publisher_depth_rate);
DECLARE_double(ros_publisher_pointcloud_rate);
DECLARE_double(ros_publisher_optic_flow_rate);

DEFINE_string(
    path_to_output_bag, "", "Path to which save the output bag file."
);

namespace event_camera_simulator {

    RosbagWriter::RosbagWriter(
        const std::string& path_to_output_bag, size_t num_cameras
    ) {
        CHECK_GE(num_cameras, 1);
        num_cameras_ = num_cameras;
        sensor_sizes_ = std::vector<cv::Size>(num_cameras_);

        try {
            bag_.open(path_to_output_bag, rosbag::bagmode::Write);
        } catch (const rosbag::BagIOException& e) {
            LOG(FATAL) << "Error: could not open rosbag: "
                       << FLAGS_path_to_output_bag << std::endl;
            return;
        }

        LOG(INFO) << "Will write to bag: " << path_to_output_bag;

        last_published_camera_info_time_ = 0;
        last_published_image_time_ = 0;
        last_published_corrupted_image_time_ = 0;
        last_published_depthmap_time_ = 0;
        last_published_optic_flow_time_ = 0;
        last_published_pointcloud_time_ = 0;
    }

    Publisher::Ptr RosbagWriter::createBagWriterFromGflags(size_t num_cameras) {
        if (FLAGS_path_to_output_bag == "") {
            LOG(INFO) << "Empty output bag string: will not write to rosbag";
            return nullptr;
        }

        return std::make_shared<RosbagWriter>(
            FLAGS_path_to_output_bag,
            num_cameras
        );
    }

    RosbagWriter::~RosbagWriter() {
        LOG(INFO) << "Finalizing the bag...";
        bag_.close();
        LOG(INFO) << "Finished writing to bag: " << FLAGS_path_to_output_bag;
    }

    void RosbagWriter::pointcloudCallback(
        const PointCloudVector& pointclouds, Time t
    ) {
        CHECK_EQ(pointclouds.size(), num_cameras_);

        for (size_t i = 0; i < num_cameras_; ++i) {
            Duration min_time_interval_between_published_pointclouds_ =
                ze::secToNanosec(1.0 / FLAGS_ros_publisher_pointcloud_rate);
            if (last_published_pointcloud_time_ > 0
                && t - last_published_pointcloud_time_
                       < min_time_interval_between_published_pointclouds_) {
                return;
            }

            pcl::PointCloud<pcl::PointXYZRGB>::Ptr
                msg(new pcl::PointCloud<pcl::PointXYZRGB>);
            std::stringstream ss;
            ss << "cam" << i;
            pointCloudToMsg(pointclouds[i], ss.str(), t, msg);
            bag_.write(
                getTopicName(topic_name_prefix_, i, "pointcloud"),
                toRosTime(t),
                msg
            );
        }
        last_published_pointcloud_time_ = t;
    }

    void RosbagWriter::imageCallback(const ImagePtrVector& images, Time t) {
        for (size_t i = 0; i < num_cameras_; ++i) {
            sensor_sizes_[i] = images[i]->size();

            static const Duration min_time_interval_between_published_images_ =
                ze::secToNanosec(1.0 / FLAGS_ros_publisher_frame_rate);
            if (last_published_image_time_ > 0
                && t - last_published_image_time_
                       < min_time_interval_between_published_images_) {
                return;
            }

            if (images[i]) {
                sensor_msgs::ImagePtr msg;
                imageToMsg(*images[i], t, msg);
                bag_.write(
                    getTopicName(topic_name_prefix_, i, "image_raw"),
                    msg->header.stamp,
                    msg
                );
            }
        }
        last_published_image_time_ = t;
    }

    void RosbagWriter::imageCorruptedCallback(
        const ImagePtrVector& images_corrupted, Time t
    ) {
        for (size_t i = 0; i < num_cameras_; ++i) {
            static const Duration min_time_interval_between_published_images_ =
                ze::secToNanosec(1.0 / FLAGS_ros_publisher_frame_rate);
            if (last_published_corrupted_image_time_ > 0
                && t - last_published_corrupted_image_time_
                       < min_time_interval_between_published_images_) {
                return;
            }

            if (images_corrupted[i]) {
                sensor_msgs::ImagePtr msg;
                imageToMsg(*images_corrupted[i], t, msg);
                bag_.write(
                    getTopicName(topic_name_prefix_, i, "image_corrupted"),
                    msg->header.stamp,
                    msg
                );
            }
        }
        last_published_corrupted_image_time_ = t;
    }

    void
    RosbagWriter::depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) {
        if (depthmaps.size() != num_cameras_)
            return;

        for (size_t i = 0; i < num_cameras_; ++i) {
            static const Duration
                min_time_interval_between_published_depthmaps_ =
                    ze::secToNanosec(1.0 / FLAGS_ros_publisher_depth_rate);
            if (last_published_depthmap_time_ > 0
                && t - last_published_depthmap_time_
                       < min_time_interval_between_published_depthmaps_) {
                return;
            }

            if (depthmaps[i]) {
                sensor_msgs::ImagePtr msg;
                depthmapToMsg(*depthmaps[i], t, msg);
                bag_.write(
                    getTopicName(topic_name_prefix_, i, "depthmap"),
                    msg->header.stamp,
                    msg
                );
            }
        }
        last_published_depthmap_time_ = t;
    }

    void RosbagWriter::opticFlowCallback(
        const OpticFlowPtrVector& optic_flows, Time t
    ) {
        if (optic_flows.size() != num_cameras_)
            return;

        for (size_t i = 0; i < num_cameras_; ++i) {
            static const Duration
                min_time_interval_between_published_optic_flows_ =
                    ze::secToNanosec(1.0 / FLAGS_ros_publisher_optic_flow_rate);
            if (last_published_optic_flow_time_ > 0
                && t - last_published_optic_flow_time_
                       < min_time_interval_between_published_optic_flows_) {
                return;
            }

            if (optic_flows[i]) {
                esim_msgs::OpticFlow::Ptr msg;
                msg.reset(new esim_msgs::OpticFlow);
                opticFlowToMsg(*optic_flows[i], t, msg);
                bag_.write(
                    getTopicName(topic_name_prefix_, i, "optic_flow"),
                    msg->header.stamp,
                    msg
                );
            }
        }

        last_published_optic_flow_time_ = t;
    }

    void RosbagWriter::eventsCallback(const EventsVector& events) {
        for (size_t i = 0; i < num_cameras_; ++i) {
            if (sensor_sizes_[i].width == 0 || sensor_sizes_[i].height == 0)
                continue;

            if (events[i].empty())
                continue;

            dvs_msgs::EventArrayPtr msg;
            msg.reset(new dvs_msgs::EventArray);
            eventsToMsg(
                events[i],
                sensor_sizes_[i].width,
                sensor_sizes_[i].height,
                msg
            );

            bag_.write(
                getTopicName(topic_name_prefix_, i, "events"),
                msg->header.stamp,
                msg
            );
        }
    }

    void RosbagWriter::poseCallback(
        const Transformation& T_W_B, const TransformationVector& T_W_Cs, Time t
    ) {
        if (T_W_Cs.size() != num_cameras_) {
            LOG(WARNING) << "Number of poses is different from the number of "
                            "cameras. Will not output poses.";
            return;
        }
        geometry_msgs::PoseStamped pose_stamped_msg;
        geometry_msgs::TransformStamped transform_stamped_msg;
        transform_stamped_msg.header.frame_id = "map";
        transform_stamped_msg.header.stamp = toRosTime(t);
        tf::tfMessage tf_msg;

        for (size_t i = 0; i < num_cameras_; ++i) {
            // Write pose to bag
            tf::poseStampedKindrToMsg(
                T_W_Cs[i],
                toRosTime(t),
                "map",
                &pose_stamped_msg
            );
            bag_.write(
                getTopicName(topic_name_prefix_, i, "pose"),
                toRosTime(t),
                pose_stamped_msg
            );

            // Write tf transform to bag
            std::stringstream ss;
            ss << "cam" << i;
            transform_stamped_msg.child_frame_id = ss.str();
            tf::transformKindrToMsg(
                T_W_Cs[i],
                &transform_stamped_msg.transform
            );
            tf_msg.transforms.push_back(transform_stamped_msg);
        }

        transform_stamped_msg.child_frame_id = "body";
        tf::transformKindrToMsg(T_W_B, &transform_stamped_msg.transform);
        tf_msg.transforms.push_back(transform_stamped_msg);

        bag_.write("/tf", toRosTime(t), tf_msg);
    }

    void RosbagWriter::twistCallback(
        const AngularVelocityVector& ws, const LinearVelocityVector& vs, Time t
    ) {
        if (ws.size() != num_cameras_ || vs.size() != num_cameras_) {
            LOG(WARNING) << "Number of twists is different from the number of "
                            "cameras. Will not output twists.";
            return;
        }
        CHECK_EQ(ws.size(), num_cameras_);
        CHECK_EQ(vs.size(), num_cameras_);

        for (size_t i = 0; i < num_cameras_; ++i) {
            const geometry_msgs::TwistStamped msg = twistToMsg(ws[i], vs[i], t);
            bag_.write(
                getTopicName(topic_name_prefix_, i, "twist"),
                msg.header.stamp,
                msg
            );
        }
    }

    void
    RosbagWriter::imuCallback(const Vector3& acc, const Vector3& gyr, Time t) {
        VLOG_EVERY_N(1, 500) << "t = " << ze::nanosecToSecTrunc(t) << " s";

        const sensor_msgs::Imu msg = imuToMsg(acc, gyr, t);
        const std::string imu_topic = "/imu";
        bag_.write(imu_topic, msg.header.stamp, msg);
    }

    void RosbagWriter::cameraInfoCallback(
        const ze::CameraRig::Ptr& camera_rig, Time t
    ) {
        CHECK(camera_rig);
        CHECK_EQ(camera_rig->size(), num_cameras_);

        static const Duration min_time_interval_between_published_camera_info_ =
            ze::secToNanosec(1.0 / FLAGS_ros_publisher_camera_info_rate);
        if (last_published_camera_info_time_ > 0
            && t - last_published_camera_info_time_
                   < min_time_interval_between_published_camera_info_) {
            return;
        }

        for (size_t i = 0; i < num_cameras_; ++i) {
            sensor_msgs::CameraInfoPtr msg;
            msg.reset(new sensor_msgs::CameraInfo);
            cameraToMsg(camera_rig->atShared(i), t, msg);
            bag_.write(
                getTopicName(topic_name_prefix_, i, "camera_info"),
                msg->header.stamp,
                msg
            );
        }

        last_published_camera_info_time_ = t;
    }

} // namespace event_camera_simulator
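A bag written this way can be read back offline with the standard ROS1 rosbag C++ API. A minimal sketch, assuming the bag path and topic match the writer above ("/imu" is the topic imuCallback() uses; the bag file name is an example):

#include <cstdio>
#include <rosbag/bag.h>
#include <rosbag/view.h>
#include <sensor_msgs/Imu.h>

int main() {
    rosbag::Bag bag;
    bag.open("out.bag", rosbag::bagmode::Read); // example path

    // Iterate over the IMU messages written by imuCallback()
    rosbag::View view(bag, rosbag::TopicQuery("/imu"));
    for (const rosbag::MessageInstance& m : view) {
        sensor_msgs::Imu::ConstPtr imu = m.instantiate<sensor_msgs::Imu>();
        if (imu)
            std::printf("imu stamp: %.6f\n", imu->header.stamp.toSec());
    }
    bag.close();
    return 0;
}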
@@ -1,107 +1,127 @@
#include <esim/visualization/synthetic_optic_flow_publisher.hpp>
#include <esim/common/utils.hpp> #include <esim/common/utils.hpp>
#include <ze/common/path_utils.hpp> #include <esim/visualization/synthetic_optic_flow_publisher.hpp>
#include <ze/common/file_utils.hpp>
#include <ze/common/time_conversions.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <gflags/gflags.h> #include <gflags/gflags.h>
#include <glog/logging.h> #include <glog/logging.h>
#include <opencv2/highgui/highgui.hpp>
#include <ze/common/file_utils.hpp>
#include <ze/common/path_utils.hpp>
#include <ze/common/time_conversions.hpp>
DEFINE_string(synthetic_optic_flow_output_folder, "", DEFINE_string(
"Folder in which to output the events."); synthetic_optic_flow_output_folder,
"",
"Folder in which to output the events."
);
namespace event_camera_simulator { namespace event_camera_simulator {
/** /**
* This publisher was designed with the purpose of generating simulation data * This publisher was designed with the purpose of generating simulation
* with ground truth labels, for the task of optic flow estimation. * data with ground truth labels, for the task of optic flow estimation.
* *
* It assumes that it will receive a relatively small sequence of events (corresponding, for example, * It assumes that it will receive a relatively small sequence of events
* to all the events in between two frames), and will write all the events to disk in its destructor, * (corresponding, for example, to all the events in between two frames),
* in three forms: * and will write all the events to disk in its destructor, in three forms:
* - an "events.txt" file that contains all the events in "t x y pol" format (one event per line) * - an "events.txt" file that contains all the events in "t x y pol"
* - an "event_count.png" image that whose first two channels contain the counts of the positive (resp. negative) event counts at each pixel * format (one event per line)
* - two "timestamps images" in which each pixel contains the timestamp at the last event that fell on the pixel. * - an "event_count.png" image that whose first two channels contain the
* (since the timestamp is a floating point value, it is split in 3 8-bit values so that the timestamp images * counts of the positive (resp. negative) event counts at each pixel
* can be saved in a single 3-channel image). * - two "timestamps images" in which each pixel contains the timestamp at
*/ * the last event that fell on the pixel. (since the timestamp is a floating
SyntheticOpticFlowPublisher::SyntheticOpticFlowPublisher(const std::string& output_folder) * point value, it is split in 3 8-bit values so that the timestamp images
: output_folder_(output_folder) * can be saved in a single 3-channel image).
{ */
ze::openOutputFileStream(ze::joinPath(output_folder, "events.txt"), SyntheticOpticFlowPublisher::SyntheticOpticFlowPublisher(
&events_file_); const std::string& output_folder
} )
: output_folder_(output_folder) {
ze::openOutputFileStream(
ze::joinPath(output_folder, "events.txt"),
&events_file_
);
}
Publisher::Ptr SyntheticOpticFlowPublisher::createFromGflags() Publisher::Ptr SyntheticOpticFlowPublisher::createFromGflags() {
{ if (FLAGS_synthetic_optic_flow_output_folder == "") {
if(FLAGS_synthetic_optic_flow_output_folder == "") LOG(WARNING
{ ) << "Empty output folder string: will not write synthetic "
LOG(WARNING) << "Empty output folder string: will not write synthetic optic flow files"; "optic flow files";
return nullptr; return nullptr;
} }
return std::make_shared<SyntheticOpticFlowPublisher>(FLAGS_synthetic_optic_flow_output_folder); return std::make_shared<SyntheticOpticFlowPublisher>(
} FLAGS_synthetic_optic_flow_output_folder
);
}
SyntheticOpticFlowPublisher::~SyntheticOpticFlowPublisher() SyntheticOpticFlowPublisher::~SyntheticOpticFlowPublisher() {
{ // Create an event count image using all the events collected
// Create an event count image using all the events collected cv::Mat event_count_image = cv::Mat::zeros(sensor_size_, CV_8UC3);
cv::Mat event_count_image = cv::Mat::zeros(sensor_size_, CV_8UC3);
// Create two event timestamps images using all the events collected // Create two event timestamps images using all the events collected
cv::Mat timestamps_pos = cv::Mat::zeros(sensor_size_, CV_8UC3); cv::Mat timestamps_pos = cv::Mat::zeros(sensor_size_, CV_8UC3);
cv::Mat timestamps_neg = cv::Mat::zeros(sensor_size_, CV_8UC3); cv::Mat timestamps_neg = cv::Mat::zeros(sensor_size_, CV_8UC3);
int remapped_timestamp_fraction; int remapped_timestamp_fraction;
double timestamp_fraction; double timestamp_fraction;
for(Event e : events_) for (Event e : events_) {
{ event_count_image.at<cv::Vec3b>(e.y, e.x)[int(e.pol)]++;
event_count_image.at<cv::Vec3b>(e.y,e.x)[int(e.pol)]++;
cv::Mat& curr_timestamp_image = e.pol ? timestamps_pos : timestamps_neg; cv::Mat& curr_timestamp_image =
e.pol ? timestamps_pos : timestamps_neg;
// remap value // remap value
timestamp_fraction = double(e.t - events_[0].t) / (events_[events_.size()-1].t - events_[0].t); timestamp_fraction = double(e.t - events_[0].t)
remapped_timestamp_fraction = timestamp_fraction * std::pow(2,24); // remap 0-1 to 0 - 2^24 / (events_[events_.size() - 1].t - events_[0].t);
remapped_timestamp_fraction =
timestamp_fraction * std::pow(2, 24); // remap 0-1 to 0 - 2^24
// distribute the 24 bit number (remapped_timestamp_fraction) to 3 channel 8 bit image // distribute the 24 bit number (remapped_timestamp_fraction) to 3
for (int i=0; i<3; i++) // channel 8 bit image
{ for (int i = 0; i < 3; i++) {
curr_timestamp_image.at<cv::Vec3b>(e.y,e.x)[i] = (int) remapped_timestamp_fraction & 0xFF; // bit mask of 0000 0000 0000 0000 1111 1111 curr_timestamp_image.at<cv::Vec3b>(e.y, e.x)[i] =
remapped_timestamp_fraction = remapped_timestamp_fraction >> 8; // shifts bits to right by 8 (int) remapped_timestamp_fraction
} & 0xFF; // bit mask of 0000 0000 0000 0000 1111 1111
} remapped_timestamp_fraction = remapped_timestamp_fraction
>> 8; // shifts bits to right by 8
}
}
// Write event count image + the two timestamps images to disk // Write event count image + the two timestamps images to disk
std::string path_event_count_image = ze::joinPath(output_folder_, "event_count.png"); std::string path_event_count_image =
std::string path_timestamps_pos = ze::joinPath(output_folder_, "event_time_stamps_pos.png"); ze::joinPath(output_folder_, "event_count.png");
std::string path_timestamps_neg = ze::joinPath(output_folder_, "event_time_stamps_neg.png"); std::string path_timestamps_pos =
ze::joinPath(output_folder_, "event_time_stamps_pos.png");
std::string path_timestamps_neg =
ze::joinPath(output_folder_, "event_time_stamps_neg.png");
std::vector<int> compression_params; std::vector<int> compression_params;
compression_params.push_back(cv::IMWRITE_PNG_COMPRESSION); compression_params.push_back(cv::IMWRITE_PNG_COMPRESSION);
compression_params.push_back(0); compression_params.push_back(0);
cv::imwrite(path_event_count_image, event_count_image, compression_params); cv::imwrite(
cv::imwrite(path_timestamps_pos, timestamps_pos, compression_params); path_event_count_image,
cv::imwrite(path_timestamps_neg, timestamps_neg, compression_params); event_count_image,
compression_params
);
cv::imwrite(path_timestamps_pos, timestamps_pos, compression_params);
cv::imwrite(path_timestamps_neg, timestamps_neg, compression_params);
// Finish writing event file // Finish writing event file
events_file_.close(); events_file_.close();
} }
void SyntheticOpticFlowPublisher::eventsCallback(const EventsVector& events) void SyntheticOpticFlowPublisher::eventsCallback(const EventsVector& events
{ ) {
CHECK_EQ(events.size(), 1); CHECK_EQ(events.size(), 1);
// Simply aggregate the events into the events_ buffer. // Simply aggregate the events into the events_ buffer.
// At the destruction of this object, everything will be saved to disk. // At the destruction of this object, everything will be saved to disk.
for(const Event& e : events[0]) for (const Event& e : events[0]) {
{ events_file_ << e.t << " " << e.x << " " << e.y << " "
events_file_ << e.t << " " << e.x << " " << e.y << " " << (e.pol? 1 : 0) << std::endl; << (e.pol ? 1 : 0) << std::endl;
events_.push_back(e); events_.push_back(e);
} }
} }
} // namespace event_camera_simulator } // namespace event_camera_simulator
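For readers of these timestamp images, the 24-bit encoding above is straightforward to invert. Below is a minimal standalone sketch (a hypothetical helper, not part of this commit; it assumes only OpenCV) that decodes one pixel of event_time_stamps_pos.png / event_time_stamps_neg.png back into a normalized timestamp in [0, 1]:

    #include <opencv2/core.hpp>
    #include <cstdint>

    // Channel 0 holds the least-significant 8 bits of the 24-bit value,
    // channel 2 the most-significant 8 bits (matching the writer loop above).
    double decodeTimestampFraction(const cv::Vec3b& px) {
        const uint32_t v = uint32_t(px[0])          // bits 0-7
                         | (uint32_t(px[1]) << 8)   // bits 8-15
                         | (uint32_t(px[2]) << 16); // bits 16-23
        return double(v) / double(1 << 24); // back to [0, 1]
    }

The absolute time can then be recovered as t0 + fraction * (t1 - t0), where t0 and t1 are the first and last event timestamps in events.txt.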


@ -1,35 +1,39 @@
#pragma once

#include <esim/imp_multi_objects_2d/object.hpp>
#include <esim/rendering/simple_renderer_base.hpp>

namespace event_camera_simulator {

    //! A rendering engine for planar scenes
    class MultiObject2DRenderer : public SimpleRenderer {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW

        MultiObject2DRenderer();

        ~MultiObject2DRenderer();

        virtual bool render(
            const Time t,
            const ImagePtr& out_image,
            const OpticFlowPtr& optic_flow_map
        ) const;

        virtual int getWidth() const {
            return width_;
        }

        virtual int getHeight() const {
            return height_;
        }

    protected:
        void outputGroundTruthData();

        std::vector<std::shared_ptr<Object>> objects_;
        int width_, height_;
        ze::real_t tmax_;
    };

} // namespace event_camera_simulator


@ -4,118 +4,136 @@
namespace event_camera_simulator {

    bool loadTexture(
        const std::string& path_to_img,
        cv::Mat* img,
        double median_blur,
        double gaussian_blur
    );

    using Affine = cv::Matx<FloatType, 3, 3>;
    using AffineWithJacobian = std::pair<Affine, Affine>;

    class MotionParameters {
    public:
        MotionParameters(
            FloatType tmax,
            FloatType theta0_deg,
            FloatType theta1_deg,
            FloatType x0,
            FloatType x1,
            FloatType y0,
            FloatType y1,
            FloatType sx0,
            FloatType sx1,
            FloatType sy0,
            FloatType sy1
        )
            : tmax_(tmax),
              x0_(x0),
              x1_(x1),
              y0_(y0),
              y1_(y1),
              theta0_(theta0_deg * CV_PI / 180.),
              theta1_(theta1_deg * CV_PI / 180.),
              sx0_(sx0),
              sx1_(sx1),
              sy0_(sy0),
              sy1_(sy1) {}

        AffineWithJacobian getAffineTransformationWithJacobian(ze::real_t t) {
            // constants
            const ze::real_t dtheta = theta1_ - theta0_;
            const ze::real_t dx = x1_ - x0_;
            const ze::real_t dy = y1_ - y0_;
            const ze::real_t dsx = sx1_ - sx0_;
            const ze::real_t dsy = sy1_ - sy0_;

            // computation of parameter(t)
            const ze::real_t theta = theta0_ + t / tmax_ * dtheta;
            const ze::real_t x = x0_ + t / tmax_ * dx;
            const ze::real_t y = y0_ + t / tmax_ * dy;
            const ze::real_t sx = sx0_ + t / tmax_ * dsx;
            const ze::real_t sy = sy0_ + t / tmax_ * dsy;

            const ze::real_t stheta = std::sin(theta);
            const ze::real_t ctheta = std::cos(theta);

            Affine A;
            A << sx * ctheta, -sy * stheta, x,
                sx * stheta, sy * ctheta, y,
                0, 0, 1;

            // computation of dparameter_dt(t)
            const ze::real_t dtheta_dt = 1. / tmax_ * dtheta;
            const ze::real_t dx_dt = 1. / tmax_ * dx;
            const ze::real_t dy_dt = 1. / tmax_ * dy;
            const ze::real_t dsx_dt = 1. / tmax_ * dsx;
            const ze::real_t dsy_dt = 1. / tmax_ * dsy;

            cv::Matx<FloatType, 3, 3> dAdt;
            dAdt << dsx_dt * ctheta - dtheta_dt * stheta * sx,
                -dsy_dt * stheta - dtheta_dt * ctheta * sy, dx_dt,
                dsx_dt * stheta + dtheta_dt * ctheta * sx,
                dsy_dt * ctheta - dtheta_dt * stheta * sy, dy_dt,
                0.0, 0.0, 0.0;

            return AffineWithJacobian(A, dAdt);
        }

        FloatType tmax_;
        FloatType x0_, x1_;
        FloatType y0_, y1_;
        FloatType theta0_, theta1_;
        FloatType sx0_, sx1_;
        FloatType sy0_, sy1_;
    };

    class Object {
    public:
        Object(
            const std::string path_to_texture,
            const cv::Size& dst_size,
            const MotionParameters& motion_params,
            double median_blur,
            double gaussian_blur
        );

        void draw(Time t, bool is_first_layer = false);

        cv::Mat canvas_;
        OpticFlow flow_;

        MotionParameters getMotionParameters() const {
            return p_;
        }

        Affine getK0() const {
            return K0_;
        }

        Affine getK1() const {
            return K1_;
        }

    private:
        cv::Mat texture_;
        cv::Size dst_size_;
        MotionParameters p_;
        Affine K0_, K1_;
    };

    void getIntensityAndAlpha(
        const cv::Mat& image,
        int x,
        int y,
        ImageFloatType* intensity,
        ImageFloatType* alpha
    );

    inline ImageFloatType bgrToGrayscale(uchar b, uchar g, uchar r) {
        // https://www.johndcook.com/blog/2009/08/24/algorithms-convert-color-grayscale/
        return 0.07 * static_cast<ImageFloatType>(b)
             + 0.72 * static_cast<ImageFloatType>(g)
             + 0.21 * static_cast<ImageFloatType>(r);
    }

} // namespace event_camera_simulator
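To make the interpolation above concrete, here is a short usage sketch (hypothetical, not part of this commit; all numbers are made up for illustration). MotionParameters linearly interpolates rotation, translation and scale between t=0 and t=tmax, and returns both the affine transform and its elementwise time derivative:

    #include <esim/imp_multi_objects_2d/object.hpp>

    using namespace event_camera_simulator;

    void motionParametersUsageExample() {
        MotionParameters motion(
            2.0,        // tmax: the motion lasts 2 seconds
            0.0, 90.0,  // rotation from 0 to 90 degrees
            0.0, 0.5,   // x-translation from 0 to 0.5
            0.0, -0.25, // y-translation from 0 to -0.25
            1.0, 2.0,   // x-scale doubles over the sequence
            1.0, 2.0    // y-scale doubles over the sequence
        );

        // At t = 1s (halfway), every parameter is halfway between its
        // endpoints; .second is the elementwise time derivative dA/dt.
        AffineWithJacobian A_and_dAdt =
            motion.getAffineTransformationWithJacobian(1.0);
        const Affine& A = A_and_dAdt.first;
        const Affine& dAdt = A_and_dAdt.second;
        (void) A;
        (void) dAdt;
    }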


@ -1,11 +1,10 @@
#include <esim/common/utils.hpp>
#include <esim/imp_multi_objects_2d/imp_multi_objects_2d_renderer.hpp>
#include <glog/logging.h>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/common/file_utils.hpp>
#include <ze/common/time_conversions.hpp>
@ -13,268 +12,312 @@ DECLARE_double(renderer_preprocess_median_blur);
DECLARE_double(renderer_preprocess_gaussian_blur);

DEFINE_string(
    path_to_sequence_file,
    "",
    "Path to the sequence file that describes the 2D, multi-object scene."
);

DEFINE_string(
    path_to_output_folder,
    "",
    "Path to the output folder where the flow ground truth files "
    "will be written"
);

DEFINE_bool(
    output_reverse_displacement_map,
    false,
    "Whether to output the reverse displacement map (i.e., the "
    "displacement field that maps pixels at t=tmax to pixels at t=t0)"
);

namespace event_camera_simulator {

    MultiObject2DRenderer::MultiObject2DRenderer() {
        // Load sequence from file
        const std::string path_to_sequence_file = FLAGS_path_to_sequence_file;

        std::ifstream sequence_file;
        ze::openFileStream(path_to_sequence_file, &sequence_file);

        std::string path_to_texture;
        double median_blur, gaussian_blur, theta0, theta1, x0, x1, y0, y1, sx0,
            sx1, sy0, sy1;

        sequence_file >> width_ >> height_ >> tmax_;

        CHECK_GT(width_, 0);
        CHECK_GT(height_, 0);
        CHECK_GT(tmax_, 0);

        VLOG(1) << "width = " << width_ << ", height = " << height_;
        VLOG(1) << "tmax = " << tmax_;

        while (sequence_file >> path_to_texture >> median_blur >> gaussian_blur
               >> theta0 >> theta1 >> x0 >> x1 >> y0 >> y1 >> sx0 >> sx1 >> sy0
               >> sy1) {
            MotionParameters params(
                tmax_,
                theta0,
                theta1,
                x0,
                x1,
                y0,
                y1,
                sx0,
                sx1,
                sy0,
                sy1
            );

            objects_.emplace_back(std::make_shared<Object>(
                path_to_texture,
                cv::Size(width_, height_),
                params,
                median_blur,
                gaussian_blur
            ));
        }

        if (FLAGS_path_to_output_folder != "")
            outputGroundTruthData();
    }

    MultiObject2DRenderer::~MultiObject2DRenderer() {}

    bool MultiObject2DRenderer::render(
        const Time t,
        const ImagePtr& out_image,
        const OpticFlowPtr& optic_flow_map
    ) const {
        if (ze::nanosecToSecTrunc(t) > tmax_)
            return true;

        CHECK_EQ(out_image->rows, height_);
        CHECK_EQ(out_image->cols, width_);

        out_image->setTo(0);
        optic_flow_map->setTo(0.);

        for (int i = 0; i < objects_.size(); ++i) {
            const bool is_first_layer = (i == 0);
            objects_[i]->draw(t, is_first_layer);
        }

        // composite the local images drawn by each object
        // start from bottom image, merge it with the upper one
        // then go one level up, merge the resulting image with the upper one
        // and so on...
        ImageFloatType intensity;
        ImageFloatType alpha;

        for (int i = 0; i < objects_.size(); ++i) {
            const std::shared_ptr<Object>& object = objects_[i];
            const cv::Mat& image = object->canvas_;
            const OpticFlow& flow = object->flow_;

            for (int y = 0; y < out_image->rows; ++y) {
                for (int x = 0; x < out_image->cols; ++x) {
                    getIntensityAndAlpha(image, x, y, &intensity, &alpha);
                    (*out_image)(y, x) =
                        alpha * intensity + (1. - alpha) * (*out_image)(y, x);

                    if (alpha > 0)
                        (*optic_flow_map)(y, x) = flow(y, x);
                }
            }
        }

        return false;
    }

    void MultiObject2DRenderer::outputGroundTruthData() {
        // This function generates some ground truth information and outputs
        // it to FLAGS_path_to_output_folder. In principle, this information
        // could be transmitted in a SimulationData structure and forwarded to
        // a Publisher object that writes it to disk. However, for technical
        // reasons, it is more convenient to write the displacement maps here,
        // rather than integrate instantaneous optic flow maps in a Publisher.

        // The following ground truth information is computed below and output
        // to the ground truth folder:
        //   - Displacement maps from t=0 to t=tmax (and vice-versa)
        //   - Images 0 and 1

        VLOG(1) << "Will output ground truth data to folder: "
                << FLAGS_path_to_output_folder;

        // Notation: the frame at time t=0 is denoted as frame 0, and the
        // frame at time t=tmax is denoted as frame 1. Compute the entire
        // displacement field from 0 to 1, for every layer.
        const ze::real_t t0 = 0.0;
        const ze::real_t t1 = tmax_;

        // Every entry in this vector is a displacement map that maps pixel
        // locations in image 0 to the corresponding pixel location in image 1,
        // for the layer i
        //
        // these individual displacement layers are then merged together to
        // form the final displacement map later
        //
        // Note: the reverse layer-wise displacement map can be computed as
        // follows:
        //    layer_displacement_01[i] = -layer_displacement_10[i]
        std::vector<OpticFlow> layer_displacements_10(objects_.size());
        for (int i = 0; i < objects_.size(); ++i) {
            layer_displacements_10[i] = OpticFlow(height_, width_);
            const Object object = *(objects_[i]);

            Affine A_t0_src = object.getMotionParameters()
                                  .getAffineTransformationWithJacobian(t0)
                                  .first;
            Affine A_t1_src = object.getMotionParameters()
                                  .getAffineTransformationWithJacobian(t1)
                                  .first;
            Affine A_t1_t0 = A_t1_src * A_t0_src.inv();

            // M_t1_t0 maps any point on the first image to its position in
            // the last image
            Affine M_t1_t0 = object.getK1() * A_t1_t0 * object.getK1().inv();

            for (int y = 0; y < height_; ++y) {
                for (int x = 0; x < width_; ++x) {
                    FloatType x_t1 =
                        M_t1_t0(0, 0) * x + M_t1_t0(0, 1) * y + M_t1_t0(0, 2);
                    FloatType y_t1 =
                        M_t1_t0(1, 0) * x + M_t1_t0(1, 1) * y + M_t1_t0(1, 2);

                    FloatType displacement_x, displacement_y;

                    // if the pixel went out of the field of view, we assign a
                    // displacement of 0 (convention)
                    displacement_x =
                        (x_t1 >= 0 && x_t1 < width_)
                            ? (ImageFloatType) x_t1 - (ImageFloatType) x
                            : 0.;
                    displacement_y =
                        (y_t1 >= 0 && y_t1 < height_)
                            ? (ImageFloatType) y_t1 - (ImageFloatType) y
                            : 0.;

                    layer_displacements_10[i](y, x)[0] = displacement_x;
                    layer_displacements_10[i](y, x)[1] = displacement_y;
                }
            }
        }

        ImageFloatType intensity, alpha;

        // First, merge the displacement map from 0 to 1
        OpticFlow displacement_map_10(
            height_,
            width_
        ); // displacement map from 0 to 1
        Image image0(height_, width_);
        image0.setTo(0);

        for (int i = 0; i < objects_.size(); ++i) {
            const bool is_first_layer = (i == 0);
            objects_[i]->draw(t0, is_first_layer);
        }

        for (int i = 0; i < objects_.size(); ++i) {
            const std::shared_ptr<Object>& object = objects_[i];

            for (int y = 0; y < height_; ++y) {
                for (int x = 0; x < width_; ++x) {
                    getIntensityAndAlpha(
                        object->canvas_,
                        x,
                        y,
                        &intensity,
                        &alpha
                    );
                    image0(y, x) =
                        alpha * intensity + (1. - alpha) * image0(y, x);
                    if (alpha > 0) {
                        displacement_map_10(y, x) =
                            layer_displacements_10[i](y, x);
                    }
                }
            }
        }

        // Second, create displacement map from 1 to 0
        OpticFlow displacement_map_01(
            height_,
            width_
        ); // displacement map from 1 to 0
        Image image1(height_, width_);
        image1.setTo(0);

        for (int i = 0; i < objects_.size(); ++i) {
            const bool is_first_layer = (i == 0);
            objects_[i]->draw(ze::secToNanosec(t1), is_first_layer);
        }

        for (int i = 0; i < objects_.size(); ++i) {
            for (int y = 0; y < height_; ++y) {
                for (int x = 0; x < width_; ++x) {
                    getIntensityAndAlpha(
                        objects_[i]->canvas_,
                        x,
                        y,
                        &intensity,
                        &alpha
                    );
                    image1(y, x) =
                        alpha * intensity + (1. - alpha) * image1(y, x);
                    if (alpha > 0) {
                        displacement_map_01(y, x) =
                            -layer_displacements_10[i](y, x);
                    }
                }
            }
        }

        std::ofstream displacement_file_10, displacement_file_01;
        ze::openOutputFileStream(
            ze::joinPath(FLAGS_path_to_output_folder, "displacement_10.txt"),
            &displacement_file_10
        );

        if (FLAGS_output_reverse_displacement_map) {
            ze::openOutputFileStream(
                ze::joinPath(
                    FLAGS_path_to_output_folder,
                    "displacement_01.txt"
                ),
                &displacement_file_01
            );
        }

        for (int y = 0; y < height_; ++y) {
            for (int x = 0; x < width_; ++x) {
                displacement_file_10 << displacement_map_10(y, x)[0] << " "
                                     << displacement_map_10(y, x)[1]
                                     << std::endl;

                if (FLAGS_output_reverse_displacement_map) {
                    displacement_file_01 << displacement_map_01(y, x)[0] << " "
                                         << displacement_map_01(y, x)[1]
                                         << std::endl;
                }
            }
        }

        displacement_file_10.close();
        if (FLAGS_output_reverse_displacement_map)
            displacement_file_01.close();

        cv::Mat disp_image0, disp_image1;
        image0.convertTo(disp_image0, CV_8U, 255);
        image1.convertTo(disp_image1, CV_8U, 255);
        cv::imwrite(
            ze::joinPath(FLAGS_path_to_output_folder, "image0.png"),
            disp_image0
        );
        cv::imwrite(
            ze::joinPath(FLAGS_path_to_output_folder, "image1.png"),
            disp_image1
        );
    }

} // namespace event_camera_simulator


@ -1,129 +1,131 @@
#include <esim/imp_multi_objects_2d/object.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/common/time_conversions.hpp>

namespace event_camera_simulator {

    bool loadTexture(
        const std::string& path_to_img,
        cv::Mat* img,
        double median_blur,
        double gaussian_blur
    ) {
        CHECK(img);
        VLOG(1) << "Loading texture file from file: " << path_to_img << ".";

        *img = cv::imread(path_to_img, cv::IMREAD_UNCHANGED);

        if (!img->data) {
            LOG(FATAL) << "Could not open image at: " << path_to_img << ".";
            return false;
        }

        if (median_blur > 1) {
            VLOG(1) << "Pre-filtering the texture with median filter of size: "
                    << median_blur << ".";
            cv::medianBlur(*img, *img, median_blur);
        }

        if (gaussian_blur > 0) {
            VLOG(1)
                << "Pre-filtering the texture with gaussian filter of size: "
                << gaussian_blur << ".";
            cv::GaussianBlur(*img, *img, cv::Size(21, 21), gaussian_blur);
        }
        return true;
    }

    Object::Object(
        const std::string path_to_texture,
        const cv::Size& dst_size,
        const MotionParameters& motion_params,
        double median_blur,
        double gaussian_blur
    )
        : dst_size_(dst_size),
          p_(motion_params) {
        loadTexture(path_to_texture, &texture_, median_blur, gaussian_blur);

        K0_ << texture_.cols, 0, 0.5 * texture_.cols, 0, texture_.rows,
            0.5 * texture_.rows, 0, 0, 1;

        K1_ << dst_size_.width, 0, 0.5 * dst_size_.width, 0, dst_size_.height,
            0.5 * dst_size_.height, 0, 0, 1;

        canvas_ = cv::Mat(dst_size, CV_8UC4);
        canvas_.setTo(0);
        flow_ = OpticFlow(dst_size);
        flow_.setTo(0);
    }

    void Object::draw(Time t, bool is_first_layer) {
        canvas_.setTo(0);

        ze::real_t ts = ze::nanosecToSecTrunc(t);

        ts = std::min(ts, p_.tmax_);

        AffineWithJacobian A10_jac = p_.getAffineTransformationWithJacobian(ts);
        Affine& A10 = A10_jac.first;
        Affine& dA10dt = A10_jac.second;

        // test jacobian
        // const ze::real_t h = 1e-5;
        // AffineWithJacobian A = p_.getAffineTransformationWithJacobian(ts);
        // AffineWithJacobian Ah =
        //     p_.getAffineTransformationWithJacobian(ts+h);
        // Affine dAdt_numeric = 1./h * (Ah.first-A.first);
        // LOG(INFO) << dAdt_numeric;
        // LOG(INFO) << A.second;

        Affine M_10 = K1_ * A10 * K0_.inv();
        Affine dM10_dt = K1_ * dA10dt * K0_.inv();

        // warpAffine requires M_dst_src unless the WARP_INVERSE_MAP flag is
        // passed, in which case it will require M_src_dst
        // TODO: can we do something more efficient than fully warping the 4
        // channels (BGR+alpha)?
        int border_mode =
            is_first_layer ? cv::BORDER_REFLECT101 : cv::BORDER_CONSTANT;
        cv::warpPerspective(
            texture_,
            canvas_,
            M_10,
            canvas_.size(),
            cv::INTER_LINEAR,
            border_mode
        );

        cv::Matx<FloatType, 3, 3> SS = dM10_dt * M_10.inv();

        for (int y = 0; y < flow_.rows; ++y) {
            for (int x = 0; x < flow_.cols; ++x) {
                flow_(y, x)[0] = SS(0, 0) * x + SS(0, 1) * y + SS(0, 2);
                flow_(y, x)[1] = SS(1, 0) * x + SS(1, 1) * y + SS(1, 2);
            }
        }
    }

    void getIntensityAndAlpha(
        const cv::Mat& image,
        int x,
        int y,
        ImageFloatType* intensity,
        ImageFloatType* alpha
    ) {
        CHECK(image.type() == CV_8UC3 || image.type() == CV_8UC4);

        if (image.type() == CV_8UC3) {
            cv::Vec3b val = image.at<cv::Vec3b>(y, x);
            *intensity = bgrToGrayscale(val[0], val[1], val[2]) / 255.;
            *alpha = 1.;
        } else {
            cv::Vec4b val = image.at<cv::Vec4b>(y, x);
            *intensity = bgrToGrayscale(val[0], val[1], val[2]) / 255.;
            *alpha = static_cast<ImageFloatType>(val[3]) / 255.;
        }
    }

} // namespace event_camera_simulator


@ -2,134 +2,164 @@
#include <esim/rendering/renderer_base.hpp>

class Shader;     // fwd
class Model;      // fwd
class GLFWwindow; // fwd

namespace event_camera_simulator {

    //! Rendering engine based on OpenGL
    class OpenGLRenderer : public Renderer {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW

        ZE_POINTER_TYPEDEFS(Renderer);

        OpenGLRenderer();

        ~OpenGLRenderer();

        //! Render an image at a given pose.
        virtual void render(
            const Transformation& T_W_C,
            const std::vector<Transformation>& T_W_OBJ,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap
        ) const;

        //! Returns true if the rendering engine can compute optic flow,
        //! false otherwise
        virtual bool canComputeOpticFlow() const override {
            return true;
        }

        //! Render an image + depth map + optic flow map at a given pose,
        //! given the camera linear and angular velocity
        virtual void renderWithFlow(
            const Transformation& T_W_C,
            const LinearVelocity& v_WC,
            const AngularVelocity& w_WC,
            const std::vector<Transformation>& T_W_OBJ,
            const std::vector<LinearVelocity>& linear_velocity_obj,
            const std::vector<AngularVelocity>& angular_velocity_obj,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap,
            const OpticFlowPtr& optic_flow_map
        ) const override;

        //! Sets the camera
        virtual void setCamera(const ze::Camera::Ptr& camera);

    protected:
        void init();

        /**
         @brief basic function to produce an OpenGL projection matrix and
         associated viewport parameters which match a given set of camera
         intrinsics. This is currently written for the Eigen linear algebra
         library, however it should be straightforward to port to any 4x4
         matrix class. (The skew term of the intrinsic matrix is assumed to
         be zero below.)

         @param[out] frustum Eigen::Matrix4d projection matrix. Eigen stores
         these matrices in column-major (i.e. OpenGL) order.
         @param[in] alpha x-axis focal length, from camera intrinsic matrix
         @param[in] beta y-axis focal length, from camera intrinsic matrix
         @param[in] u0 image origin x-coordinate, from camera intrinsic matrix
         @param[in] v0 image origin y-coordinate, from camera intrinsic matrix
         @param[in] img_width image width, in pixels
         @param[in] img_height image height, in pixels
         @param[in] near_clip near clipping plane z-location, can be set
         arbitrarily > 0, controls the mapping of z-coordinates for OpenGL
         @param[in] far_clip far clipping plane z-location, can be set
         arbitrarily > near_clip, controls the mapping of z-coordinates for
         OpenGL

         Code adapted from:
         - http://ksimek.github.io/2013/06/03/calibrated_cameras_in_opengl/
         - https://pastebin.com/h8nYNWJY
        */
        void build_opengl_projection_for_intrinsics(
            Eigen::Matrix4d& frustum,
            double alpha,
            double beta,
            double u0,
            double v0,
            int img_width,
            int img_height,
            double near_clip,
            double far_clip
        ) const {
            // These parameters define the final viewport that is rendered
            // into by the camera.
            double L = 0;
            double R = img_width;
            double B = 0;
            double T = img_height;

            // near and far clipping planes; these only matter for the
            // mapping from the world-space z-coordinate into the depth
            // coordinate for OpenGL
            double N = near_clip;
            double F = far_clip;

            double skew = 0.0;

            // construct an orthographic matrix which maps from projected
            // coordinates to normalized device coordinates in the range
            // [-1, 1]. OpenGL then maps coordinates in NDC to the current
            // viewport
            Eigen::Matrix4d ortho = Eigen::Matrix4d::Zero();
            ortho(0, 0) = 2.0 / (R - L);
            ortho(0, 3) = -(R + L) / (R - L);
            ortho(1, 1) = 2.0 / (T - B);
            ortho(1, 3) = -(T + B) / (T - B);
            ortho(2, 2) = -2.0 / (F - N);
            ortho(2, 3) = -(F + N) / (F - N);
            ortho(3, 3) = 1.0;

            // construct a projection matrix; this is identical to the
            // projection matrix computed for the intrinsics, except an
            // additional row is inserted to map the z-coordinate to
            // OpenGL.
            Eigen::Matrix4d tproj = Eigen::Matrix4d::Zero();
            tproj(0, 0) = alpha;
            tproj(0, 1) = skew;
            tproj(0, 2) = -u0;
            tproj(1, 1) = beta;
            tproj(1, 2) = -v0;
            tproj(2, 2) = N + F;
            tproj(2, 3) = N * F;
            tproj(3, 2) = -1.0;

            // the resulting OpenGL frustum is the product of the
            // orthographic mapping to normalized device coordinates and the
            // augmented camera intrinsic matrix
            frustum = ortho * tproj;
        }

        GLFWwindow* window;
        std::unique_ptr<Shader> shader;
        std::unique_ptr<Shader> optic_flow_shader;
        std::unique_ptr<Model> our_model;
        std::vector<std::unique_ptr<Model>> dynamic_objects_model;

        bool is_initialized_;

        unsigned int width_;
        unsigned int height_;

        unsigned int texture1;
        unsigned int texture2;

        unsigned int VBO, VAO;
        unsigned int multisampled_fbo, multisampled_color_buf,
            multisampled_depth_buf;
        unsigned int fbo, color_buf, depth_buf,
            depth_buf_of;            // framebuffer for color and depth images
        unsigned int fbo_of, of_buf; // framebuffer for optic flow

        float zmin = 0.1f;
        float zmax = 10.0f;
    };

} // namespace event_camera_simulator
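For parameter-order reference, an illustrative call with made-up pinhole intrinsics (the method is protected, so this fragment would live inside the class or a subclass; the near/far values here match the zmin/zmax defaults above):

    Eigen::Matrix4d frustum;
    build_opengl_projection_for_intrinsics(
        frustum,
        320.0, 320.0, // alpha, beta: focal lengths in pixels
        320.0, 240.0, // u0, v0: principal point of a 640x480 camera
        640, 480,     // img_width, img_height
        0.1, 10.0     // near_clip, far_clip
    );
    // 'frustum' is then ready to be passed to the shaders as the
    // projection matrix (column-major, as OpenGL expects).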


@ -4,48 +4,60 @@
namespace event_camera_simulator {

    //! A rendering engine for planar scenes
    class PanoramaRenderer : public Renderer {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW

        PanoramaRenderer(
            const Image& texture, const Transformation::Rotation& R_W_P
        );

        ~PanoramaRenderer();

        //! Render image and depth map for a given camera pose
        virtual void render(
            const Transformation& T_W_C,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap
        ) const;

        void render(
            const Transformation& T_W_C,
            const std::vector<Transformation>& T_W_OBJ,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap
        ) const override {
            render(T_W_C, out_image, out_depthmap);
        }

        //! Returns true if the rendering engine can compute optic flow,
        //! false otherwise
        virtual bool canComputeOpticFlow() const override {
            return false;
        }

        virtual void setCamera(const ze::Camera::Ptr& camera) override;

    protected:
        void precomputePixelToBearingLookupTable();
        void projectToPanorama(
            const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint
        ) const;

        // Texture mapped on the plane
        Image texture_;
        Transformation::Rotation R_W_P_;

        // Intrinsic parameters of the panorama camera
        FloatType fx_, fy_;
        ze::Keypoint principal_point_;

        // Precomputed lookup table from keypoint -> bearing vector
        ze::Bearings bearings_C_;

        // Preallocated warping matrices
        mutable cv::Mat_<float> map_x_, map_y_;
    };

} // namespace event_camera_simulator


@ -1,113 +1,111 @@
#include <esim/common/utils.hpp>
#include <esim/imp_panorama_renderer/panorama_renderer.hpp>
#include <glog/logging.h>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>

namespace event_camera_simulator {

    PanoramaRenderer::PanoramaRenderer(
        const Image& texture, const Transformation::Rotation& R_W_P
    )
        : texture_(texture),
          R_W_P_(R_W_P) {
        principal_point_ =
            ze::Keypoint(0.5 * texture_.cols, 0.5 * texture_.rows);
        fx_ = texture_.cols / (2.0 * CV_PI);
        fy_ = texture_.rows / CV_PI;
        LOG(INFO) << "Initialized panoramic camera with size: "
                  << texture_.cols << "x" << texture_.rows
                  << ", principal point: " << principal_point_.transpose()
                  << ", fx = " << fx_ << " px, fy = " << fy_ << " px";
    }

    PanoramaRenderer::~PanoramaRenderer() {}

    void PanoramaRenderer::setCamera(const ze::Camera::Ptr& camera) {
        CHECK(camera);
        camera_ = camera;

        precomputePixelToBearingLookupTable();

        // Preallocate memory for the warping maps
        cv::Size sensor_size(camera->width(), camera->height());
        map_x_ = cv::Mat_<float>::zeros(sensor_size);
        map_y_ = cv::Mat_<float>::zeros(sensor_size);
    }

    void PanoramaRenderer::precomputePixelToBearingLookupTable() {
        // points_C is a matrix containing the coordinates of each pixel
        // coordinate in the image plane
        ze::Keypoints points_C(2, camera_->width() * camera_->height());
        for (int y = 0; y < camera_->height(); ++y)
            for (int x = 0; x < camera_->width(); ++x)
                points_C.col(x + camera_->width() * y) = ze::Keypoint(x, y);

        bearings_C_ = camera_->backProjectVectorized(points_C);
        bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
    }

    void PanoramaRenderer::projectToPanorama(
        const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint
    ) const {
        CHECK(keypoint);
        static constexpr FloatType kEpsilon = 1e-10;

        const FloatType X = f[0];
        const FloatType Y = f[1];
        const FloatType Z = f[2];

        const FloatType rho2 = X * X + Y * Y + Z * Z;
        const FloatType rho = std::sqrt(rho2);

        CHECK_GE(rho, kEpsilon);

        const FloatType phi = std::atan2(X, Z);
        const FloatType theta = std::asin(-Y / rho);

        *keypoint = principal_point_ + ze::Keypoint(phi * fx_, -theta * fy_);
    }

    void PanoramaRenderer::render(
        const Transformation& T_W_C,
        const ImagePtr& out_image,
        const DepthmapPtr& out_depthmap
    ) const {
        CHECK_EQ(out_image->rows, camera_->height());
        CHECK_EQ(out_image->cols, camera_->width());

        const Transformation::RotationMatrix R_P_C =
            R_W_P_.inverse().getRotationMatrix() * T_W_C.getRotationMatrix();

        ze::Bearings bearings_P = R_P_C * bearings_C_;
        ze::Keypoint keypoint;
        for (int y = 0; y < camera_->height(); ++y) {
            for (int x = 0; x < camera_->width(); ++x) {
                const ze::Bearing& f =
                    bearings_P.col(x + camera_->width() * y);

                // project bearing vector to panorama image
                projectToPanorama(f, &keypoint);
                map_x_(y, x) = static_cast<float>(keypoint[0]);
                map_y_(y, x) = static_cast<float>(keypoint[1]);
            }
        }

        cv::remap(
            texture_,
            *out_image,
            map_x_,
            map_y_,
            cv::INTER_LINEAR,
            cv::BORDER_REFLECT_101
        );

        if (out_depthmap) {
            static constexpr ImageFloatType infinity = 1e10;
            out_depthmap->setTo(infinity);
        }
    }

} // namespace event_camera_simulator
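The projection above is a standard equirectangular mapping: with fx = W / (2*pi) and fy = H / pi, the azimuth phi in [-pi, pi] spans the full texture width and the elevation theta in [-pi/2, pi/2] the full height. A self-contained sketch of the same mapping (hypothetical helper, plain doubles instead of the ze:: types):

    #include <cmath>

    void bearingToPanoramaPixel(
        double X, double Y, double Z, // bearing in the panorama frame
        int W, int H,                 // panorama texture size
        double* u, double* v          // resulting pixel coordinates
    ) {
        const double rho = std::sqrt(X * X + Y * Y + Z * Z);
        const double phi = std::atan2(X, Z);      // azimuth
        const double theta = std::asin(-Y / rho); // elevation
        *u = 0.5 * W + (W / (2.0 * M_PI)) * phi;
        *v = 0.5 * H - (H / M_PI) * theta; // image y grows downwards
    }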


@ -4,40 +4,45 @@
namespace event_camera_simulator {

    //! A rendering engine for planar scenes
    class PanoramaRenderer : public Renderer {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW

        PanoramaRenderer(
            const Image& texture, const Transformation::Rotation& R_W_P
        );

        ~PanoramaRenderer();

        //! Render image and depth map for a given camera pose
        virtual void render(
            const Transformation& T_W_C,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap
        ) const override;

        virtual void setCamera(const ze::Camera::Ptr& camera) override;

    protected:
        void precomputePixelToBearingLookupTable();
        void projectToPanorama(
            const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint
        ) const;

        // Texture mapped on the plane
        Image texture_;
        Transformation::Rotation R_W_P_;

        // Intrinsic parameters of the panorama camera
        FloatType fx_, fy_;
        ze::Keypoint principal_point_;

        // Precomputed lookup table from keypoint -> bearing vector
        ze::Bearings bearings_C_;

        // Preallocated warping matrices
        mutable cv::Mat_<float> map_x_, map_y_;
    };

} // namespace event_camera_simulator


@ -4,56 +4,68 @@
namespace event_camera_simulator {

    //! A rendering engine for planar scenes
    class PlanarRenderer : public Renderer {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW

        PlanarRenderer(
            const Image& texture,
            const Camera::Ptr& cam_src,
            const Transformation& T_W_P,
            FloatType z_min,
            FloatType z_max,
            bool extend_border
        );

        ~PlanarRenderer();

        //! Render image and depth map for a given camera pose
        virtual void render(
            const Transformation& T_W_C,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap
        ) const;

        void render(
            const Transformation& T_W_C,
            const std::vector<Transformation>& T_W_OBJ,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap
        ) const override {
            render(T_W_C, out_image, out_depthmap);
        }

        //! Returns true if the rendering engine can compute optic flow,
        //! false otherwise
        virtual bool canComputeOpticFlow() const override {
            return false;
        }

        virtual void setCamera(const ze::Camera::Ptr& camera) override;

    protected:
        void precomputePixelToBearingLookupTable();

        // Texture mapped on the plane
        Image texture_;
        Camera::Ptr cam_src_;
        CalibrationMatrix K_src_, K_src_inv_;

        // Plane parameters
        Transformation T_W_P_;

        // Clipping parameters
        FloatType z_min_;
        FloatType z_max_;
        bool extend_border_;

        // Precomputed lookup table from keypoint -> bearing vector
        ze::Bearings bearings_C_;

        // Preallocated warping matrices
        mutable cv::Mat_<float> map_x_, map_y_;
    };

} // namespace event_camera_simulator


@ -1,42 +1,47 @@
#pragma once

#include <esim/common/types.hpp>
#include <ze/common/macros.hpp>

namespace event_camera_simulator {

    //! Represents a rendering engine that generates images (and other outputs,
    //! such as depth maps, or optical flow maps) given a scene and a camera
    //! position.
    class Renderer {
    public:
        ZE_POINTER_TYPEDEFS(Renderer);

        Renderer() {}

        //! Render an image at a given pose.
        virtual void render(
            const Transformation& T_W_C,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap
        ) const = 0;

        //! Render an image + depth map + optic flow map at a given pose,
        //! given the camera linear and angular velocity
        virtual void render(
            const Transformation& T_W_C,
            const LinearVelocity& v_WC,
            const AngularVelocity& w_WC,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap,
            const OpticFlowPtr& optic_flow_map
        ) const {}

        //! Sets the camera
        virtual void setCamera(const ze::Camera::Ptr& camera) = 0;

        //! Get the camera rig
        const ze::Camera::Ptr& getCamera() const {
            return camera_;
        }

    protected:
        ze::Camera::Ptr camera_;
    };
} // namespace event_camera_simulator
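
A minimal sketch of a concrete subclass, assuming only the interface above (the class name ConstantRenderer is hypothetical and not part of this commit): it fills the image with a constant irradiance and, when a depth map is requested, a constant depth.

// Hypothetical example subclass -- illustration only.
#include <esim/rendering/renderer_base.hpp>

namespace event_camera_simulator {

    class ConstantRenderer : public Renderer {
    public:
        virtual void render(
            const Transformation& T_W_C,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap
        ) const override {
            out_image->setTo(0.5); // constant irradiance in [0, 1]
            if (out_depthmap)
                out_depthmap->setTo(1.0); // everything at 1 m
        }

        virtual void setCamera(const ze::Camera::Ptr& camera) override {
            camera_ = camera;
        }
    };
} // namespace event_camera_simulator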


@ -1,11 +1,11 @@
#pragma once

#include <esim/rendering/renderer_base.hpp>
#include <gflags/gflags.h>

namespace event_camera_simulator {

    bool loadPreprocessedImage(const std::string& path_to_img, cv::Mat* img);

    Renderer::Ptr loadRendererFromGflags();

} // namespace event_camera_simulator


@ -1,113 +1,111 @@
#include <esim/common/utils.hpp>
#include <esim/rendering/panorama_renderer.hpp>
#include <glog/logging.h>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>

namespace event_camera_simulator {

    PanoramaRenderer::PanoramaRenderer(
        const Image& texture, const Transformation::Rotation& R_W_P
    )
        : texture_(texture),
          R_W_P_(R_W_P) {
        principal_point_ =
            ze::Keypoint(0.5 * texture_.cols, 0.5 * texture_.rows);
        fx_ = texture_.cols / (2.0 * CV_PI);
        fy_ = texture_.rows / CV_PI;
        LOG(INFO) << "Initialized panoramic camera with size: " << texture_.cols
                  << "x" << texture_.rows
                  << ", principal point: " << principal_point_.transpose()
                  << ", fx = " << fx_ << " px, fy = " << fy_ << " px";
    }

    PanoramaRenderer::~PanoramaRenderer() {}

    void PanoramaRenderer::setCamera(const ze::Camera::Ptr& camera) {
        CHECK(camera);
        camera_ = camera;

        precomputePixelToBearingLookupTable();

        // Preallocate memory for the warping maps
        cv::Size sensor_size(camera->width(), camera->height());
        map_x_ = cv::Mat_<float>::zeros(sensor_size);
        map_y_ = cv::Mat_<float>::zeros(sensor_size);
    }

    void PanoramaRenderer::precomputePixelToBearingLookupTable() {
        // points_C is a matrix containing the coordinates of each pixel
        // coordinate in the image plane
        ze::Keypoints points_C(2, camera_->width() * camera_->height());
        for (int y = 0; y < camera_->height(); ++y)
            for (int x = 0; x < camera_->width(); ++x)
                points_C.col(x + camera_->width() * y) = ze::Keypoint(x, y);
        bearings_C_ = camera_->backProjectVectorized(points_C);
        bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
    }

    void PanoramaRenderer::projectToPanorama(
        const Eigen::Ref<const ze::Bearing>& f, ze::Keypoint* keypoint
    ) const {
        CHECK(keypoint);
        static constexpr FloatType kEpsilon = 1e-10;

        const FloatType X = f[0];
        const FloatType Y = f[1];
        const FloatType Z = f[2];

        const FloatType rho2 = X * X + Y * Y + Z * Z;
        const FloatType rho = std::sqrt(rho2);

        CHECK_GE(rho, kEpsilon);

        const FloatType phi = std::atan2(X, Z);
        const FloatType theta = std::asin(-Y / rho);

        *keypoint = principal_point_ + ze::Keypoint(phi * fx_, -theta * fy_);
    }

    void PanoramaRenderer::render(
        const Transformation& T_W_C,
        const ImagePtr& out_image,
        const DepthmapPtr& out_depthmap
    ) const {
        CHECK_EQ(out_image->rows, camera_->height());
        CHECK_EQ(out_image->cols, camera_->width());

        const Transformation::RotationMatrix R_P_C =
            R_W_P_.inverse().getRotationMatrix() * T_W_C.getRotationMatrix();

        ze::Bearings bearings_P = R_P_C * bearings_C_;
        ze::Keypoint keypoint;
        for (int y = 0; y < camera_->height(); ++y) {
            for (int x = 0; x < camera_->width(); ++x) {
                const ze::Bearing& f = bearings_P.col(x + camera_->width() * y);

                // project bearing vector to panorama image
                projectToPanorama(f, &keypoint);
                map_x_(y, x) = static_cast<float>(keypoint[0]);
                map_y_(y, x) = static_cast<float>(keypoint[1]);
            }
        }

        cv::remap(
            texture_,
            *out_image,
            map_x_,
            map_y_,
            CV_INTER_LINEAR,
            cv::BORDER_REFLECT_101
        );

        if (out_depthmap) {
            static constexpr ImageFloatType infinity = 1e10;
            out_depthmap->setTo(infinity);
        }
    }
} // namespace event_camera_simulator
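
Restating projectToPanorama() as equations (nothing beyond what the code above already does): for a W x H texture the constructor sets f_x = W / (2\pi) and f_y = H / \pi, so a bearing f = (X, Y, Z) with norm \rho maps to

\[
u = c_x + f_x \,\operatorname{atan2}(X, Z),
\qquad
v = c_y - f_y \arcsin(-Y / \rho).
\]

The azimuth range (-\pi, \pi] thus spans the full texture width and the elevation range [-\pi/2, \pi/2] the full height; for example, the forward bearing (0, 0, 1) lands exactly on the principal point, and (1, 0, 0), i.e. 90 degrees to the right, lands at u = c_x + W/4.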


@ -1,146 +1,158 @@
#include <esim/common/utils.hpp>
#include <esim/imp_planar_renderer/planar_renderer.hpp>
#include <glog/logging.h>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/common/timer_collection.hpp>

DECLARE_TIMER(
    TimerPlanarRenderer,
    timers_planar_renderer,
    compute_remap_maps,
    inverse_homography,
    remap,
    compute_depthmap
);

namespace event_camera_simulator {

    PlanarRenderer::PlanarRenderer(
        const Image& texture,
        const Camera::Ptr& cam_src,
        const Transformation& T_W_P,
        FloatType z_min,
        FloatType z_max,
        bool extend_border
    )
        : texture_(texture),
          cam_src_(cam_src),
          T_W_P_(T_W_P),
          z_min_(z_min),
          z_max_(z_max),
          extend_border_(extend_border) {
        K_src_ = calibrationMatrixFromCamera(cam_src_);
        K_src_inv_ = K_src_.inverse();
        VLOG(1) << "K_src = " << K_src_;
        LOG(INFO) << "T_W_P = " << T_W_P_;
    }

    PlanarRenderer::~PlanarRenderer() {
        timers_planar_renderer.saveToFile("/tmp", "planar_renderer.csv");
    }

    void PlanarRenderer::setCamera(const ze::Camera::Ptr& camera) {
        CHECK(camera);
        camera_ = camera;

        precomputePixelToBearingLookupTable();

        // Preallocate memory for the warping maps
        cv::Size sensor_size(camera->width(), camera->height());
        map_x_ = cv::Mat_<float>::zeros(sensor_size);
        map_y_ = cv::Mat_<float>::zeros(sensor_size);
    }

    void PlanarRenderer::precomputePixelToBearingLookupTable() {
        // points_C is a matrix containing the coordinates of each pixel
        // coordinate in the image plane
        ze::Keypoints points_C(2, camera_->width() * camera_->height());
        for (int y = 0; y < camera_->height(); ++y)
            for (int x = 0; x < camera_->width(); ++x)
                points_C.col(x + camera_->width() * y) = ze::Keypoint(x, y);
        bearings_C_ = camera_->backProjectVectorized(points_C);
        bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
    }

    void PlanarRenderer::render(
        const Transformation& T_W_C,
        const ImagePtr& out_image,
        const DepthmapPtr& out_depthmap
    ) const {
        CHECK_EQ(out_image->rows, camera_->height());
        CHECK_EQ(out_image->cols, camera_->width());

        const Transformation T_C_W = T_W_C.inverse();
        const Transformation T_C_P = T_C_W * T_W_P_;
        const Transformation T_P_C = T_C_P.inverse();

        ze::Matrix33 tmp;
        tmp.col(0) = T_C_P.getRotationMatrix().col(0);
        tmp.col(1) = T_C_P.getRotationMatrix().col(1);
        tmp.col(2) = T_C_P.getPosition();
        VLOG_EVERY_N(3, 100) << "T_C_P = " << T_C_P;
        VLOG_EVERY_N(3, 100) << "tmp = " << tmp;

        HomographyMatrix H_C_P = tmp;
        HomographyMatrix H_P_C;
        {
            auto t =
                timers_planar_renderer[TimerPlanarRenderer::inverse_homography]
                    .timeScope();
            H_P_C = H_C_P.inverse();
        }

        {
            auto t =
                timers_planar_renderer[TimerPlanarRenderer::compute_remap_maps]
                    .timeScope();
            ze::Bearings bearings_P = H_P_C * bearings_C_;
            for (int y = 0; y < camera_->height(); ++y) {
                for (int x = 0; x < camera_->width(); ++x) {
                    const ze::Bearing& f =
                        bearings_P.col(x + camera_->width() * y);
                    map_x_(y, x) = static_cast<float>(
                        K_src_(0, 0) * f[0] / f[2] + K_src_(0, 2)
                    );
                    map_y_(y, x) = static_cast<float>(
                        K_src_(1, 1) * f[1] / f[2] + K_src_(1, 2)
                    );
                }
            }
        }

        int border =
            (extend_border_) ? cv::BORDER_REFLECT : cv::BORDER_CONSTANT;
        {
            auto t =
                timers_planar_renderer[TimerPlanarRenderer::remap].timeScope();
            cv::remap(
                texture_,
                *out_image,
                map_x_,
                map_y_,
                cv::INTER_LINEAR,
                border,
                0.8
            );
        }

        // Compute depth map in dst
        {
            auto t =
                timers_planar_renderer[TimerPlanarRenderer::compute_depthmap]
                    .timeScope();
            Vector3 X;
            for (int y = 0; y < camera_->height(); ++y) {
                for (int x = 0; x < camera_->width(); ++x) {
                    X = bearings_C_.col(x + camera_->width() * y);
                    // @TODO: derive this formula for the depth to explain it
                    const FloatType numer = -T_P_C.getPosition()[2];
                    const FloatType denom =
                        T_P_C.getRotationMatrix().row(2) * X;
                    const FloatType z = numer / denom;

                    if (out_depthmap)
                        (*out_depthmap)(y, x) = (ImageFloatType) z;

                    // clip depth to a reasonable depth range
                    if (z < z_min_ || z > z_max_)
                        (*out_image)(y, x) = 0;
                }
            }
        }
    }
} // namespace event_camera_simulator


@ -1,5 +1,3 @@
#include <esim/rendering/renderer_base.hpp>

namespace event_camera_simulator {} // namespace event_camera_simulator


@ -1,164 +1,235 @@
#include <esim/common/utils.hpp>
#include <esim/rendering/panorama_renderer.hpp>
#include <esim/rendering/planar_renderer.hpp>
#include <esim/rendering/renderer_factory.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/cameras/camera_impl.hpp>
#include <ze/cameras/camera_models.hpp>
#include <ze/common/logging.hpp>

DEFINE_int32(renderer_type, 0, "0: Planar renderer, 1: Panorama renderer");

DEFINE_string(
    renderer_texture,
    "",
    "Path to image which will be used to texture the plane"
);

DEFINE_double(
    renderer_hfov_cam_source_deg,
    130.0,
    "Horizontal FoV of the source camera (that captured the image on "
    "the plane)"
);

DEFINE_double(
    renderer_preprocess_median_blur,
    0,
    "Kernel size of the preprocessing median blur."
);

DEFINE_double(
    renderer_preprocess_gaussian_blur,
    0,
    "Amount of preprocessing Gaussian blur."
);

DEFINE_double(
    renderer_plane_x,
    0.0,
    "x position of the center of the plane, in world coordinates"
);

DEFINE_double(
    renderer_plane_y,
    0.0,
    "y position of the center of the plane, in world coordinates"
);

DEFINE_double(
    renderer_plane_z,
    -1.0,
    "z position of the center of the plane, in world coordinates"
);

DEFINE_double(
    renderer_plane_qw,
    0.0,
    "w component of the quaternion q_W_P (orientation of the plane "
    "with respect to the world)"
);

DEFINE_double(
    renderer_plane_qx,
    1.0,
    "x component of the quaternion q_W_P (orientation of the plane "
    "with respect to the world)"
);

DEFINE_double(
    renderer_plane_qy,
    0.0,
    "y component of the quaternion q_W_P (orientation of the plane "
    "with respect to the world)"
);

DEFINE_double(
    renderer_plane_qz,
    0.0,
    "z component of the quaternion q_W_P (orientation of the plane "
    "with respect to the world)"
);

DEFINE_double(renderer_z_min, 0.01, "Minimum clipping distance.");

DEFINE_double(renderer_z_max, 10.0, "Maximum clipping distance.");

DEFINE_bool(
    renderer_extend_border,
    false,
    "Whether to extend the borders of the plane to infinity or not."
);

DEFINE_double(
    renderer_panorama_qw,
    0.0,
    "w component of the quaternion q_W_P (orientation of the "
    "panorama with respect to the world)"
);

DEFINE_double(
    renderer_panorama_qx,
    1.0,
    "x component of the quaternion q_W_P (orientation of the "
    "panorama with respect to the world)"
);

DEFINE_double(
    renderer_panorama_qy,
    0.0,
    "y component of the quaternion q_W_P (orientation of the "
    "panorama with respect to the world)"
);

DEFINE_double(
    renderer_panorama_qz,
    0.0,
    "z component of the quaternion q_W_P (orientation of the "
    "panorama with respect to the world)"
);

namespace event_camera_simulator {

    bool loadPreprocessedImage(const std::string& path_to_img, cv::Mat* img) {
        CHECK(img);
        VLOG(1) << "Loading texture from file: " << path_to_img << ".";
        *img = cv::imread(path_to_img, 0);
        if (!img->data) {
            LOG(FATAL) << "Could not open image at: " << path_to_img << ".";
            return false;
        }

        if (FLAGS_renderer_preprocess_median_blur > 1) {
            VLOG(1) << "Pre-filtering the texture with median filter of size: "
                    << FLAGS_renderer_preprocess_median_blur << ".";
            cv::medianBlur(*img, *img, FLAGS_renderer_preprocess_median_blur);
        }

        if (FLAGS_renderer_preprocess_gaussian_blur > 0) {
            VLOG(1) << "Pre-filtering the texture with Gaussian filter of "
                       "size: "
                    << FLAGS_renderer_preprocess_gaussian_blur << ".";
            cv::GaussianBlur(
                *img,
                *img,
                cv::Size(15, 15),
                FLAGS_renderer_preprocess_gaussian_blur
            );
        }

        img->convertTo(*img, cv::DataType<ImageFloatType>::type, 1.0 / 255.0);

        return true;
    }

    Renderer::Ptr loadRendererFromGflags() {
        Renderer::Ptr renderer;

        switch (FLAGS_renderer_type) {
        case 0: // Planar renderer
        {
            cv::Mat img_src;
            if (!loadPreprocessedImage(FLAGS_renderer_texture, &img_src))
                return nullptr;

            double f_src = hfovToFocalLength(
                FLAGS_renderer_hfov_cam_source_deg,
                img_src.cols
            );
            Camera::Ptr cam_src = std::make_shared<ze::PinholeCamera>(
                img_src.cols,
                img_src.rows,
                ze::CameraType::Pinhole,
                (Vector4() << f_src,
                 f_src,
                 0.5 * img_src.cols,
                 0.5 * img_src.rows)
                    .finished(),
                ze::VectorX()
            );

            Transformation T_W_P;
            T_W_P.getPosition() = ze::Position(
                FLAGS_renderer_plane_x,
                FLAGS_renderer_plane_y,
                FLAGS_renderer_plane_z
            );
            T_W_P.getRotation() = ze::Quaternion(
                FLAGS_renderer_plane_qw,
                FLAGS_renderer_plane_qx,
                FLAGS_renderer_plane_qy,
                FLAGS_renderer_plane_qz
            );

            renderer.reset(new PlanarRenderer(
                img_src,
                cam_src,
                T_W_P,
                FLAGS_renderer_z_min,
                FLAGS_renderer_z_max,
                FLAGS_renderer_extend_border
            ));

            break;
        }
        case 1: // Panorama renderer
        {
            cv::Mat img_src;
            if (!loadPreprocessedImage(FLAGS_renderer_texture, &img_src))
                return nullptr;

            Transformation::Rotation R_W_P;
            R_W_P = ze::Quaternion(
                FLAGS_renderer_panorama_qw,
                FLAGS_renderer_panorama_qx,
                FLAGS_renderer_panorama_qy,
                FLAGS_renderer_panorama_qz
            );

            renderer.reset(new PanoramaRenderer(img_src, R_W_P));
            break;
        }
        default: {
            LOG(FATAL) << "Renderer type is not known.";
            break;
        }
        }

        return renderer;
    }

} // namespace event_camera_simulator
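
A minimal driver sketch showing how the factory is meant to be consumed (the binary and file names below are placeholders, not part of this commit; only the gflags/glog initialization calls and the factory function above are assumed):

// Hypothetical driver -- illustration only.
// Example: ./renderer_demo --renderer_type=0 --renderer_texture=texture.png
#include <esim/rendering/renderer_factory.hpp>
#include <gflags/gflags.h>
#include <glog/logging.h>

int main(int argc, char** argv) {
    google::ParseCommandLineFlags(&argc, &argv, true);
    google::InitGoogleLogging(argv[0]);

    event_camera_simulator::Renderer::Ptr renderer =
        event_camera_simulator::loadRendererFromGflags();
    if (!renderer) {
        LOG(ERROR) << "Could not create renderer from flags.";
        return 1;
    }
    // ... hand the renderer to the simulator ...
    return 0;
}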


@ -4,31 +4,39 @@
namespace event_camera_simulator {

    class UnrealCvClient; // fwd

    class UnrealCvRenderer : public Renderer {
    public:
        UnrealCvRenderer();

        //! Render image and depth map for a given camera pose
        virtual void render(
            const Transformation& T_W_C,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap
        ) const;

        void render(
            const Transformation& T_W_C,
            const std::vector<Transformation>& T_W_OBJ,
            const ImagePtr& out_image,
            const DepthmapPtr& out_depthmap
        ) const {
            render(T_W_C, out_image, out_depthmap);
        }

        //! Returns true if the rendering engine can compute optic flow,
        //! false otherwise
        virtual bool canComputeOpticFlow() const override {
            return false;
        }

        virtual void setCamera(const ze::Camera::Ptr& camera) override;

    private:
        std::shared_ptr<UnrealCvClient> client_;
        mutable size_t frame_idx_;
    };
} // namespace event_camera_simulator


@ -4,85 +4,86 @@
namespace event_camera_simulator {

    /**
     * https://github.com/EpicGames/UnrealEngine/blob/dbced2dd59f9f5dfef1d7786fd67ad2970adf95f/Engine/Source/Runtime/Core/Public/Math/Rotator.h#L580
     * Helper function for eulerFromQuatSingularityTest, angles are expected
     * to be given in degrees
     **/
    inline FloatType clampAxis(FloatType angle) {
        // returns angle in the range (-360,360)
        angle = std::fmod(angle, 360.f);

        if (angle < 0.f) {
            // shift to [0,360) range
            angle += 360.f;
        }

        return angle;
    }

    /**
     * https://github.com/EpicGames/UnrealEngine/blob/dbced2dd59f9f5dfef1d7786fd67ad2970adf95f/Engine/Source/Runtime/Core/Public/Math/Rotator.h#L595$
     * Helper function for eulerFromQuatSingularityTest, angles are expected
     * to be given in degrees
     **/
    inline FloatType normalizeAxis(FloatType angle) {
        angle = clampAxis(angle);

        if (angle > 180.f) {
            // shift to (-180,180]
            angle -= 360.f;
        }

        return angle;
    }

    /**
     * https://github.com/EpicGames/UnrealEngine/blob/f794321ffcad597c6232bc706304c0c9b4e154b2/Engine/Source/Runtime/Core/Private/Math/UnrealMath.cpp#L540
     * Quaternion given in (x,y,z,w) representation
     **/
    void quaternionToEulerUnrealEngine(
        const Transformation::Rotation& q,
        FloatType& yaw,
        FloatType& pitch,
        FloatType& roll
    ) {
        const FloatType X = q.x();
        const FloatType Y = q.y();
        const FloatType Z = q.z();
        const FloatType W = q.w();

        const FloatType SingularityTest = Z * X - W * Y;
        const FloatType YawY = 2.f * (W * Z + X * Y);
        const FloatType YawX = (1.f - 2.f * (Y * Y + Z * Z));

        // reference
        // http://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
        // http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToEuler/

        // this value was found from experience; the above websites recommend
        // different values, but those didn't work for us, so I went through
        // different tests and finally found a threshold with which both
        // worlds live happily.
        const FloatType SINGULARITY_THRESHOLD = 0.4999995;
        const FloatType RAD_TO_DEG = (180.0) / CV_PI;

        if (SingularityTest < -SINGULARITY_THRESHOLD) {
            pitch = -90.;
            yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
            roll = normalizeAxis(-yaw - (2.f * std::atan2(X, W) * RAD_TO_DEG));
        } else if (SingularityTest > SINGULARITY_THRESHOLD) {
            pitch = 90.;
            yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
            roll = normalizeAxis(yaw - (2.f * std::atan2(X, W) * RAD_TO_DEG));
        } else {
            pitch = std::asin(2.f * (SingularityTest)) * RAD_TO_DEG;
            yaw = std::atan2(YawY, YawX) * RAD_TO_DEG;
            roll = std::atan2(
                       -2.f * (W * X + Y * Z),
                       (1.f - 2.f * (X * X + Y * Y))
                   ) * RAD_TO_DEG;
        }
    }
} // namespace event_camera_simulator
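
A quick sanity check of the non-singular branch (a worked example, not from the source): a pure yaw of \psi about Z corresponds to (x, y, z, w) = (0, 0, \sin(\psi/2), \cos(\psi/2)). Then SingularityTest = zx - wy = 0, YawY = 2wz = \sin\psi and YawX = 1 - 2z^2 = \cos\psi, so yaw = atan2(\sin\psi, \cos\psi) = \psi, pitch = \arcsin(0) = 0 and roll = atan2(0, 1) = 0, as expected for a yaw-only rotation.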


@ -1,10 +1,10 @@
#include <esim/imp_unrealcv_renderer/unrealcv_renderer.hpp>
#include <esim/imp_unrealcv_renderer/utils.hpp>
#include <esim/unrealcv_bridge/unrealcv_bridge.hpp>
#include <iomanip>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/common/path_utils.hpp>

DEFINE_double(x_offset, 0, "X offset of the trajectory, in meters");
DEFINE_double(y_offset, 0, "Y offset of the trajectory, in meters");
@ -13,153 +13,184 @@ DECLARE_double(trajectory_multiplier_x);
DECLARE_double(trajectory_multiplier_y);
DECLARE_double(trajectory_multiplier_z);

DEFINE_int32(
    unrealcv_post_median_blur,
    0,
    "If > 0, median blur the raw UnrealCV images "
    "with a median filter of this size"
);

DEFINE_double(
    unrealcv_post_gaussian_blur_sigma,
    0,
    "If sigma > 0, Gaussian blur the raw UnrealCV images "
    "with a Gaussian filter of standard deviation sigma."
);

DEFINE_string(
    unrealcv_output_directory,
    "",
    "Output directory in which to output the raw RGB images."
);

namespace event_camera_simulator {

    UnrealCvRenderer::UnrealCvRenderer() {
        client_ = std::make_shared<UnrealCvClient>("localhost", "9000");
        frame_idx_ = 0;
    }

    void UnrealCvRenderer::setCamera(const ze::Camera::Ptr& camera) {
        camera_ = camera;

        // compute the horizontal field of view of the camera
        ze::VectorX intrinsics = camera_->projectionParameters();
        const FloatType fx = intrinsics(0);
        const FloatType hfov_deg =
            2 * std::atan(0.5 * (FloatType) camera_->width() / fx) * 180.
            / CV_PI;

        client_->setCameraFOV(static_cast<float>(hfov_deg));
        client_->setCameraSize(camera->width(), camera->height());
    }

    void UnrealCvRenderer::render(
        const Transformation& T_W_C,
        const ImagePtr& out_image,
        const DepthmapPtr& out_depthmap
    ) const {
        CHECK_EQ(out_image->rows, camera_->height());
        CHECK_EQ(out_image->cols, camera_->width());

        VLOG(1) << "T_W_C (ZE) = " << T_W_C;
        const Transformation::TransformationMatrix mT_ZE_C =
            T_W_C.getTransformationMatrix();

        Transformation::TransformationMatrix mT_UE_ZE;
        mT_UE_ZE << 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1;

        Transformation::TransformationMatrix mT_C_UEC;
        mT_C_UEC << 0, 1, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0, 1;

        // rotate 90 deg to the right so that R_W_C = Identity <=> euler angle
        // (0,0,0) in UnrealCV
        // Transformation::TransformationMatrix mT_rotate_90_right;
        // mT_rotate_90_right << 0, -1, 0, 0,
        //                       1, 0, 0, 0,
        //                       0, 0, 1, 0,
        //                       0, 0, 0, 1;
        // mT_rotate_90_right << 1, 0, 0, 0,
        //                       0, 1, 0, 0,
        //                       0, 0, 1, 0,
        //                       0, 0, 0, 1;
        // const Transformation::TransformationMatrix mT_UE_UEC =
        //     mT_rotate_90_right * mT_UE_ZE * mT_ZE_C * mT_C_UEC;

        const Transformation::TransformationMatrix mT_UE_UEC =
            mT_UE_ZE * mT_ZE_C * mT_C_UEC;
        const Transformation T_UE_UEC(mT_UE_UEC);

        VLOG(1) << "T_ZE_C = " << mT_ZE_C;
        VLOG(1) << "T_UE_UEC = " << T_UE_UEC;

        FloatType yaw, pitch, roll;
        quaternionToEulerUnrealEngine(T_UE_UEC.getRotation(), yaw, pitch, roll);

        const FloatType x_offset = static_cast<FloatType>(FLAGS_x_offset);
        const FloatType y_offset = static_cast<FloatType>(FLAGS_y_offset);
        const FloatType z_offset = static_cast<FloatType>(FLAGS_z_offset);

        const FloatType x =
            100.
            * (FLAGS_trajectory_multiplier_x * T_UE_UEC.getPosition()[0]
               + x_offset);
        const FloatType y =
            100.
            * (FLAGS_trajectory_multiplier_y * T_UE_UEC.getPosition()[1]
               + y_offset);
        const FloatType z =
            100.
            * (FLAGS_trajectory_multiplier_z * T_UE_UEC.getPosition()[2]
               + z_offset);

        VLOG(1) << yaw << " " << pitch << " " << roll;

        CameraData cam_data = {0, pitch, yaw, roll, x, y, z};
        client_->setCamera(cam_data);

        cv::Mat img = client_->getImage(0);
        VLOG(5) << "Got image from the UnrealCV client";

        // (optionally) save raw RGB image to the output directory
        if (FLAGS_unrealcv_output_directory != "") {
            std::stringstream ss_nr;
            ss_nr << std::setw(10) << std::setfill('0') << frame_idx_;
            std::string path_frame = ze::joinPath(
                FLAGS_unrealcv_output_directory,
                "frame_" + ss_nr.str() + ".png"
            );
            VLOG(1) << "Saving raw RGB image to: " << path_frame;
            cv::imwrite(path_frame, img, {cv::IMWRITE_PNG_COMPRESSION, 9});
        }

        cv::Mat img_gray;
        cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);

        if (FLAGS_unrealcv_post_median_blur > 0)
            cv::medianBlur(img_gray, img_gray, FLAGS_unrealcv_post_median_blur);

        if (FLAGS_unrealcv_post_gaussian_blur_sigma > 0) {
            cv::GaussianBlur(
                img_gray,
                img_gray,
                cv::Size(-1, -1),
                FLAGS_unrealcv_post_gaussian_blur_sigma
            );
        }

        cv::resize(
            img_gray,
            img_gray,
            cv::Size(camera_->width(), camera_->height())
        );
        img_gray.convertTo(
            *out_image,
            cv::DataType<ImageFloatType>::type,
            1. / 255.
        );

        cv::Mat depth = client_->getDepth(0);
        VLOG(5) << "Got depth map from the UnrealCV client";
        CHECK_EQ(depth.type(), CV_32F);

        cv::resize(depth, depth, cv::Size(camera_->width(), camera_->height()));
        depth.convertTo(*out_depthmap, cv::DataType<ImageFloatType>::type);

        // the depth provided by the UnrealCV client is the distance from the
        // scene to the camera center; we need to convert it to the distance
        // to the image plane (see GitHub issue:
        // https://github.com/unrealcv/unrealcv/issues/14)
        const ImageFloatType yc =
            0.5 * static_cast<ImageFloatType>(camera_->height()) - 1.0;
        const ImageFloatType xc =
            0.5 * static_cast<ImageFloatType>(camera_->width()) - 1.0;
        const ImageFloatType f =
            static_cast<ImageFloatType>(camera_->projectionParameters()(0));
        for (int y = 0; y < camera_->height(); ++y) {
            for (int x = 0; x < camera_->width(); ++x) {
                const ImageFloatType point_depth = (*out_depthmap)(y, x);
                const ImageFloatType dx = static_cast<ImageFloatType>(x) - xc;
                const ImageFloatType dy = static_cast<ImageFloatType>(y) - yc;
                const ImageFloatType distance_from_center =
                    std::sqrt(dx * dx + dy * dy);
                const ImageFloatType plane_depth =
                    point_depth
                    / std::sqrt(1.0 + std::pow(distance_from_center / f, 2));
                (*out_depthmap)(y, x) = plane_depth;
            }
        }

        frame_idx_++;
    }
} // namespace event_camera_simulator
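
The depth conversion in the final loop is plain pinhole geometry (a restatement of the code above; the only assumption is that f is the focal length in pixels). A pixel at radius r = sqrt(dx^2 + dy^2) from the principal point looks along a ray tilted by an angle \alpha off the optical axis with \tan\alpha = r / f. If d is the ray distance returned by UnrealCV and z the depth to the image plane, then

\[
d = \frac{z}{\cos\alpha} = z \sqrt{1 + (r/f)^2}
\quad\Longrightarrow\quad
z = \frac{d}{\sqrt{1 + (r/f)^2}},
\]

so the two agree at the principal point (r = 0), while at r = f (45 degrees off axis) the plane depth is d / \sqrt{2}.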