Reformatted the project to make it more readable (to me)

Arnaud Fauconnet
2023-07-17 11:45:28 +02:00
parent de6743207d
commit 4738ae7444
66 changed files with 6713 additions and 5880 deletions


@@ -1,72 +1,74 @@
#pragma once

#include <deque>
#include <esim/common/types.hpp>
#include <ze/common/time_conversions.hpp>

namespace event_camera_simulator {

    class ImageBuffer {
    public:
        ZE_POINTER_TYPEDEFS(ImageBuffer);

        struct ImageData {
            ImageData(Image img, Time stamp, Duration exposure_time)
                : image(img),
                  stamp(stamp),
                  exposure_time(exposure_time) {}

            Image image;
            Time stamp;
            Duration exposure_time; // timestamp since last image (0 if this is
                                    // the first image)
        };

        using ExposureImage = std::pair<Duration, Image>;

        // Rolling image buffer of maximum size 'buffer_size_ns'.
        ImageBuffer(Duration buffer_size_ns): buffer_size_ns_(buffer_size_ns) {}

        void addImage(Time t, const Image& img);

        std::deque<ImageData> getRawBuffer() const {
            return data_;
        }

        size_t size() const {
            return data_.size();
        }

        Duration getExposureTime() const {
            return buffer_size_ns_;
        }

    private:
        Duration buffer_size_ns_;
        std::deque<ImageData> data_;
    };

    /*
     * The CameraSimulator takes as input a sequence of stamped images,
     * assumed to be sampled at a "sufficiently high" framerate and with
     * floating-point precision, and treats each image as a measure of
     * irradiance.
     * From this, it simulates a real camera, including motion blur.
     *
     * @TODO: simulate a non-linear camera response curve, shot noise, etc.
     */
    class CameraSimulator {
    public:
        CameraSimulator(double exposure_time_ms)
            : exposure_time_(ze::secToNanosec(exposure_time_ms / 1000.0)) {
            buffer_.reset(new ImageBuffer(exposure_time_));
        }

        bool imageCallback(
            const Image& img, Time time, const ImagePtr& camera_image
        );

    private:
        ImageBuffer::Ptr buffer_;
        const Duration exposure_time_;
    };

} // namespace event_camera_simulator
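
The exposure logic itself (addImage and CameraSimulator::imageCallback) lives in camera_simulator.cpp and is mostly outside this diff; the "*camera_image /= denom;" line further down suggests the blurred frame is an average over the buffered samples. Below is a rough, self-contained sketch of that rolling-buffer idea with simplified scalar samples instead of Image and purely illustrative names and values; it is not the repository's implementation.

// Illustrative only: averages "irradiance" samples that fall inside the
// exposure window, the same rolling-buffer idea ImageBuffer encapsulates.
// Types are simplified (double instead of Image) so the sketch is standalone.
#include <cstdint>
#include <deque>
#include <iostream>

struct Sample {
    int64_t stamp_ns;
    double irradiance;
};

double blurredSample(const std::deque<Sample>& buffer, int64_t now_ns,
                     int64_t exposure_ns) {
    double sum = 0.0;
    int count = 0;
    for (const Sample& s : buffer) {
        if (now_ns - s.stamp_ns <= exposure_ns) { // inside the exposure window
            sum += s.irradiance;
            ++count;
        }
    }
    return count > 0 ? sum / count : 0.0; // plain average as motion-blur proxy
}

int main() {
    std::deque<Sample> buffer = {{0, 0.2}, {5'000'000, 0.4}, {10'000'000, 0.9}};
    // 10 ms exposure ending at t = 10 ms covers all three samples -> 0.5
    std::cout << blurredSample(buffer, 10'000'000, 10'000'000) << "\n";
    return 0;
}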


@@ -4,51 +4,47 @@
namespace event_camera_simulator {

    /*
     * The EventSimulator takes as input a sequence of stamped images,
     * assumed to be sampled at a "sufficiently high" framerate,
     * and simulates the principle of operation of an ideal event camera
     * with a constant contrast threshold C.
     * Pixel-wise intensity values are linearly interpolated in time.
     *
     * The pixel-wise voltages are reset with the values from the first image
     * which is passed to the simulator.
     */
    class EventSimulator {
    public:
        struct Config {
            double Cp;
            double Cm;
            double sigma_Cp;
            double sigma_Cm;
            Duration refractory_period_ns;
            bool use_log_image;
            double log_eps;
        };

        using TimestampImage = cv::Mat_<ze::real_t>;

        EventSimulator(const Config& config)
            : config_(config),
              is_initialized_(false),
              current_time_(0) {}

        void init(const Image& img, Time time);
        Events imageCallback(const Image& img, Time time);

    private:
        bool is_initialized_;
        Time current_time_;
        Image ref_values_;
        Image last_img_;
        TimestampImage last_event_timestamp_;
        cv::Size size_;

        Config config_;
    };

} // namespace event_camera_simulator
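
The class comment describes the sensor model: per pixel, an event is emitted whenever the (log-)intensity crosses the last reference value by one contrast threshold C, and the event timestamp is linearly interpolated between the two image samples. The following is a minimal single-pixel sketch of that principle, mirroring the loop in event_simulator.cpp further down; threshold noise and the refractory period are left out, and the names (pixelEvents, SimpleEvent) are illustrative.

// Single-pixel illustration of the contrast-threshold model described above.
// Noise on C and the refractory period are omitted; see event_simulator.cpp
// for the full per-pixel implementation.
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

struct SimpleEvent {
    int64_t t;
    bool polarity;
};

std::vector<SimpleEvent> pixelEvents(double it, double itdt, double ref,
                                     int64_t t0, int64_t dt_ns, double C) {
    std::vector<SimpleEvent> events;
    if (std::fabs(itdt - it) < 1e-6)
        return events;
    const double pol = (itdt >= it) ? +1.0 : -1.0;
    double cross = ref;
    while (true) {
        cross += pol * C;
        const bool crossed = (pol > 0) ? (cross > it && cross <= itdt)
                                       : (cross < it && cross >= itdt);
        if (!crossed)
            break;
        // linear interpolation of the crossing time between the two samples
        const int64_t t =
            t0 + static_cast<int64_t>((cross - it) * dt_ns / (itdt - it));
        events.push_back({t, pol > 0});
    }
    return events;
}

int main() {
    // intensity rises from 0.0 to 0.25 over 1 ms with C = 0.1:
    // two positive events, at 0.4 ms and 0.8 ms
    for (const SimpleEvent& e : pixelEvents(0.0, 0.25, 0.0, 0, 1'000'000, 0.1))
        std::cout << e.t << " ns, polarity " << e.polarity << "\n";
    return 0;
}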


@@ -1,60 +1,60 @@
#pragma once

#include <esim/esim/camera_simulator.hpp>
#include <esim/esim/event_simulator.hpp>
#include <esim/visualization/publisher_interface.hpp>

namespace event_camera_simulator {

    /* The Simulator forwards the simulated images / depth maps
     * from the data provider to multiple, specialized camera simulators, such
     * as: (i) event camera simulators that simulate events based on sequences
     * of images (ii) camera simulators that simulate real cameras (including
     * motion blur, camera response function, noise, etc.)
     *
     * The Simulator then forwards the simulated data to one or more publishers.
     */
    class Simulator {
    public:
        Simulator(
            size_t num_cameras,
            const EventSimulator::Config& event_sim_config,
            double exposure_time_ms
        )
            : num_cameras_(num_cameras),
              exposure_time_(ze::millisecToNanosec(exposure_time_ms)) {
            for (size_t i = 0; i < num_cameras_; ++i) {
                event_simulators_.push_back(EventSimulator(event_sim_config));
                camera_simulators_.push_back(CameraSimulator(exposure_time_ms));
            }
        }

        ~Simulator();

        void addPublisher(const Publisher::Ptr& publisher) {
            CHECK(publisher);
            publishers_.push_back(std::move(publisher));
        }

        void dataProviderCallback(const SimulatorData& sim_data);

        void publishData(
            const SimulatorData& sim_data,
            const EventsVector& events,
            bool camera_simulator_success,
            const ImagePtrVector& camera_images
        );

    private:
        size_t num_cameras_;
        std::vector<EventSimulator> event_simulators_;

        std::vector<CameraSimulator> camera_simulators_;
        Duration exposure_time_;

        std::vector<Publisher::Ptr> publishers_;

        ImagePtrVector corrupted_camera_images_;
    };

} // namespace event_camera_simulator
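
The Simulator keeps one EventSimulator and one CameraSimulator per camera plus a vector of Publisher::Ptr, and publishData (in simulator.cpp below) loops over all publishers for each kind of data. The Publisher interface comes from publisher_interface.hpp and is not part of this diff, so the standalone sketch below only illustrates the fan-out shape, with std::function and a FakeEvents struct as stand-ins.

// Generic illustration of the fan-out the Simulator performs: each piece of
// simulated data is pushed to every registered consumer. std::function is a
// stand-in here; the real interface lives in publisher_interface.hpp.
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

struct FakeEvents {
    std::size_t count;
};

int main() {
    std::vector<std::function<void(const FakeEvents&)>> publishers;
    publishers.push_back([](const FakeEvents& e) {
        std::cout << "publisher A: " << e.count << " events\n";
    });
    publishers.push_back([](const FakeEvents& e) {
        std::cout << "publisher B: " << e.count << " events\n";
    });

    FakeEvents events{42};
    for (const auto& publish : publishers) // same loop shape as publishData()
        publish(events);
    return 0;
}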


@@ -88,7 +88,6 @@ namespace event_camera_simulator {
<< buffer_->getExposureTime() << std::endl;
exposures_file_.close();
*camera_image /= denom;
cv::Mat disp;
camera_image->convertTo(disp, CV_8U, 255);


@@ -1,119 +1,123 @@
#include <esim/esim/event_simulator.hpp>
#include <glog/logging.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/common/random.hpp>
#include <ze/common/time_conversions.hpp>

namespace event_camera_simulator {

    void EventSimulator::init(const Image& img, Time time) {
        VLOG(1) << "Initialized event camera simulator with sensor size: "
                << img.size();
        VLOG(1) << "and contrast thresholds: C+ = " << config_.Cp
                << " , C- = " << config_.Cm;
        is_initialized_ = true;
        last_img_ = img.clone();
        ref_values_ = img.clone();
        last_event_timestamp_ = TimestampImage::zeros(img.size());
        current_time_ = time;
        size_ = img.size();
    }

    Events EventSimulator::imageCallback(const Image& img, Time time) {
        CHECK_GE(time, 0);
        Image preprocessed_img = img.clone();
        if (config_.use_log_image) {
            LOG_FIRST_N(INFO, 1)
                << "Converting the image to log image with eps = "
                << config_.log_eps << ".";
            cv::log(config_.log_eps + img, preprocessed_img);
        }

        if (!is_initialized_) {
            init(preprocessed_img, time);
            return {};
        }

        // For each pixel, check if new events need to be generated since the
        // last image sample
        static constexpr ImageFloatType tolerance = 1e-6;
        Events events;
        Duration delta_t_ns = time - current_time_;

        CHECK_GT(delta_t_ns, 0u);
        CHECK_EQ(img.size(), size_);

        for (int y = 0; y < size_.height; ++y) {
            for (int x = 0; x < size_.width; ++x) {
                ImageFloatType itdt = preprocessed_img(y, x);
                ImageFloatType it = last_img_(y, x);
                ImageFloatType prev_cross = ref_values_(y, x);

                if (std::fabs(it - itdt) > tolerance) {
                    ImageFloatType pol = (itdt >= it) ? +1.0 : -1.0;
                    ImageFloatType C = (pol > 0) ? config_.Cp : config_.Cm;
                    ImageFloatType sigma_C =
                        (pol > 0) ? config_.sigma_Cp : config_.sigma_Cm;
                    if (sigma_C > 0) {
                        C += ze::sampleNormalDistribution<ImageFloatType>(
                            false,
                            0,
                            sigma_C
                        );
                        constexpr ImageFloatType minimum_contrast_threshold =
                            0.01;
                        C = std::max(minimum_contrast_threshold, C);
                    }
                    ImageFloatType curr_cross = prev_cross;
                    bool all_crossings = false;

                    do {
                        curr_cross += pol * C;

                        if ((pol > 0 && curr_cross > it && curr_cross <= itdt)
                            || (pol < 0 && curr_cross < it && curr_cross >= itdt
                            )) {
                            Duration edt =
                                (curr_cross - it) * delta_t_ns / (itdt - it);
                            Time t = current_time_ + edt;

                            // check that pixel (x,y) is not currently in a
                            // "refractory" state, i.e.
                            // |t - last_timestamp(x,y)| >= refractory_period
                            const Time last_stamp_at_xy =
                                ze::secToNanosec(last_event_timestamp_(y, x));
                            CHECK_GE(t, last_stamp_at_xy);
                            const Duration dt = t - last_stamp_at_xy;
                            if (last_event_timestamp_(y, x) == 0
                                || dt >= config_.refractory_period_ns) {
                                events.push_back(Event(x, y, t, pol > 0));
                                last_event_timestamp_(y, x) =
                                    ze::nanosecToSecTrunc(t);
                            } else {
                                VLOG(3)
                                    << "Dropping event because time since last "
                                       "event ("
                                    << dt << " ns) < refractory period ("
                                    << config_.refractory_period_ns << " ns).";
                            }
                            ref_values_(y, x) = curr_cross;
                        } else {
                            all_crossings = true;
                        }
                    } while (!all_crossings);
                } // end tolerance
            }     // end for each pixel
        }

        // update simvars for next loop
        current_time_ = time;
        last_img_ = preprocessed_img.clone(); // it is now the latest image

        // Sort the events by increasing timestamps, since this is what
        // most event processing algorithms expect
        sort(
            events.begin(),
            events.end(),
            [](const Event& a, const Event& b) -> bool { return a.t < b.t; }
        );

        return events;
    }

} // namespace event_camera_simulator
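
One detail in the loop above that is easy to miss: an event at pixel (x, y) is kept only if it is the first event at that pixel or if at least refractory_period_ns has elapsed since the previously kept event there; otherwise it is dropped (the VLOG(3) branch). A standalone single-pixel sketch of that filtering rule follows, with an illustrative function name and timestamps.

// Illustration of the per-pixel refractory-period rule used above: keep an
// event only if it is the pixel's first or if enough time has passed since
// the last kept event at that pixel.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int64_t> applyRefractoryPeriod(const std::vector<int64_t>& stamps,
                                           int64_t refractory_period_ns) {
    std::vector<int64_t> kept;
    int64_t last_kept = 0;
    bool has_previous = false;
    for (int64_t t : stamps) {
        if (!has_previous || t - last_kept >= refractory_period_ns) {
            kept.push_back(t);
            last_kept = t;
            has_previous = true;
        } // otherwise the event is dropped, as in the VLOG(3) branch above
    }
    return kept;
}

int main() {
    // events at 0, 0.3 ms, 0.6 ms, 2.0 ms with a 1 ms refractory period:
    // only 0 and 2'000'000 survive
    for (int64_t t :
         applyRefractoryPeriod({0, 300'000, 600'000, 2'000'000}, 1'000'000))
        std::cout << t << " ns\n";
    return 0;
}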


@@ -1,143 +1,160 @@
#include <esim/common/utils.hpp>
#include <esim/esim/simulator.hpp>
#include <ze/common/timer_collection.hpp>

namespace event_camera_simulator {

    DECLARE_TIMER(
        TimerEventSimulator,
        timers_event_simulator_,
        simulate_events,
        visualization
    );

    Simulator::~Simulator() {
        timers_event_simulator_.saveToFile("/tmp", "event_simulator.csv");
    }

    void Simulator::dataProviderCallback(const SimulatorData& sim_data) {
        CHECK_EQ(event_simulators_.size(), num_cameras_);

        bool camera_simulator_success;

        if (sim_data.images_updated) {
            EventsVector events(num_cameras_);
            Time time = sim_data.timestamp;

            // simulate the events and camera images for every sensor in the rig
            {
                auto t = timers_event_simulator_
                             [TimerEventSimulator::simulate_events]
                                 .timeScope();
                for (size_t i = 0; i < num_cameras_; ++i) {
                    events[i] = event_simulators_[i].imageCallback(
                        *sim_data.images[i],
                        time
                    );

                    if (corrupted_camera_images_.size() < num_cameras_) {
                        // allocate memory for the corrupted camera images and
                        // set them to 0
                        corrupted_camera_images_.emplace_back(
                            std::make_shared<Image>(sim_data.images[i]->size())
                        );
                        corrupted_camera_images_[i]->setTo(0.);
                    }

                    camera_simulator_success =
                        camera_simulators_[i].imageCallback(
                            *sim_data.images[i],
                            time,
                            corrupted_camera_images_[i]
                        );
                }
            }

            // publish the simulation data + events
            {
                auto t =
                    timers_event_simulator_[TimerEventSimulator::visualization]
                        .timeScope();
                publishData(
                    sim_data,
                    events,
                    camera_simulator_success,
                    corrupted_camera_images_
                );
            }
        } else {
            {
                // just forward the simulation data to the publisher
                auto t =
                    timers_event_simulator_[TimerEventSimulator::visualization]
                        .timeScope();
                publishData(
                    sim_data,
                    {},
                    camera_simulator_success,
                    corrupted_camera_images_
                );
            }
        }
    }

    void Simulator::publishData(
        const SimulatorData& sim_data,
        const EventsVector& events,
        bool camera_simulator_success,
        const ImagePtrVector& camera_images
    ) {
        if (publishers_.empty()) {
            LOG_FIRST_N(WARNING, 1) << "No publisher available";
            return;
        }

        Time time = sim_data.timestamp;
        const Transformation& T_W_B = sim_data.groundtruth.T_W_B;
        const TransformationVector& T_W_Cs = sim_data.groundtruth.T_W_Cs;
        const ze::CameraRig::Ptr& camera_rig = sim_data.camera_rig;

        // Publish the new data (events, images, depth maps, poses, point
        // clouds, etc.)
        if (!events.empty())
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->eventsCallback(events);

        if (sim_data.poses_updated)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->poseCallback(T_W_B, T_W_Cs, time);

        if (sim_data.twists_updated)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->twistCallback(
                    sim_data.groundtruth.angular_velocities_,
                    sim_data.groundtruth.linear_velocities_,
                    time
                );

        if (sim_data.imu_updated)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->imuCallback(
                    sim_data.specific_force_corrupted,
                    sim_data.angular_velocity_corrupted,
                    time
                );

        if (camera_rig)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->cameraInfoCallback(camera_rig, time);

        if (sim_data.images_updated) {
            for (const Publisher::Ptr& publisher : publishers_) {
                publisher->imageCallback(sim_data.images, time);

                if (camera_simulator_success && time >= exposure_time_) {
                    // the images should be timestamped at mid-exposure
                    const Time mid_exposure_time = time - 0.5 * exposure_time_;
                    publisher->imageCorruptedCallback(
                        camera_images,
                        mid_exposure_time
                    );
                }
            }
        }

        if (sim_data.depthmaps_updated)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->depthmapCallback(sim_data.depthmaps, time);

        if (sim_data.optic_flows_updated)
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->opticFlowCallback(sim_data.optic_flows, time);

        if (sim_data.depthmaps_updated && !events.empty()) {
            PointCloudVector pointclouds(num_cameras_);
            for (size_t i = 0; i < num_cameras_; ++i) {
                CHECK(sim_data.depthmaps[i]);
                pointclouds[i] = eventsToPointCloud(
                    events[i],
                    *sim_data.depthmaps[i],
                    camera_rig->atShared(i)
                );
            }
            for (const Publisher::Ptr& publisher : publishers_)
                publisher->pointcloudCallback(pointclouds, time);
        }
    }

} // namespace event_camera_simulator


@@ -1,237 +1,225 @@
#include <esim/esim/event_simulator.hpp>
#include <fstream>
#include <opencv2/highgui/highgui.hpp>
#include <ze/common/file_utils.hpp>
#include <ze/common/path_utils.hpp>
#include <ze/common/string_utils.hpp>
#include <ze/common/test_entrypoint.hpp>
#include <ze/common/time_conversions.hpp>
#include <ze/matplotlib/matplotlibcpp.hpp>

#define USE_OPENCV

namespace event_camera_simulator {

    class CSVImageLoader {
    public:
        CSVImageLoader(const std::string& path_to_data_folder)
            : path_to_data_folder_(path_to_data_folder) {
            images_in_str_.open(ze::joinPath(path_to_data_folder, "images.csv")
            );
            CHECK(images_in_str_.is_open());
        }

        bool next(int64_t& stamp, Image& img) {
            std::string line;
            if (!getline(images_in_str_, line)) {
                LOG(INFO) << "Finished reading all the images in the folder";
                return false;
            }

            if ('%' != line.at(0) && '#' != line.at(0)) {
                std::vector<std::string> items =
                    ze::splitString(line, delimiter_);
                stamp = std::stoll(items[0]);

                const std::string& path_to_image = ze::joinPath(
                    path_to_data_folder_,
                    "frame",
                    "cam_0",
                    items[1]
                );

                img = cv::imread(path_to_image, 0);
                CHECK(img.data) << "Error loading image: " << path_to_image;

                return true;
            } else {
                return next(stamp, img);
            }
        }

    private:
        std::ifstream images_in_str_;
        const char delimiter_{','};
        std::string path_to_data_folder_;
    };

} // namespace event_camera_simulator

std::string getTestDataDir(const std::string& dataset_name) {
    using namespace ze;

    const char* datapath_dir = std::getenv("ESIM_TEST_DATA_PATH");
    CHECK(datapath_dir != nullptr)
        << "Did you download the esim_test_data repository and set "
        << "the ESIM_TEST_DATA_PATH environment variable?";

    std::string path(datapath_dir);
    CHECK(isDir(path)) << "Folder does not exist: " << path;
    path = path + "/data/" + dataset_name;
    CHECK(isDir(path)) << "Dataset does not exist: " << path;
    return path;
}

TEST(EventSimulator, testImageReconstruction) {
    using namespace event_camera_simulator;

    // Load image sequence from folder
    const std::string path_to_data_folder = getTestDataDir("planar_carpet");
    CSVImageLoader reader(path_to_data_folder);

    EventSimulator::Config event_sim_config;
    event_sim_config.Cp = 0.05;
    event_sim_config.Cm = 0.03;
    event_sim_config.sigma_Cp = 0;
    event_sim_config.sigma_Cm = 0;
    event_sim_config.use_log_image = true;
    event_sim_config.log_eps = 0.001;
    EventSimulator simulator(event_sim_config);

    LOG(INFO) << "Testing event camera simulator with C+ = "
              << event_sim_config.Cp << ", C- = " << event_sim_config.Cm;

    const ImageFloatType max_reconstruction_error =
        std::max(event_sim_config.Cp, event_sim_config.Cm);

    bool is_first_image = true;
    Image I, L, L_reconstructed;
    int64_t stamp;
    while (reader.next(stamp, I)) {
        I.convertTo(I, cv::DataType<ImageFloatType>::type, 1. / 255.);
        cv::log(event_sim_config.log_eps + I, L);

        if (is_first_image) {
            // Initialize reconstructed image from the ground truth image
            L_reconstructed = L.clone();
            is_first_image = false;
        }

        Events events = simulator.imageCallback(I, stamp);

        // Reconstruct next image from previous one using the events in between
        for (const Event& e : events) {
            ImageFloatType pol = e.pol ? 1. : -1.;
            const ImageFloatType C =
                e.pol ? event_sim_config.Cp : event_sim_config.Cm;
            L_reconstructed(e.y, e.x) += pol * C;
        }

        // Check that the reconstruction error is bounded by the contrast
        // thresholds
        for (int y = 0; y < I.rows; ++y) {
            for (int x = 0; x < I.cols; ++x) {
                const ImageFloatType reconstruction_error =
                    std::fabs(L_reconstructed(y, x) - L(y, x));
                VLOG_EVERY_N(2, I.rows * I.cols) << reconstruction_error;
                EXPECT_LE(reconstruction_error, max_reconstruction_error);
            }
        }

#ifdef USE_OPENCV
        const ImageFloatType vmin = std::log(event_sim_config.log_eps);
        const ImageFloatType vmax = std::log(1.0 + event_sim_config.log_eps);
        cv::Mat disp = 255.0 * (L_reconstructed - vmin) / (vmax - vmin);
        disp.convertTo(disp, CV_8U);
        cv::imshow("Reconstructed", disp);
        cv::waitKey(1);
#endif
    }
}

TEST(EventSimulator, testEvolutionReconstructionError) {
    using namespace event_camera_simulator;

    // Load image sequence from folder
    const std::string path_to_data_folder = getTestDataDir("planar_carpet");
    CSVImageLoader reader(path_to_data_folder);

    EventSimulator::Config event_sim_config;
    event_sim_config.Cp = 0.5;
    event_sim_config.Cm = event_sim_config.Cp;
    event_sim_config.sigma_Cp = 0;
    event_sim_config.sigma_Cm = event_sim_config.sigma_Cp;
    event_sim_config.use_log_image = true;
    event_sim_config.log_eps = 0.001;
    EventSimulator simulator(event_sim_config);
    const double contrast_bias = 0.0;

    LOG(INFO) << "Testing event camera simulator with C+ = "
              << event_sim_config.Cp << ", C- = " << event_sim_config.Cm;

    std::vector<ze::real_t> times, mean_errors, stddev_errors;
    bool is_first_image = true;
    Image I, L, L_reconstructed;
    int64_t stamp;
    while (reader.next(stamp, I)) {
        LOG_EVERY_N(INFO, 50) << "t = " << ze::nanosecToSecTrunc(stamp) << " s";
        I.convertTo(I, cv::DataType<ImageFloatType>::type, 1. / 255.);
        cv::log(event_sim_config.log_eps + I, L);

        if (is_first_image) {
            // Initialize reconstructed image from the ground truth image
            L_reconstructed = L.clone();
            is_first_image = false;
        }

        Events events = simulator.imageCallback(I, stamp);

        // Reconstruct next image from previous one using the events in between
        for (const Event& e : events) {
            ImageFloatType pol = e.pol ? 1. : -1.;
            ImageFloatType C =
                e.pol ? event_sim_config.Cp : event_sim_config.Cm;
            C += contrast_bias;
            L_reconstructed(e.y, e.x) += pol * C;
        }

        // Compute the mean and stddev reconstruction error over the whole
        // image
        Image error;
        cv::absdiff(L, L_reconstructed, error);
        cv::Scalar mean_error, stddev_error;
        cv::meanStdDev(error, mean_error, stddev_error);
        VLOG(1) << "Mean error: " << mean_error << ", Stddev: " << stddev_error;

        times.push_back(ze::nanosecToSecTrunc(stamp));
        mean_errors.push_back(mean_error[0]);
        stddev_errors.push_back(stddev_error[0]);

#ifdef USE_OPENCV
        const ImageFloatType vmin = std::log(event_sim_config.log_eps);
        const ImageFloatType vmax = std::log(1.0 + event_sim_config.log_eps);
        cv::Mat disp = 255.0 * (L_reconstructed - vmin) / (vmax - vmin);
        disp.convertTo(disp, CV_8U);
        cv::imshow("Reconstructed", disp);
        cv::waitKey(1);
#endif
    }

    // Plot the mean and stddev reconstruction error over time
    ze::plt::plot(times, mean_errors, "r");
    ze::plt::plot(times, stddev_errors, "b--");
    std::stringstream title;
    title << "C = " << event_sim_config.Cp
          << ", sigma = " << event_sim_config.sigma_Cp
          << ", bias = " << contrast_bias;
    ze::plt::title(title.str());
    ze::plt::xlabel("Time (s)");
    ze::plt::ylabel("Mean / Stddev reconstruction error");
    ze::plt::grid(true);
    ze::plt::save("/tmp/evolution_reconstruction_error.pdf");
    ze::plt::show();
}
ZE_UNITTEST_ENTRYPOINT
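
The bound asserted in testImageReconstruction follows directly from the sensor model: with sigma_Cp = sigma_Cm = 0, every event moves the per-pixel reconstruction by exactly +/-C, so right after each frame the reconstruction can never be further than one threshold from the true log-intensity. Below is a tiny standalone check of that invariant for a single pixel; the values are illustrative and not taken from the test data.

// Single-pixel version of the invariant asserted in testImageReconstruction:
// integrating events of size C keeps |L_reconstructed - L| <= C.
#include <cassert>
#include <cmath>
#include <iostream>

int main() {
    const double C = 0.1;
    double L = 0.0;               // true log-intensity
    double L_reconstructed = 0.0; // event-integrated estimate

    const double samples[] = {0.07, 0.23, 0.18, 0.42}; // new log-intensities
    double ref = 0.0;                                   // last crossing level
    for (double next : samples) {
        // emit +/-C steps until the next threshold crossing would overshoot
        while (std::fabs(next - ref) >= C) {
            const double pol = (next >= ref) ? 1.0 : -1.0;
            ref += pol * C;
            L_reconstructed += pol * C;
        }
        L = next;
        assert(std::fabs(L_reconstructed - L) <= C); // the tested bound
        std::cout << "L = " << L << ", L_rec = " << L_reconstructed << "\n";
    }
    return 0;
}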