Mirror of https://github.com/karma-riuk/hdr_esim.git (synced 2024-11-16 11:20:25 +01:00)
Commit a8c2f0ca43: initial commit
README.md (new file, 19 lines)
@@ -0,0 +1,19 @@

## Features

- Accurate event simulation, guaranteed by the tight integration between the rendering engine and the event simulator
- IMU simulation
- Support for multi-camera systems
- Ground truth camera poses, IMU biases, angular/linear velocities, depth maps, and optic flow maps
- Support for camera distortion
- Different C+/C- contrast thresholds
- Basic noise simulation for event cameras (based on additive Gaussian noise on the contrast threshold)
- Motion blur simulation
- Publishing to ROS and/or saving data to rosbag

## Install

Installation instructions can be found in [our wiki](https://github.com/uzh-rpg/rpg_event_camera_simulator/wiki/Installation).

## Run

Specific instructions to run the simulator, depending on the chosen rendering engine, can be found in [our wiki](https://github.com/uzh-rpg/rpg_event_camera_simulator/wiki).
dependencies.yaml (new file, 45 lines)
@@ -0,0 +1,45 @@

repositories:
  catkin_simple:
    type: git
    url: git@github.com:catkin/catkin_simple.git
    version: master
  ze_oss:
    type: git
    url: git@github.com:uzh-rpg/ze_oss.git
    version: master
  gflags_catkin:
    type: git
    url: git@github.com:ethz-asl/gflags_catkin.git
    version: master
  glog_catkin:
    type: git
    url: git@github.com:ethz-asl/glog_catkin.git
    version: master
  eigen_catkin:
    type: git
    url: git@github.com:ethz-asl/eigen_catkin.git
    version: master
  eigen_checks:
    type: git
    url: git@github.com:ethz-asl/eigen_checks.git
    version: master
  minkindr:
    type: git
    url: git@github.com:ethz-asl/minkindr.git
    version: master
  minkindr_ros:
    type: git
    url: git@github.com:ethz-asl/minkindr_ros.git
    version: master
  yaml_cpp_catkin:
    type: git
    url: git@github.com:ethz-asl/yaml_cpp_catkin.git
    version: master
  rpg_dvs_ros:
    type: git
    url: git@github.com:uzh-rpg/rpg_dvs_ros.git
    version: master
  assimp_catkin:
    type: git
    url: git@github.com:uzh-rpg/assimp_catkin.git
    version: master
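The file above uses the `repositories:` / `type` / `url` / `version` layout read by vcstool-style workspace tools; assuming such a tool is used (an assumption on my part, the commit itself does not say), the dependencies would typically be cloned into the catkin workspace with something like `vcs import src < dependencies.yaml`, with the wiki linked from the README as the authoritative instructions.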
event_camera_simulator/.gitignore (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@

*.txt.user
esim_ros/scripts/exp_*
*.swp
*.autosave
event_camera_simulator/esim/CMakeLists.txt (new file, 30 lines)
@@ -0,0 +1,30 @@

cmake_minimum_required(VERSION 2.8.3)
project(esim)

find_package(catkin_simple REQUIRED)
find_package(OpenCV REQUIRED)
catkin_simple()

set(HEADERS
  include/esim/esim/simulator.hpp
  include/esim/esim/event_simulator.hpp
  include/esim/esim/camera_simulator.hpp
)

set(SOURCES
  src/simulator.cpp
  src/event_simulator.cpp
  src/camera_simulator.cpp
)

cs_add_library(${PROJECT_NAME} ${SOURCES} ${HEADERS})

##########
# GTESTS #
##########

catkin_add_gtest(test_event_simulator test/test_event_simulator.cpp)
target_link_libraries(test_event_simulator ${PROJECT_NAME} ${OpenCV_LIBRARIES})

cs_install()
cs_export()
event_camera_simulator/esim/include/esim/esim/camera_simulator.hpp (new file, 72 lines)
@@ -0,0 +1,72 @@

#pragma once

#include <esim/common/types.hpp>
#include <deque>
#include <ze/common/time_conversions.hpp>

namespace event_camera_simulator {

class ImageBuffer
{
public:
  ZE_POINTER_TYPEDEFS(ImageBuffer);

  struct ImageData
  {
    ImageData(Image img, Time stamp, Duration exposure_time)
      : image(img),
        stamp(stamp),
        exposure_time(exposure_time) {}

    Image image;
    Time stamp;
    Duration exposure_time; // time since the last image (0 if this is the first image)
  };

  using ExposureImage = std::pair<Duration, Image>;

  // Rolling image buffer of maximum size 'buffer_size_ns'.
  ImageBuffer(Duration buffer_size_ns)
    : buffer_size_ns_(buffer_size_ns) {}

  void addImage(Time t, const Image& img);

  std::deque<ImageData> getRawBuffer() const { return data_; }

  size_t size() const { return data_.size(); }

  Duration getExposureTime() const { return buffer_size_ns_; }

private:
  Duration buffer_size_ns_;
  std::deque<ImageData> data_;
};


/*
 * The CameraSimulator takes as input a sequence of stamped images,
 * assumed to be sampled at a "sufficiently high" framerate and with
 * floating-point precision, and treats each image as a measure of
 * irradiance.
 * From this, it simulates a real camera, including motion blur.
 *
 * @TODO: simulate a non-linear camera response curve, shot noise, etc.
 */
class CameraSimulator
{
public:
  CameraSimulator(double exposure_time_ms)
    : exposure_time_(ze::secToNanosec(exposure_time_ms / 1000.0))
  {
    buffer_.reset(new ImageBuffer(exposure_time_));
  }

  bool imageCallback(const Image& img, Time time,
                     const ImagePtr& camera_image);

private:
  ImageBuffer::Ptr buffer_;
  const Duration exposure_time_;
};

} // namespace event_camera_simulator
event_camera_simulator/esim/include/esim/esim/event_simulator.hpp (new file, 54 lines)
@@ -0,0 +1,54 @@

#pragma once

#include <esim/common/types.hpp>

namespace event_camera_simulator {

/*
 * The EventSimulator takes as input a sequence of stamped images,
 * assumed to be sampled at a "sufficiently high" framerate,
 * and simulates the principle of operation of an ideal event camera
 * with a constant contrast threshold C.
 * Pixel-wise intensity values are linearly interpolated in time.
 *
 * The pixel-wise voltages are reset with the values from the first image
 * which is passed to the simulator.
 */
class EventSimulator
{
public:

  struct Config
  {
    double Cp;
    double Cm;
    double sigma_Cp;
    double sigma_Cm;
    Duration refractory_period_ns;
    bool use_log_image;
    double log_eps;
  };

  using TimestampImage = cv::Mat_<ze::real_t>;

  EventSimulator(const Config& config)
    : config_(config),
      is_initialized_(false),
      current_time_(0)
  {}

  void init(const Image& img, Time time);
  Events imageCallback(const Image& img, Time time);

private:
  bool is_initialized_;
  Time current_time_;
  Image ref_values_;
  Image last_img_;
  TimestampImage last_event_timestamp_;
  cv::Size size_;

  Config config_;
};

} // namespace event_camera_simulator
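As a concrete illustration of the interface above, the sketch below drives an EventSimulator from a stream of stamped frames, mirroring what the unit test further down does with images loaded from CSV. The frame source `loadNextFrame` and all parameter values are hypothetical placeholders; only `EventSimulator::Config`, `imageCallback`, and the types come from this header.

```cpp
// Usage sketch only: loadNextFrame() is a hypothetical frame source,
// and the threshold / refractory values are illustrative, not recommended defaults.
#include <esim/esim/event_simulator.hpp>

namespace event_camera_simulator {

bool loadNextFrame(Image& frame, Time& stamp_ns); // hypothetical, defined elsewhere

void runEventSimulationSketch()
{
  EventSimulator::Config config;
  config.Cp = 0.05;                  // positive contrast threshold
  config.Cm = 0.03;                  // negative contrast threshold
  config.sigma_Cp = 0.0;             // no per-event threshold noise
  config.sigma_Cm = 0.0;
  config.refractory_period_ns = 0;   // 0 effectively disables the refractory period
  config.use_log_image = true;       // work on log intensity internally
  config.log_eps = 0.001;

  EventSimulator simulator(config);

  Image frame;
  Time stamp_ns;
  while(loadNextFrame(frame, stamp_ns))
  {
    // The first call only initializes the per-pixel reference values and returns no events;
    // subsequent calls return the events generated since the previous frame, sorted by timestamp.
    Events events = simulator.imageCallback(frame, stamp_ns);
    // ... consume 'events' ...
  }
}

} // namespace event_camera_simulator
```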
event_camera_simulator/esim/include/esim/esim/simulator.hpp (new file, 59 lines)
@@ -0,0 +1,59 @@

#pragma once

#include <esim/esim/event_simulator.hpp>
#include <esim/esim/camera_simulator.hpp>
#include <esim/visualization/publisher_interface.hpp>

namespace event_camera_simulator {

/* The Simulator forwards the simulated images / depth maps
 * from the data provider to multiple, specialized camera simulators, such as:
 * (i) event camera simulators that simulate events based on sequences of images
 * (ii) camera simulators that simulate real cameras
 *      (including motion blur, camera response function, noise, etc.)
 *
 * The Simulator then forwards the simulated data to one or more publishers.
 */
class Simulator
{
public:
  Simulator(size_t num_cameras,
            const EventSimulator::Config& event_sim_config,
            double exposure_time_ms)
    : num_cameras_(num_cameras),
      exposure_time_(ze::millisecToNanosec(exposure_time_ms))
  {
    for(size_t i=0; i<num_cameras_; ++i)
    {
      event_simulators_.push_back(EventSimulator(event_sim_config));
      camera_simulators_.push_back(CameraSimulator(exposure_time_ms));
    }
  }

  ~Simulator();

  void addPublisher(const Publisher::Ptr& publisher)
  {
    CHECK(publisher);
    publishers_.push_back(std::move(publisher));
  }

  void dataProviderCallback(const SimulatorData& sim_data);

  void publishData(const SimulatorData& sim_data,
                   const EventsVector& events,
                   const ImagePtrVector& camera_images);

private:
  size_t num_cameras_;
  std::vector<EventSimulator> event_simulators_;

  std::vector<CameraSimulator> camera_simulators_;
  Duration exposure_time_;

  std::vector<Publisher::Ptr> publishers_;

  ImagePtrVector corrupted_camera_images_;
};

} // namespace event_camera_simulator
event_camera_simulator/esim/package.xml (new file, 20 lines)
@@ -0,0 +1,20 @@

<?xml version="1.0"?>
<package format="2">
  <name>esim</name>
  <version>0.0.0</version>
  <description>Event camera simulator, which can simulate events from a stream of images sampled at high frequency.</description>

  <maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>

  <license>GPLv3</license>

  <buildtool_depend>catkin</buildtool_depend>
  <buildtool_depend>catkin_simple</buildtool_depend>

  <depend>esim_common</depend>
  <depend>esim_data_provider</depend>
  <depend>esim_visualization</depend>
  <depend>gflags_catkin</depend>
  <depend>glog_catkin</depend>

</package>
event_camera_simulator/esim/src/camera_simulator.cpp (new file, 62 lines)
@@ -0,0 +1,62 @@

#include <esim/esim/camera_simulator.hpp>

namespace event_camera_simulator {

void ImageBuffer::addImage(Time t, const Image& img)
{
  if(!data_.empty())
  {
    // Check that the image timestamps are monotonically increasing
    CHECK_GT(t, data_.back().stamp);
  }

  Duration exposure_time = data_.empty() ? 0 : t - data_.back().stamp;
  VLOG(2) << "Adding image to buffer with stamp: " << t
          << " and exposure time " << exposure_time;
  data_.push_back(ImageData(img.clone(), t, exposure_time));

  // Remove all the images with timestamp older than t - buffer_size_ns_
  auto first_valid_element = std::lower_bound(data_.begin(), data_.end(), t - buffer_size_ns_,
                                              [](ImageData lhs, Time rhs) -> bool { return lhs.stamp < rhs; });

  data_.erase(data_.begin(), first_valid_element);
  VLOG(3) << "first/last element in buffer: "
          << data_.front().stamp
          << " " << data_.back().stamp;
  VLOG(3) << "number of images in the buffer: " << data_.size();

  CHECK_LE(data_.back().stamp - data_.front().stamp, buffer_size_ns_);
}


bool CameraSimulator::imageCallback(const Image& img, Time time,
                                    const ImagePtr& camera_image)
{
  CHECK(camera_image);
  CHECK_EQ(camera_image->size(), img.size());

  buffer_->addImage(time, img);

  static const Time initial_time = time;
  if(time - initial_time < exposure_time_)
  {
    LOG_FIRST_N(WARNING, 1) << "The images do not cover a time span long enough to simulate the exposure time accurately.";
    return false;
  }

  // average all the images in the buffer to simulate motion blur
  camera_image->setTo(0);
  ze::real_t denom = 0.;
  for(const ImageBuffer::ImageData& img : buffer_->getRawBuffer())
  {
    *camera_image += ze::nanosecToMillisecTrunc(img.exposure_time) * img.image;
    denom += ze::nanosecToMillisecTrunc(img.exposure_time);
  }
  *camera_image /= denom;
  cv::Mat disp;
  camera_image->convertTo(disp, CV_8U, 255);

  return true;
}

} // namespace event_camera_simulator
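For reference, the averaging loop above implements motion blur as an exposure-weighted mean of the frames currently in the rolling buffer; written out, the simulated (blurred) image is

$$ B(\mathbf{u}) \;=\; \frac{\sum_{k} \Delta t_k \, I_k(\mathbf{u})}{\sum_{k} \Delta t_k}, $$

where the $I_k$ are the buffered frames, $\Delta t_k$ is the time elapsed between frame $k$ and its predecessor (the `exposure_time` field of `ImageData`), and the sum runs over the frames whose timestamps fall within the exposure window `buffer_size_ns_`.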
event_camera_simulator/esim/src/event_simulator.cpp (new file, 117 lines)
@@ -0,0 +1,117 @@

#include <esim/esim/event_simulator.hpp>
#include <ze/common/random.hpp>
#include <glog/logging.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <ze/common/time_conversions.hpp>

namespace event_camera_simulator {

void EventSimulator::init(const Image& img, Time time)
{
  VLOG(1) << "Initialized event camera simulator with sensor size: " << img.size();
  VLOG(1) << "and contrast thresholds: C+ = " << config_.Cp << " , C- = " << config_.Cm;
  is_initialized_ = true;
  last_img_ = img.clone();
  ref_values_ = img.clone();
  last_event_timestamp_ = TimestampImage::zeros(img.size());
  current_time_ = time;
  size_ = img.size();
}

Events EventSimulator::imageCallback(const Image& img, Time time)
{
  CHECK_GE(time, 0);
  Image preprocessed_img = img.clone();
  if(config_.use_log_image)
  {
    LOG_FIRST_N(INFO, 1) << "Converting the image to log image with eps = " << config_.log_eps << ".";
    cv::log(config_.log_eps + img, preprocessed_img);
  }

  if(!is_initialized_)
  {
    init(preprocessed_img, time);
    return {};
  }

  // For each pixel, check if new events need to be generated since the last image sample
  static constexpr ImageFloatType tolerance = 1e-6;
  Events events;
  Duration delta_t_ns = time - current_time_;

  CHECK_GT(delta_t_ns, 0u);
  CHECK_EQ(img.size(), size_);

  for (int y = 0; y < size_.height; ++y)
  {
    for (int x = 0; x < size_.width; ++x)
    {
      ImageFloatType itdt = preprocessed_img(y, x);
      ImageFloatType it = last_img_(y, x);
      ImageFloatType prev_cross = ref_values_(y, x);

      if (std::fabs(it - itdt) > tolerance)
      {
        ImageFloatType pol = (itdt >= it) ? +1.0 : -1.0;
        ImageFloatType C = (pol > 0) ? config_.Cp : config_.Cm;
        ImageFloatType sigma_C = (pol > 0) ? config_.sigma_Cp : config_.sigma_Cm;
        if(sigma_C > 0)
        {
          C += ze::sampleNormalDistribution<ImageFloatType>(false, 0, sigma_C);
        }
        ImageFloatType curr_cross = prev_cross;
        bool all_crossings = false;

        do
        {
          curr_cross += pol * C;

          if ((pol > 0 && curr_cross > it && curr_cross <= itdt)
              || (pol < 0 && curr_cross < it && curr_cross >= itdt))
          {
            Duration edt = (curr_cross - it) * delta_t_ns / (itdt - it);
            Time t = current_time_ + edt;

            // check that pixel (x,y) is not currently in a "refractory" state,
            // i.e. |t - last_event_timestamp(x,y)| >= refractory_period
            const Time last_stamp_at_xy = ze::secToNanosec(last_event_timestamp_(y,x));
            CHECK_GE(t, last_stamp_at_xy);
            const Duration dt = t - last_stamp_at_xy;
            if(last_event_timestamp_(y,x) == 0 || dt >= config_.refractory_period_ns)
            {
              events.push_back(Event(x, y, t, pol > 0));
              last_event_timestamp_(y,x) = ze::nanosecToSecTrunc(t);
            }
            else
            {
              VLOG(3) << "Dropping event because time since last event ("
                      << dt << " ns) < refractory period ("
                      << config_.refractory_period_ns << " ns).";
            }
            ref_values_(y,x) = curr_cross;
          }
          else
          {
            all_crossings = true;
          }
        } while (!all_crossings);
      } // end tolerance
    } // end for each pixel
  }

  // update the simulator state for the next call
  current_time_ = time;
  last_img_ = preprocessed_img.clone(); // it is now the latest image

  // Sort the events by increasing timestamps, since this is what
  // most event processing algorithms expect
  sort(events.begin(), events.end(),
       [](const Event& a, const Event& b) -> bool
  {
    return a.t < b.t;
  });

  return events;
}

} // namespace event_camera_simulator
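A short summary of the per-pixel logic above: between two frames the (log-)intensity at a pixel is assumed to vary linearly in time, so whenever the next crossing level $X_k = X_{k-1} \pm C$ falls between the previous value $I(t)$ and the new value $I(t+\Delta t)$, the event is timestamped by linear interpolation,

$$ t_{\mathrm{ev}} \;=\; t \;+\; \frac{X_k - I(t)}{I(t+\Delta t) - I(t)}\,\Delta t, $$

which is exactly the `edt` computed in the inner loop; the event is then kept only if the pixel is outside its refractory period.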
event_camera_simulator/esim/src/simulator.cpp (new file, 137 lines)
@@ -0,0 +1,137 @@

#include <esim/esim/simulator.hpp>
#include <ze/common/timer_collection.hpp>
#include <esim/common/utils.hpp>

namespace event_camera_simulator {

DECLARE_TIMER(TimerEventSimulator, timers_event_simulator_,
              simulate_events,
              visualization
              );

Simulator::~Simulator()
{
  timers_event_simulator_.saveToFile("/tmp", "event_simulator.csv");
}

void Simulator::dataProviderCallback(const SimulatorData& sim_data)
{
  CHECK_EQ(event_simulators_.size(), num_cameras_);

  if(sim_data.images_updated)
  {
    EventsVector events(num_cameras_);
    Time time = sim_data.timestamp;
    // simulate the events and camera images for every sensor in the rig
    {
      auto t = timers_event_simulator_[TimerEventSimulator::simulate_events].timeScope();
      for(size_t i=0; i<num_cameras_; ++i)
      {
        events[i] = event_simulators_[i].imageCallback(*sim_data.images[i], time);

        if(corrupted_camera_images_.size() < num_cameras_)
        {
          // allocate memory for the corrupted camera images and set them to 0
          corrupted_camera_images_.emplace_back(std::make_shared<Image>(sim_data.images[i]->size()));
          corrupted_camera_images_[i]->setTo(0.);
        }

        camera_simulators_[i].imageCallback(*sim_data.images[i], time, corrupted_camera_images_[i]);
      }
    }

    // publish the simulation data + events
    {
      auto t = timers_event_simulator_[TimerEventSimulator::visualization].timeScope();
      publishData(sim_data, events, corrupted_camera_images_);
    }
  }
  else
  {
    {
      // just forward the simulation data to the publisher
      auto t = timers_event_simulator_[TimerEventSimulator::visualization].timeScope();
      publishData(sim_data, {}, corrupted_camera_images_);
    }
  }
}

void Simulator::publishData(const SimulatorData& sim_data,
                            const EventsVector& events,
                            const ImagePtrVector& camera_images)
{
  if(publishers_.empty())
  {
    LOG_FIRST_N(WARNING, 1) << "No publisher available";
    return;
  }

  Time time = sim_data.timestamp;
  const Transformation& T_W_B = sim_data.groundtruth.T_W_B;
  const TransformationVector& T_W_Cs = sim_data.groundtruth.T_W_Cs;
  const ze::CameraRig::Ptr& camera_rig = sim_data.camera_rig;

  // Publish the new data (events, images, depth maps, poses, point clouds, etc.)
  if(!events.empty())
  {
    for(const Publisher::Ptr& publisher : publishers_)
      publisher->eventsCallback(events);
  }
  if(sim_data.poses_updated)
  {
    for(const Publisher::Ptr& publisher : publishers_)
      publisher->poseCallback(T_W_B, T_W_Cs, time);
  }
  if(sim_data.twists_updated)
  {
    for(const Publisher::Ptr& publisher : publishers_)
      publisher->twistCallback(sim_data.groundtruth.angular_velocities_,
                               sim_data.groundtruth.linear_velocities_,
                               time);
  }
  if(sim_data.imu_updated)
  {
    for(const Publisher::Ptr& publisher : publishers_)
      publisher->imuCallback(sim_data.specific_force_corrupted, sim_data.angular_velocity_corrupted, time);
  }

  if(camera_rig)
  {
    for(const Publisher::Ptr& publisher : publishers_)
      publisher->cameraInfoCallback(camera_rig, time);
  }
  if(sim_data.images_updated)
  {
    for(const Publisher::Ptr& publisher : publishers_)
    {
      publisher->imageCallback(sim_data.images, time);

      // the images should be timestamped at mid-exposure (unless it is not possible)
      const Time mid_exposure_time = (time >= 0.5 * exposure_time_) ? time - 0.5 * exposure_time_ : time;
      publisher->imageCorruptedCallback(camera_images, mid_exposure_time);
    }
  }
  if(sim_data.depthmaps_updated)
  {
    for(const Publisher::Ptr& publisher : publishers_)
      publisher->depthmapCallback(sim_data.depthmaps, time);
  }
  if(sim_data.optic_flows_updated)
  {
    for(const Publisher::Ptr& publisher : publishers_)
      publisher->opticFlowCallback(sim_data.optic_flows, time);
  }
  if(sim_data.depthmaps_updated && !events.empty())
  {
    PointCloudVector pointclouds(num_cameras_);
    for(size_t i=0; i<num_cameras_; ++i)
    {
      CHECK(sim_data.depthmaps[i]);
      pointclouds[i] = eventsToPointCloud(events[i], *sim_data.depthmaps[i], camera_rig->atShared(i));
    }
    for(const Publisher::Ptr& publisher : publishers_)
      publisher->pointcloudCallback(pointclouds, time);
  }
}

} // namespace event_camera_simulator
event_camera_simulator/esim/test/test_event_simulator.cpp (new file, 237 lines)
@@ -0,0 +1,237 @@

#include <esim/esim/event_simulator.hpp>
#include <ze/common/test_entrypoint.hpp>
#include <fstream>
#include <ze/common/file_utils.hpp>
#include <ze/common/path_utils.hpp>
#include <ze/common/string_utils.hpp>

#include <ze/common/time_conversions.hpp>
#include <ze/matplotlib/matplotlibcpp.hpp>
#include <opencv2/highgui/highgui.hpp>

#define USE_OPENCV

namespace event_camera_simulator
{

class CSVImageLoader
{
public:
  CSVImageLoader(const std::string& path_to_data_folder)
    : path_to_data_folder_(path_to_data_folder)
  {
    images_in_str_.open(ze::joinPath(path_to_data_folder, "images.csv"));
    CHECK(images_in_str_.is_open());
  }

  bool next(int64_t& stamp, Image& img)
  {
    std::string line;
    if(!getline(images_in_str_, line))
    {
      LOG(INFO) << "Finished reading all the images in the folder";
      return false;
    }

    if('%' != line.at(0) && '#' != line.at(0))
    {
      std::vector<std::string> items = ze::splitString(line, delimiter_);
      stamp = std::stoll(items[0]);

      const std::string& path_to_image
          = ze::joinPath(path_to_data_folder_, "frame", "cam_0", items[1]);

      img = cv::imread(path_to_image, 0);
      CHECK(img.data) << "Error loading image: " << path_to_image;

      return true;
    }
    else
    {
      return next(stamp, img);
    }
  }

private:
  std::ifstream images_in_str_;
  const char delimiter_{','};
  std::string path_to_data_folder_;
};

} // namespace event_camera_simulator

std::string getTestDataDir(const std::string& dataset_name)
{
  using namespace ze;

  const char* datapath_dir = std::getenv("ESIM_TEST_DATA_PATH");
  CHECK(datapath_dir != nullptr)
      << "Did you download the esim_test_data repository and set "
      << "the ESIM_TEST_DATA_PATH environment variable?";

  std::string path(datapath_dir);
  CHECK(isDir(path)) << "Folder does not exist: " << path;
  path = path + "/data/" + dataset_name;
  CHECK(isDir(path)) << "Dataset does not exist: " << path;
  return path;
}

TEST(EventSimulator, testImageReconstruction)
{
  using namespace event_camera_simulator;

  // Load image sequence from folder
  const std::string path_to_data_folder = getTestDataDir("planar_carpet");
  CSVImageLoader reader(path_to_data_folder);

  EventSimulator::Config event_sim_config;
  event_sim_config.Cp = 0.05;
  event_sim_config.Cm = 0.03;
  event_sim_config.sigma_Cp = 0;
  event_sim_config.sigma_Cm = 0;
  event_sim_config.use_log_image = true;
  event_sim_config.log_eps = 0.001;
  EventSimulator simulator(event_sim_config);

  LOG(INFO) << "Testing event camera simulator with C+ = " << event_sim_config.Cp
            << ", C- = " << event_sim_config.Cm;

  const ImageFloatType max_reconstruction_error
      = std::max(event_sim_config.Cp, event_sim_config.Cm);

  bool is_first_image = true;
  Image I, L, L_reconstructed;
  int64_t stamp;
  while(reader.next(stamp, I))
  {
    I.convertTo(I, cv::DataType<ImageFloatType>::type, 1./255.);
    cv::log(event_sim_config.log_eps + I, L);

    if(is_first_image)
    {
      // Initialize reconstructed image from the ground truth image
      L_reconstructed = L.clone();
      is_first_image = false;
    }

    Events events = simulator.imageCallback(I, stamp);

    // Reconstruct next image from previous one using the events in between
    for(const Event& e : events)
    {
      ImageFloatType pol = e.pol ? 1. : -1.;
      const ImageFloatType C = e.pol ? event_sim_config.Cp : event_sim_config.Cm;
      L_reconstructed(e.y,e.x) += pol * C;
    }

    // Check that the reconstruction error is bounded by the contrast thresholds
    for(int y=0; y<I.rows; ++y)
    {
      for(int x=0; x<I.cols; ++x)
      {
        const ImageFloatType reconstruction_error = std::fabs(L_reconstructed(y,x) - L(y,x));
        VLOG_EVERY_N(2, I.rows * I.cols) << reconstruction_error;
        EXPECT_LE(reconstruction_error, max_reconstruction_error);
      }
    }

#ifdef USE_OPENCV
    const ImageFloatType vmin = std::log(event_sim_config.log_eps);
    const ImageFloatType vmax = std::log(1.0 + event_sim_config.log_eps);
    cv::Mat disp = 255.0 * (L_reconstructed - vmin) / (vmax - vmin);
    disp.convertTo(disp, CV_8U);
    cv::imshow("Reconstructed", disp);
    cv::waitKey(1);
#endif
  }
}


TEST(EventSimulator, testEvolutionReconstructionError)
{
  using namespace event_camera_simulator;

  // Load image sequence from folder
  const std::string path_to_data_folder = getTestDataDir("planar_carpet");
  CSVImageLoader reader(path_to_data_folder);

  EventSimulator::Config event_sim_config;
  event_sim_config.Cp = 0.5;
  event_sim_config.Cm = event_sim_config.Cp;
  event_sim_config.sigma_Cp = 0;
  event_sim_config.sigma_Cm = event_sim_config.sigma_Cp;
  event_sim_config.use_log_image = true;
  event_sim_config.log_eps = 0.001;
  EventSimulator simulator(event_sim_config);
  const double contrast_bias = 0.0;

  LOG(INFO) << "Testing event camera simulator with C+ = " << event_sim_config.Cp
            << ", C- = " << event_sim_config.Cm;

  std::vector<ze::real_t> times, mean_errors, stddev_errors;
  bool is_first_image = true;
  Image I, L, L_reconstructed;
  int64_t stamp;
  while(reader.next(stamp, I))
  {
    LOG_EVERY_N(INFO, 50) << "t = " << ze::nanosecToSecTrunc(stamp) << " s";
    I.convertTo(I, cv::DataType<ImageFloatType>::type, 1./255.);
    cv::log(event_sim_config.log_eps + I, L);

    if(is_first_image)
    {
      // Initialize reconstructed image from the ground truth image
      L_reconstructed = L.clone();
      is_first_image = false;
    }

    Events events = simulator.imageCallback(I, stamp);

    // Reconstruct next image from previous one using the events in between
    for(const Event& e : events)
    {
      ImageFloatType pol = e.pol ? 1. : -1.;
      ImageFloatType C = e.pol ? event_sim_config.Cp : event_sim_config.Cm;
      C += contrast_bias;
      L_reconstructed(e.y,e.x) += pol * C;
    }

    // Compute the mean and stddev of the reconstruction error over the whole image
    Image error;
    cv::absdiff(L, L_reconstructed, error);
    cv::Scalar mean_error, stddev_error;
    cv::meanStdDev(error, mean_error, stddev_error);
    VLOG(1) << "Mean error: " << mean_error
            << ", Stddev: " << stddev_error;

    times.push_back(ze::nanosecToSecTrunc(stamp));
    mean_errors.push_back(mean_error[0]);
    stddev_errors.push_back(stddev_error[0]);

#ifdef USE_OPENCV
    const ImageFloatType vmin = std::log(event_sim_config.log_eps);
    const ImageFloatType vmax = std::log(1.0 + event_sim_config.log_eps);
    cv::Mat disp = 255.0 * (L_reconstructed - vmin) / (vmax - vmin);
    disp.convertTo(disp, CV_8U);
    cv::imshow("Reconstructed", disp);
    cv::waitKey(1);
#endif
  }

  // Plot the mean and stddev reconstruction error over time
  ze::plt::plot(times, mean_errors, "r");
  ze::plt::plot(times, stddev_errors, "b--");
  std::stringstream title;
  title << "C = " << event_sim_config.Cp
        << ", sigma = " << event_sim_config.sigma_Cp
        << ", bias = " << contrast_bias;
  ze::plt::title(title.str());
  ze::plt::xlabel("Time (s)");
  ze::plt::ylabel("Mean / Stddev reconstruction error");
  ze::plt::grid(true);
  ze::plt::save("/tmp/evolution_reconstruction_error.pdf");
  ze::plt::show();
}

ZE_UNITTEST_ENTRYPOINT
event_camera_simulator/esim_common/CMakeLists.txt (new file, 28 lines)
@@ -0,0 +1,28 @@

cmake_minimum_required(VERSION 2.8.3)
project(esim_common)

find_package(catkin_simple REQUIRED)
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
catkin_simple()

set(HEADERS
  include/esim/common/types.hpp
  include/esim/common/utils.hpp
)

set(SOURCES
  src/utils.cpp
)

cs_add_library(${PROJECT_NAME} ${SOURCES} ${HEADERS})

##########
# GTESTS #
##########

catkin_add_gtest(test_utils test/test_utils.cpp)
target_link_libraries(test_utils ${PROJECT_NAME} ${OpenCV_LIBRARIES})

cs_install()
cs_export()
event_camera_simulator/esim_common/include/esim/common/types.hpp (new file, 160 lines)
@@ -0,0 +1,160 @@

#pragma once

#include <ze/common/transformation.hpp>
#include <opencv2/core/core.hpp>
#include <ze/cameras/camera_rig.hpp>
#include <memory>

// FloatType defines the floating point accuracy (single or double precision)
// for the geometric operations (computing rotation matrices, point projection, etc.).
// This should typically be double precision (highest accuracy).
#define FloatType ze::real_t

// ImageFloatType defines the floating point accuracy (single or double precision)
// of the intensity images (and depth images).
// Single precision should be enough there in most cases.
#define ImageFloatType float

namespace event_camera_simulator {

using Translation = ze::Position;
using Vector3 = ze::Vector3;
using Vector4 = ze::Vector4;
using Vector3i = Eigen::Vector3i;

using Transformation = ze::Transformation;
using TransformationVector = ze::TransformationVector;
using TransformationPtr = std::shared_ptr<Transformation>;

using Normal = ze::Vector3;
using CalibrationMatrix = ze::Matrix3;
using RotationMatrix = ze::Matrix3;
using HomographyMatrix = ze::Matrix3;

using AngularVelocity = ze::Vector3;
using LinearVelocity = ze::Vector3;
using AngularVelocityVector = std::vector<AngularVelocity>;
using LinearVelocityVector = std::vector<LinearVelocity>;

using uint16_t = ze::uint16_t;

using Time = ze::int64_t;
using Duration = ze::uint64_t;
using Image = cv::Mat_<ImageFloatType>;
using ImagePtr = std::shared_ptr<Image>;
using Depthmap = cv::Mat_<ImageFloatType>;
using OpticFlow = cv::Mat_< cv::Vec<ImageFloatType, 2> >;
using OpticFlowPtr = std::shared_ptr<OpticFlow>;
using DepthmapPtr = std::shared_ptr<Depthmap>;

using ImagePtrVector = std::vector<ImagePtr>;
using DepthmapPtrVector = std::vector<DepthmapPtr>;
using OpticFlowPtrVector = std::vector<OpticFlowPtr>;

using Camera = ze::Camera;

struct Event
{
  Event(uint16_t x, uint16_t y, Time t, bool pol)
    : x(x),
      y(y),
      t(t),
      pol(pol)
  {
  }

  uint16_t x;
  uint16_t y;
  Time t;
  bool pol;
};

using Events = std::vector<Event>;
using EventsVector = std::vector<Events>;
using EventsPtr = std::shared_ptr<Events>;

struct PointXYZRGB
{
  PointXYZRGB(FloatType x, FloatType y, FloatType z,
              int red, int green, int blue)
    : xyz(x, y, z),
      rgb(red, green, blue) {}

  PointXYZRGB(const Vector3& xyz)
    : xyz(xyz) {}

  PointXYZRGB(const Vector3& xyz, const Vector3i& rgb)
    : xyz(xyz),
      rgb(rgb) {}

  Vector3 xyz;
  Vector3i rgb;
};

using PointCloud = std::vector<PointXYZRGB>;
using PointCloudVector = std::vector<PointCloud>;

struct SimulatorData
{
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  //! Nanosecond timestamp.
  Time timestamp;

  //! Camera images.
  ImagePtrVector images;

  //! Depth maps.
  DepthmapPtrVector depthmaps;

  //! Optic flow maps.
  OpticFlowPtrVector optic_flows;

  //! Camera rig.
  ze::CameraRig::Ptr camera_rig;

  //! An accelerometer measures the specific force (incl. gravity),
  //! corrupted by noise and bias.
  Vector3 specific_force_corrupted;

  //! The angular velocity (in the body frame) corrupted by noise and bias.
  Vector3 angular_velocity_corrupted;

  //! Groundtruth states.
  struct Groundtruth
  {
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    //! Pose of the body (i.e. the IMU) expressed in the world frame.
    Transformation T_W_B;

    //! Accelerometer and gyro biases.
    Vector3 acc_bias;
    Vector3 gyr_bias;

    //! Poses of the cameras in the rig expressed in the world frame.
    TransformationVector T_W_Cs;

    //! Linear and angular velocities (i.e. twists) of the cameras in the rig,
    //! expressed in each camera's local coordinate frame.
    LinearVelocityVector linear_velocities_;
    AngularVelocityVector angular_velocities_;

    //! Dynamic objects.
    std::vector<Transformation> T_W_OBJ_;
    std::vector<LinearVelocity> linear_velocity_obj_;
    std::vector<AngularVelocity> angular_velocity_obj_;
  };
  Groundtruth groundtruth;

  //! Flags to indicate whether a value has been updated or not.
  bool images_updated;
  bool depthmaps_updated;
  bool optic_flows_updated;
  bool twists_updated;
  bool poses_updated;
  bool imu_updated;
};

} // namespace event_camera_simulator
event_camera_simulator/esim_common/include/esim/common/utils.hpp (new file, 59 lines)
@@ -0,0 +1,59 @@

#pragma once

#include <esim/common/types.hpp>

namespace ze {
  class Camera;
}

namespace event_camera_simulator {

inline double degToRad(double deg)
{
  return deg * CV_PI / 180.0;
}

inline double hfovToFocalLength(double hfov_deg, int W)
{
  return 0.5 * static_cast<double>(W) / std::tan(0.5 * degToRad(hfov_deg));
}

CalibrationMatrix calibrationMatrixFromCamera(const Camera::Ptr& camera);

PointCloud eventsToPointCloud(const Events& events, const Depthmap& depthmap, const ze::Camera::Ptr& camera);

FloatType maxMagnitudeOpticFlow(const OpticFlowPtr& flow);

FloatType maxPredictedAbsBrightnessChange(const ImagePtr& I, const OpticFlowPtr& flow);

void gaussianBlur(ImagePtr& I, FloatType sigma);

// Helper class to compute optic flow from a twist vector and depth map.
// Precomputes a lookup table for pixel -> bearing vector correspondences
// to accelerate the computation.
class OpticFlowHelper
{
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  ZE_POINTER_TYPEDEFS(OpticFlowHelper);

  OpticFlowHelper(const ze::Camera::Ptr& camera);

  void computeFromDepthAndTwist(const ze::Vector3& w_WC, const ze::Vector3& v_WC,
                                const DepthmapPtr& depthmap, OpticFlowPtr& flow);

  void computeFromDepthCamTwistAndObjDepthAndTwist(const ze::Vector3& w_WC, const ze::Vector3& v_WC, const DepthmapPtr& depthmap,
                                                   const ze::Vector3& r_COBJ, const ze::Vector3& w_WOBJ, const ze::Vector3& v_WOBJ,
                                                   OpticFlowPtr& flow);

private:

  void precomputePixelToBearingLookupTable();

  ze::Camera::Ptr camera_;

  // Precomputed lookup table from keypoint -> bearing vector.
  ze::Bearings bearings_C_;
};
} // namespace event_camera_simulator
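For reference, `hfovToFocalLength` above is the standard pinhole relation

$$ f \;=\; \frac{W}{2\,\tan(\mathrm{hfov}/2)}, $$

so, for example, a 90 degree horizontal field of view on a 640-pixel-wide sensor gives $f = 640 / (2\tan 45^\circ) = 320$ pixels.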
event_camera_simulator/esim_common/package.xml (new file, 19 lines)
@@ -0,0 +1,19 @@

<?xml version="1.0"?>
<package format="2">
  <name>esim_common</name>
  <version>0.0.0</version>
  <description>Common data types and utils for the event camera simulator.</description>

  <maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>

  <license>GPLv3</license>

  <buildtool_depend>catkin</buildtool_depend>
  <buildtool_depend>catkin_simple</buildtool_depend>

  <depend>ze_common</depend>
  <depend>ze_cameras</depend>
  <depend>gflags_catkin</depend>
  <depend>glog_catkin</depend>

</package>
event_camera_simulator/esim_common/src/utils.cpp (new file, 190 lines)
@@ -0,0 +1,190 @@

#include <esim/common/utils.hpp>
#include <ze/cameras/camera_models.hpp>
#include <opencv2/imgproc/imgproc.hpp>

namespace event_camera_simulator {

OpticFlowHelper::OpticFlowHelper(const ze::Camera::Ptr& camera)
  : camera_(camera)
{
  CHECK(camera_);
  precomputePixelToBearingLookupTable();
}

void OpticFlowHelper::precomputePixelToBearingLookupTable()
{
  // points_C is a matrix containing the coordinates of each pixel coordinate in the image plane
  ze::Keypoints points_C(2, camera_->width() * camera_->height());
  for(int y=0; y<camera_->height(); ++y)
  {
    for(int x=0; x<camera_->width(); ++x)
    {
      points_C.col(x + camera_->width() * y) = ze::Keypoint(x,y);
    }
  }
  bearings_C_ = camera_->backProjectVectorized(points_C);
  bearings_C_.array().rowwise() /= bearings_C_.row(2).array();
}

void OpticFlowHelper::computeFromDepthAndTwist(const ze::Vector3& w_WC, const ze::Vector3& v_WC,
                                               const DepthmapPtr& depthmap, OpticFlowPtr& flow)
{
  CHECK(depthmap);
  CHECK_EQ(depthmap->rows, camera_->height());
  CHECK_EQ(depthmap->cols, camera_->width());
  CHECK(depthmap->isContinuous());

  const ze::Vector3 w_CW = -w_WC; // rotation speed of the world wrt the camera
  const ze::Vector3 v_CW = -v_WC; // speed of the world wrt the camera
  const ze::Matrix33 R_CW = ze::skewSymmetric(w_CW);

  Eigen::Map<const Eigen::Matrix<ImageFloatType, 1, Eigen::Dynamic, Eigen::RowMajor>> depths(depthmap->ptr<ImageFloatType>(), 1, depthmap->rows * depthmap->cols);
  ze::Positions Xs = bearings_C_;
  Xs.array().rowwise() *= depths.cast<FloatType>().array();

  ze::Matrix6X dproject_dX =
      camera_->dProject_dLandmarkVectorized(Xs);

  for(int y=0; y<camera_->height(); ++y)
  {
    for(int x=0; x<camera_->width(); ++x)
    {
      const Vector3 X = Xs.col(x + camera_->width() * y);
      ze::Matrix31 dXdt = R_CW * X + v_CW;
      ze::Vector2 flow_vec
          = Eigen::Map<ze::Matrix23>(dproject_dX.col(x + camera_->width() * y).data()) * dXdt;

      (*flow)(y,x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
    }
  }
}

void OpticFlowHelper::computeFromDepthCamTwistAndObjDepthAndTwist(const ze::Vector3& w_WC, const ze::Vector3& v_WC, const DepthmapPtr& depthmap,
                                                                  const ze::Vector3& r_COBJ, const ze::Vector3& w_WOBJ, const ze::Vector3& v_WOBJ, OpticFlowPtr& flow)
{
  CHECK(depthmap);
  CHECK_EQ(depthmap->rows, camera_->height());
  CHECK_EQ(depthmap->cols, camera_->width());
  CHECK(depthmap->isContinuous());

  const ze::Matrix33 w_WC_tilde = ze::skewSymmetric(w_WC);
  const ze::Matrix33 w_WOBJ_tilde = ze::skewSymmetric(w_WOBJ);

  Eigen::Map<const Eigen::Matrix<ImageFloatType, 1, Eigen::Dynamic, Eigen::RowMajor>> depths(depthmap->ptr<ImageFloatType>(), 1, depthmap->rows * depthmap->cols);
  ze::Positions Xs = bearings_C_;
  Xs.array().rowwise() *= depths.cast<FloatType>().array();

  ze::Matrix6X dproject_dX =
      camera_->dProject_dLandmarkVectorized(Xs);

  for(int y=0; y<camera_->height(); ++y)
  {
    for(int x=0; x<camera_->width(); ++x)
    {
      const Vector3 r_CX = Xs.col(x + camera_->width() * y);
      const Vector3 r_OBJX = r_CX - r_COBJ;

      ze::Matrix31 dXdt = v_WOBJ - v_WC - w_WC_tilde*r_CX + w_WOBJ_tilde*r_OBJX;
      ze::Vector2 flow_vec
          = Eigen::Map<ze::Matrix23>(dproject_dX.col(x + camera_->width() * y).data()) * dXdt;

      (*flow)(y,x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
    }
  }
}

FloatType maxMagnitudeOpticFlow(const OpticFlowPtr& flow)
{
  CHECK(flow);
  FloatType max_squared_magnitude = 0;
  for(int y=0; y<flow->rows; ++y)
  {
    for(int x=0; x<flow->cols; ++x)
    {
      const FloatType squared_magnitude = cv::norm((*flow)(y,x), cv::NORM_L2SQR);
      if(squared_magnitude > max_squared_magnitude)
      {
        max_squared_magnitude = squared_magnitude;
      }
    }
  }
  return std::sqrt(max_squared_magnitude);
}

FloatType maxPredictedAbsBrightnessChange(const ImagePtr& I, const OpticFlowPtr& flow)
{
  const size_t height = I->rows;
  const size_t width = I->cols;

  Image Ix, Iy; // horizontal/vertical gradients of I
  // the factor 1/8 accounts for the scaling introduced by the Sobel filter mask
  //cv::Sobel(*I, Ix, cv::DataType<ImageFloatType>::type, 1, 0, 3, 1./8.);
  //cv::Sobel(*I, Iy, cv::DataType<ImageFloatType>::type, 0, 1, 3, 1./8.);

  // the factor 1/32 accounts for the scaling introduced by the Scharr filter mask
  cv::Scharr(*I, Ix, cv::DataType<ImageFloatType>::type, 1, 0, 1./32.);
  cv::Scharr(*I, Iy, cv::DataType<ImageFloatType>::type, 0, 1, 1./32.);

  Image Lx, Ly; // horizontal/vertical gradients of log(I). d(logI)/dx = 1/I * dI/dx
  static const ImageFloatType eps = 1e-3; // small additive term to avoid problems at I=0
  cv::divide(Ix, *I+eps, Lx);
  cv::divide(Iy, *I+eps, Ly);

  Image dLdt(height, width);
  for(int y=0; y<height; ++y)
  {
    for(int x=0; x<width; ++x)
    {
      // dL/dt ~= - <nablaL, flow>
      const ImageFloatType dLdt_at_xy =
          Lx(y,x) * (*flow)(y,x)[0] +
          Ly(y,x) * (*flow)(y,x)[1]; // "-" sign ignored since we are interested in the absolute value...
      dLdt(y,x) = std::fabs(dLdt_at_xy);
    }
  }
  double min_dLdt, max_dLdt;
  int min_idx, max_idx;
  cv::minMaxIdx(dLdt, &min_dLdt, &max_dLdt,
                &min_idx, &max_idx);

  return static_cast<FloatType>(max_dLdt);
}

void gaussianBlur(ImagePtr& I, FloatType sigma)
{
  cv::GaussianBlur(*I, *I, cv::Size(15,15), sigma, sigma);
}

CalibrationMatrix calibrationMatrixFromCamera(const Camera::Ptr& camera)
{
  CHECK(camera);
  const ze::VectorX params = camera->projectionParameters();
  CalibrationMatrix K;
  K << params(0), 0, params(2),
       0, params(1), params(3),
       0, 0, 1;
  return K;
}

PointCloud eventsToPointCloud(const Events& events, const Depthmap& depthmap, const ze::Camera::Ptr& camera)
{
  PointCloud pcl_camera;
  for(const Event& ev : events)
  {
    Vector3 X_c = camera->backProject(ze::Keypoint(ev.x,ev.y));
    X_c[0] /= X_c[2];
    X_c[1] /= X_c[2];
    X_c[2] = 1.;
    const ImageFloatType z = depthmap(ev.y,ev.x);
    Vector3 P_c = z * X_c;
    Vector3i rgb;
    static const Vector3i red(255, 0, 0);
    static const Vector3i blue(0, 0, 255);
    rgb = (ev.pol) ? blue : red;
    PointXYZRGB P_c_intensity(P_c, rgb);
    pcl_camera.push_back(P_c_intensity);
  }
  return pcl_camera;
}

} // namespace event_camera_simulator
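To summarize the analytic flow computed by `computeFromDepthAndTwist` above: each pixel is back-projected to a 3D point $\mathbf{X} = z\,\mathbf{b}$ (bearing scaled by depth), the point velocity in the camera frame is $\dot{\mathbf{X}} = \mathbf{w}_{CW}\times\mathbf{X} + \mathbf{v}_{CW}$ with $\mathbf{w}_{CW} = -\mathbf{w}_{WC}$ and $\mathbf{v}_{CW} = -\mathbf{v}_{WC}$, and the image-plane flow is obtained by chaining this with the projection Jacobian:

$$ \dot{\mathbf{u}} \;=\; \frac{\partial \pi}{\partial \mathbf{X}}(\mathbf{X})\,\bigl(\mathbf{w}_{CW}\times\mathbf{X} + \mathbf{v}_{CW}\bigr), $$

which is what the loop evaluates via `dProject_dLandmarkVectorized`. The finite-difference version in the test below serves as an independent check of this expression.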
event_camera_simulator/esim_common/test/test_utils.cpp (new file, 213 lines)
@@ -0,0 +1,213 @@

#include <esim/common/utils.hpp>
#include <ze/common/test_entrypoint.hpp>
#include <ze/common/file_utils.hpp>
#include <ze/common/path_utils.hpp>
#include <ze/common/string_utils.hpp>
#include <ze/cameras/camera_rig.hpp>
#include <ze/common/random.hpp>

std::string getTestDataDir(const std::string& dataset_name)
{
  using namespace ze;

  const char* datapath_dir = std::getenv("ESIM_TEST_DATA_PATH");
  CHECK(datapath_dir != nullptr)
      << "Did you download the esim_test_data repository and set "
      << "the ESIM_TEST_DATA_PATH environment variable?";

  std::string path(datapath_dir);
  CHECK(isDir(path)) << "Folder does not exist: " << path;
  path = path + "/data/" + dataset_name;
  CHECK(isDir(path)) << "Dataset does not exist: " << path;
  return path;
}

namespace event_camera_simulator {

void computeOpticFlowFiniteDifference(const ze::Camera::Ptr& camera,
                                      const ze::Vector3& angular_velocity,
                                      const ze::Vector3& linear_velocity,
                                      const DepthmapPtr& depth,
                                      OpticFlowPtr& flow)
{
  CHECK(flow);
  CHECK_EQ(flow->rows, camera->height());
  CHECK_EQ(flow->cols, camera->width());

  const FloatType dt = 0.001;

  for(int y=0; y<flow->rows; ++y)
  {
    for(int x=0; x<flow->cols; ++x)
    {
      ze::Keypoint u_t(x,y);
      ze::Bearing f = camera->backProject(u_t);
      const FloatType z = static_cast<FloatType>((*depth)(y,x));
      ze::Position Xc_t = z * ze::Position(f[0]/f[2], f[1]/f[2], 1.);

      ze::Transformation::Rotation dR = ze::Transformation::Rotation::exp(-angular_velocity * dt);
      ze::Transformation::Position dT = -linear_velocity * dt;

      // Transform Xc(t) to Xc(t+dt)
      ze::Transformation T_tdt_t;
      T_tdt_t.getRotation() = dR;
      T_tdt_t.getPosition() = dT;
      VLOG(5) << T_tdt_t;

      ze::Position Xc_t_dt = T_tdt_t.transform(Xc_t);

      // Project Xc(t+dt) in the image plane
      ze::Keypoint u_t_dt = camera->project(Xc_t_dt);
      VLOG(5) << u_t;
      VLOG(5) << u_t_dt;

      ze::Vector2 flow_vec = (u_t_dt - u_t) / dt;
      (*flow)(y,x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
    }
  }
}

void computeOpticFlowFiniteDifference(const ze::Camera::Ptr& camera,
                                      const ze::Vector3& angular_velocity,
                                      const ze::Vector3& linear_velocity,
                                      const DepthmapPtr& depth,
                                      const ze::Vector3& r_COBJ,
                                      const ze::Vector3& angular_velocity_obj,
                                      const ze::Vector3& linear_velocity_obj,
                                      OpticFlowPtr& flow)
{
  CHECK(flow);
  CHECK_EQ(flow->rows, camera->height());
  CHECK_EQ(flow->cols, camera->width());

  const FloatType dt = 0.001;

  for(int y=0; y<flow->rows; ++y)
  {
    for(int x=0; x<flow->cols; ++x)
    {
      ze::Keypoint u_t(x,y);
      ze::Bearing f = camera->backProject(u_t);
      const FloatType z = static_cast<FloatType>((*depth)(y,x));
      ze::Position Xc_t = z * ze::Position(f[0]/f[2], f[1]/f[2], 1.);
      ze::Position r_OBJX = Xc_t - r_COBJ;
      ze::Matrix33 w_WOBJ_tilde = ze::skewSymmetric(angular_velocity_obj);

      ze::Transformation::Rotation dR = ze::Transformation::Rotation::exp(-angular_velocity * dt);
      ze::Transformation::Position dT = linear_velocity_obj*dt - linear_velocity * dt + w_WOBJ_tilde*r_OBJX*dt;

      // Transform Xc(t) to Xc(t+dt)
      ze::Transformation T_tdt_t;
      T_tdt_t.getRotation() = dR;
      T_tdt_t.getPosition() = dT;
      VLOG(5) << T_tdt_t;

      ze::Position Xc_t_dt = T_tdt_t.transform(Xc_t);

      // Project Xc(t+dt) in the image plane
      ze::Keypoint u_t_dt = camera->project(Xc_t_dt);
      VLOG(5) << u_t;
      VLOG(5) << u_t_dt;

      ze::Vector2 flow_vec = (u_t_dt - u_t) / dt;
      (*flow)(y,x) = cv::Vec<FloatType, 2>(flow_vec(0), flow_vec(1));
    }
  }
}

} // event_camera_simulator

TEST(Utils, testOpticFlowComputation)
{
  using namespace event_camera_simulator;

  // Load camera calib from folder
  const std::string path_to_data_folder = getTestDataDir("camera_calibs");
  ze::CameraRig::Ptr camera_rig
      = ze::cameraRigFromYaml(ze::joinPath(path_to_data_folder, "pinhole_mono.yaml"));

  CHECK(camera_rig);

  const ze::Camera::Ptr camera = camera_rig->atShared(0);
  cv::Size sensor_size(camera->width(), camera->height());

  OpticFlowPtr flow_analytic =
      std::make_shared<OpticFlow>(sensor_size);

  // Sample random depth map
  const ImageFloatType z_mean = 5.0;
  const ImageFloatType z_stddev = 0.5;
  DepthmapPtr depth = std::make_shared<Depthmap>(sensor_size);
  for(int y=0; y<sensor_size.height; ++y)
  {
    for(int x=0; x<sensor_size.width; ++x)
    {
      (*depth)(y,x) = ze::sampleNormalDistribution(true, z_mean, z_stddev);
    }
  }

  // Sample random linear and angular velocity
  ze::Vector3 angular_velocity, linear_velocity;
  angular_velocity.setRandom();
  linear_velocity.setRandom();

  LOG(INFO) << "w = " << angular_velocity;
  LOG(INFO) << "v = " << linear_velocity;

  // Compute optic flow on the image plane according
  // to the sampled angular/linear velocity
  OpticFlowHelper optic_flow_helper(camera);
  optic_flow_helper.computeFromDepthAndTwist(angular_velocity, linear_velocity,
                                             depth, flow_analytic);

  OpticFlowPtr flow_finite_diff =
      std::make_shared<OpticFlow>(sensor_size);

  computeOpticFlowFiniteDifference(camera, angular_velocity, linear_velocity,
                                   depth, flow_finite_diff);

  // Check that the analytical flow and finite-difference flow match
  for(int y=0; y<sensor_size.height; ++y)
  {
    for(int x=0; x<sensor_size.width; ++x)
    {
      EXPECT_NEAR((*flow_analytic)(y,x)[0], (*flow_finite_diff)(y,x)[0], 0.1);
      EXPECT_NEAR((*flow_analytic)(y,x)[1], (*flow_finite_diff)(y,x)[1], 0.1);
    }
  }

  /**********************************************/
  /* repeat optic flow test for dynamic objects */
  /**********************************************/

  // sample random obj position and linear and angular velocity
  ze::Vector3 r_COBJ;
  r_COBJ.setRandom();
  r_COBJ(2) = ze::sampleNormalDistribution(true, z_mean, z_stddev); // assume object is in front of camera

  ze::Vector3 angular_velocity_obj, linear_velocity_obj;
  angular_velocity_obj.setRandom();
  linear_velocity_obj.setRandom();

  // Compute optic flow on the image plane according
  // to the sampled angular/linear velocity
  optic_flow_helper.computeFromDepthCamTwistAndObjDepthAndTwist(angular_velocity, linear_velocity, depth,
                                                                r_COBJ, angular_velocity_obj, linear_velocity_obj,
                                                                flow_analytic);

  computeOpticFlowFiniteDifference(camera, angular_velocity, linear_velocity, depth,
                                   r_COBJ, angular_velocity_obj, linear_velocity_obj,
                                   flow_finite_diff);

  // Check that the analytical flow and finite-difference flow match
  for(int y=0; y<sensor_size.height; ++y)
  {
    for(int x=0; x<sensor_size.width; ++x)
    {
      EXPECT_NEAR((*flow_analytic)(y,x)[0], (*flow_finite_diff)(y,x)[0], 0.1);
|
||||||
|
EXPECT_NEAR((*flow_analytic)(y,x)[1], (*flow_finite_diff)(y,x)[1], 0.1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ZE_UNITTEST_ENTRYPOINT
|
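A compact restatement of what the finite-difference check above computes (a reading aid, not part of the source files; symbols follow the variable names in the code):

\dot{u} \approx \frac{\pi\big(\exp(-\omega\,\Delta t)\,X_c + \delta T\big) - u}{\Delta t}, \qquad X_c = z\,(f_0/f_2,\; f_1/f_2,\; 1)^\top, \qquad \Delta t = 10^{-3}\,\mathrm{s},

where \pi is the camera projection, with \delta T = -v\,\Delta t for the static-scene overload and \delta T = \big(v_{obj} - v + \hat{\omega}_{obj}(X_c - r_{COBJ})\big)\Delta t for a point on a moving object. The test accepts this finite-difference flow against the analytic flow from OpticFlowHelper to within 0.1 px/s.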
32
event_camera_simulator/esim_data_provider/CMakeLists.txt
Normal file
@ -0,0 +1,32 @@
cmake_minimum_required(VERSION 2.8.3)
project(esim_data_provider)

find_package(catkin_simple REQUIRED)
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
catkin_simple()

set(HEADERS
  include/esim/data_provider/data_provider_base.hpp
  include/esim/data_provider/data_provider_factory.hpp
  include/esim/data_provider/data_provider_online_render.hpp
  include/esim/data_provider/data_provider_online_simple.hpp
  include/esim/data_provider/data_provider_from_folder.hpp
  include/esim/data_provider/data_provider_rosbag.hpp
  include/esim/data_provider/renderer_factory.hpp
)

set(SOURCES
  src/data_provider_base.cpp
  src/data_provider_factory.cpp
  src/data_provider_online_render.cpp
  src/data_provider_online_simple.cpp
  src/data_provider_from_folder.cpp
  src/data_provider_rosbag.cpp
  src/renderer_factory.cpp
)

cs_add_library(${PROJECT_NAME} ${SOURCES} ${HEADERS})

cs_install()
cs_export()
@ -0,0 +1,74 @@
#pragma once

#include <atomic>
#include <functional>
#include <memory>

#include <esim/common/types.hpp>
#include <ze/common/macros.hpp>
#include <ze/common/noncopyable.hpp>
#include <ze/common/signal_handler.hpp>

// fwd
namespace cv {
class Mat;
}

namespace event_camera_simulator {

using Callback =
  std::function<void (const SimulatorData& sim_data)>;

enum class DataProviderType {
  RendererOnline,
  Folder,
  Rosbag
};

//! A data provider registers to a data source and triggers callbacks when
//! new data is available.
class DataProviderBase : ze::Noncopyable
{
public:
  ZE_POINTER_TYPEDEFS(DataProviderBase);

  DataProviderBase() = delete;
  DataProviderBase(DataProviderType type);
  virtual ~DataProviderBase() = default;

  //! Process all callbacks. Waits until each callback is processed.
  void spin();

  //! Read the next data field and process the callback. Returns false when the dataset is finished.
  virtual bool spinOnce() = 0;

  //! False if there is no more data to process or there was a shutdown signal.
  virtual bool ok() const = 0;

  //! Pause data provider.
  virtual void pause();

  //! Stop data provider.
  virtual void shutdown();

  //! Register callback function to call when new message is available.
  void registerCallback(const Callback& callback);

  //! Returns the number of cameras in the rig.
  virtual size_t numCameras() const = 0;

  //! Returns the camera rig.
  ze::CameraRig::Ptr getCameraRig() { return camera_rig_; }

protected:
  DataProviderType type_;
  Callback callback_;
  volatile bool running_ = true;

  ze::CameraRig::Ptr camera_rig_;

private:
  ze::SimpleSigtermHandler signal_handler_; //!< Sets running_ to false when Ctrl-C is pressed.
};

} // namespace event_camera_simulator
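Illustrative usage sketch (not part of the original sources): how a consumer is expected to drive the DataProviderBase interface declared above. It assumes gflags/glog are initialized elsewhere; loadDataProviderFromGflags() is the factory declared further below, and the lambda body and function name are hypothetical.

#include <esim/data_provider/data_provider_factory.hpp>

void runSimulationLoop()
{
  event_camera_simulator::DataProviderBase::Ptr provider =
      event_camera_simulator::loadDataProviderFromGflags();

  // The callback fires for every spinOnce() that produced new data; the
  // *_updated flags in SimulatorData tell the consumer what changed.
  provider->registerCallback(
      [](const event_camera_simulator::SimulatorData& sim_data) {
        if (sim_data.images_updated)
        {
          VLOG(1) << "New frame at t = " << sim_data.timestamp;
        }
      });

  provider->spin(); // blocks until ok() returns false (end of data or Ctrl-C)
}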
@ -0,0 +1,10 @@
#pragma once

#include <gflags/gflags.h>
#include <esim/data_provider/data_provider_base.hpp>

namespace event_camera_simulator {

DataProviderBase::Ptr loadDataProviderFromGflags();

} // namespace event_camera_simulator
@ -0,0 +1,43 @@
#pragma once

#include <map>
#include <memory>
#include <string>
#include <vector>
#include <fstream>

#include <ze/common/macros.hpp>
#include <ze/common/types.hpp>
#include <esim/data_provider/data_provider_base.hpp>
#include <ze/cameras/camera_rig.hpp>

namespace event_camera_simulator {

class DataProviderFromFolder : public DataProviderBase
{
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  DataProviderFromFolder(const std::string& path_to_data_folder);

  virtual ~DataProviderFromFolder() = default;

  virtual bool spinOnce() override;

  virtual bool ok() const override;

  size_t numCameras() const override;

private:

  int64_t getTimeStamp(const std::string& ts_str) const;

  std::string path_to_data_folder_;
  std::ifstream images_in_str_;
  const char delimiter_{','};
  const size_t num_tokens_in_line_ = 3; // stamp, image, depth

  SimulatorData sim_data_;
};

} // namespace event_camera_simulator
@ -0,0 +1,75 @@
#pragma once

#include <map>
#include <memory>
#include <string>
#include <vector>

#include <ze/common/macros.hpp>
#include <ze/common/types.hpp>
#include <esim/data_provider/data_provider_base.hpp>
#include <esim/rendering/renderer_base.hpp>
#include <esim/common/utils.hpp>

#include <ze/vi_simulation/trajectory_simulator.hpp>
#include <ze/vi_simulation/imu_simulator.hpp>
#include <ze/cameras/camera_rig.hpp>


namespace event_camera_simulator {

/**
 * Online data provider intended to simulate a camera rig composed of multiple
 * cameras rigidly attached together, along with an Inertial Measurement Unit (IMU).
 *
 * The camera rig follows a camera trajectory in 3D.
 */
class DataProviderOnlineMoving3DCameraRig : public DataProviderBase
{
public:
  DataProviderOnlineMoving3DCameraRig(ze::real_t simulation_minimum_framerate,
                                      ze::real_t simulation_imu_rate,
                                      int simulation_adaptive_sampling_method,
                                      ze::real_t simulation_adaptive_sampling_lambda);

  virtual ~DataProviderOnlineMoving3DCameraRig();

  virtual bool spinOnce() override;

  virtual bool ok() const override;

  size_t numCameras() const override;

private:

  void updateGroundtruth();
  void sampleImu();
  void sampleFrame();

  void setImuUpdated();
  void setFrameUpdated();
  void setAllUpdated();

  std::vector<Renderer::Ptr> renderers_;
  std::vector<OpticFlowHelper::Ptr> optic_flow_helpers_;
  ze::TrajectorySimulator::Ptr trajectory_;
  ze::ImuSimulator::Ptr imu_;
  SimulatorData sim_data_;

  ze::real_t t_;
  ze::real_t last_t_frame_; // time of the last sampled frame, used to enforce the minimum frame rate
  ze::real_t last_t_imu_;   // time of the last sampled IMU measurement, used to enforce the IMU rate
  ze::real_t next_t_flow_;  // next sampling time needed to keep the maximum pixel displacement since the last frame bounded
  ze::real_t dt_imu_;
  ze::real_t dt_frame_;

  ze::real_t simulation_minimum_framerate_;
  ze::real_t simulation_imu_rate_;
  int simulation_adaptive_sampling_method_;
  ze::real_t simulation_adaptive_sampling_lambda_;

  // dynamic objects
  std::vector<ze::TrajectorySimulator::Ptr> trajectory_dyn_obj_;
};

} // namespace event_camera_simulator
@ -0,0 +1,54 @@
#pragma once

#include <map>
#include <memory>
#include <string>
#include <vector>

#include <ze/common/macros.hpp>
#include <ze/common/types.hpp>
#include <esim/data_provider/data_provider_base.hpp>
#include <esim/rendering/simple_renderer_base.hpp>


namespace event_camera_simulator {

/**
 * Simple online data provider, intended to simulate a single event camera
 * based on images + optic flow maps provided by a rendering engine.
 *
 * This data provider does NOT simulate a camera trajectory or an IMU.
 */
class DataProviderOnlineSimple : public DataProviderBase
{
public:
  DataProviderOnlineSimple(ze::real_t simulation_minimum_framerate,
                           int simulation_adaptive_sampling_method,
                           ze::real_t simulation_adaptive_sampling_lambda);

  virtual ~DataProviderOnlineSimple();

  virtual bool spinOnce() override;

  virtual bool ok() const override;

  size_t numCameras() const override;

private:
  bool sampleFrame();
  void setFrameUpdated();

  SimpleRenderer::Ptr renderer_;
  SimulatorData sim_data_;

  ze::real_t t_;
  ze::real_t last_t_frame_;    // time of the last sampled frame, used to enforce the minimum frame rate
  ze::real_t next_t_adaptive_; // next sampling time dictated by the adaptive sampling scheme
  ze::real_t dt_frame_;

  ze::real_t simulation_minimum_framerate_;
  int simulation_adaptive_sampling_method_;
  ze::real_t simulation_adaptive_sampling_lambda_;
};

} // namespace event_camera_simulator
@ -0,0 +1,56 @@
// Copyright (C) ETH Zurich, Wyss Zurich, Zurich Eye - All Rights Reserved

#pragma once

#include <map>
#include <string>
#include <memory>
#include <vector>

#include <gflags/gflags.h>

#include <rosbag/bag.h>
#include <rosbag/view.h>

#include <sensor_msgs/Image.h>

#include <esim/data_provider/data_provider_base.hpp>

namespace event_camera_simulator {

class DataProviderRosbag : public DataProviderBase
{
public:
  DataProviderRosbag(
      const std::string& bag_filename,
      const std::map<std::string, size_t>& camera_topics);

  virtual ~DataProviderRosbag() = default;

  virtual bool spinOnce() override;

  virtual bool ok() const override;

  virtual size_t numCameras() const;

  size_t size() const;

private:
  void loadRosbag(const std::string& bag_filename);
  void initBagView(const std::vector<std::string>& topics);

  inline bool cameraSpin(const sensor_msgs::ImageConstPtr m_img,
                         const rosbag::MessageInstance& m);

  std::unique_ptr<rosbag::Bag> bag_;
  std::unique_ptr<rosbag::View> bag_view_;
  rosbag::View::iterator bag_view_it_;
  int n_processed_images_ = 0;

  // subscribed topics:
  std::map<std::string, size_t> img_topic_camidx_map_; // camera_topic --> camera_id

  SimulatorData sim_data_;
};

} // namespace event_camera_simulator
@ -0,0 +1,13 @@
#pragma once

#include <gflags/gflags.h>
#include <esim/rendering/renderer_base.hpp>
#include <esim/rendering/simple_renderer_base.hpp>

namespace event_camera_simulator {

bool loadPreprocessedImage(const std::string& path_to_img, cv::Mat *img);
Renderer::Ptr loadRendererFromGflags();
SimpleRenderer::Ptr loadSimpleRendererFromGflags();

} // namespace event_camera_simulator
26
event_camera_simulator/esim_data_provider/package.xml
Normal file
@ -0,0 +1,26 @@
<?xml version="1.0"?>
<package format="2">
  <name>esim_data_provider</name>
  <version>0.0.0</version>
  <description>Data providers for the event camera simulator.</description>

  <maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>

  <license>GPLv3</license>

  <buildtool_depend>catkin</buildtool_depend>
  <buildtool_depend>catkin_simple</buildtool_depend>

  <depend>esim_common</depend>
  <depend>esim_rendering</depend>
  <depend>imp_planar_renderer</depend>
  <depend>imp_panorama_renderer</depend>
  <depend>imp_opengl_renderer</depend>
  <depend>imp_unrealcv_renderer</depend>
  <depend>imp_multi_objects_2d</depend>
  <depend>esim_trajectory</depend>
  <depend>ze_vi_simulation</depend>
  <depend>gflags_catkin</depend>
  <depend>glog_catkin</depend>

</package>
@ -0,0 +1,33 @@
#include <esim/data_provider/data_provider_base.hpp>

namespace event_camera_simulator {

DataProviderBase::DataProviderBase(DataProviderType type)
  : type_(type)
  , signal_handler_(running_)
{}

void DataProviderBase::spin()
{
  while (ok())
  {
    spinOnce();
  }
}

void DataProviderBase::pause()
{
  running_ = false;
}

void DataProviderBase::shutdown()
{
  running_ = false;
}

void DataProviderBase::registerCallback(const Callback& callback)
{
  callback_ = callback;
}

} // namespace event_camera_simulator
@ -0,0 +1,99 @@
#include <ze/common/logging.hpp>
#include <esim/data_provider/data_provider_factory.hpp>
#include <esim/data_provider/data_provider_base.hpp>
#include <esim/data_provider/data_provider_online_render.hpp>
#include <esim/data_provider/data_provider_online_simple.hpp>
#include <esim/data_provider/data_provider_from_folder.hpp>
#include <esim/data_provider/data_provider_rosbag.hpp>

DEFINE_int32(data_source, 0,
             "Data source: 0: online renderer (moving 3D camera rig with IMU), "
             "1: simple online renderer, 2: read data from folder, 3: read data from rosbag.");

DEFINE_double(simulation_minimum_framerate, 30.0,
              "Minimum frame rate, in Hz. "
              "Especially useful when the event rate is low, to guarantee "
              "that frames are still published at a minimum framerate.");

DEFINE_double(simulation_imu_rate, 1000.0,
              "Fixed IMU sampling frequency, in Hz.");

DEFINE_int32(simulation_adaptive_sampling_method, 0,
             "Method to use for adaptive sampling. "
             "0: based on predicted absolute brightness change. "
             "1: based on optic flow.");

DEFINE_double(simulation_adaptive_sampling_lambda, 0.5,
              "Parameter that controls the behavior of the adaptive sampling method. "
              "The meaning of this value depends on the adaptive sampling method used: "
              "...based on predicted absolute brightness change: deltaT = lambda / max(|dL/dt|). "
              "...based on optic flow: deltaT = lambda / max(||du/dt||), where du/dt denotes the 2D optic flow field.");

DEFINE_string(path_to_data_folder, "",
              "Path to folder containing the data.");


// Parameters for the DataProviderRosbag
DEFINE_string(bag_filename, "dataset.bag", "Name of bagfile from which to read.");
DEFINE_string(topic_cam0, "/cam0/image_raw", "");
DEFINE_string(topic_cam1, "/cam1/image_raw", "");
DEFINE_string(topic_cam2, "/cam2/image_raw", "");
DEFINE_string(topic_cam3, "/cam3/image_raw", "");
DEFINE_uint64(num_cams, 1, "Number of normal cameras to read from rosbag.");

namespace event_camera_simulator {

DataProviderBase::Ptr loadDataProviderFromGflags()
{
  // Create data provider.
  DataProviderBase::Ptr data_provider;
  switch (FLAGS_data_source)
  {
    case 0: // Online Renderer for Moving 3D Camera Rig with IMU
    {
      data_provider.reset(
            new DataProviderOnlineMoving3DCameraRig(FLAGS_simulation_minimum_framerate,
                                                    FLAGS_simulation_imu_rate,
                                                    FLAGS_simulation_adaptive_sampling_method,
                                                    FLAGS_simulation_adaptive_sampling_lambda));
      break;
    }
    case 1: // Online Renderer Simple
    {
      data_provider.reset(
            new DataProviderOnlineSimple(FLAGS_simulation_minimum_framerate,
                                         FLAGS_simulation_adaptive_sampling_method,
                                         FLAGS_simulation_adaptive_sampling_lambda));
      break;
    }
    case 2: // Read data from a folder
    {
      data_provider.reset(
            new DataProviderFromFolder(FLAGS_path_to_data_folder));
      break;
    }
    case 3: // Read data (images) from a rosbag
    {
      CHECK_LE(FLAGS_num_cams, 4u);
      CHECK_EQ(FLAGS_num_cams, 1u) << "Only one camera is supported currently";

      // Fill camera topics.
      std::map<std::string, size_t> cam_topics;
      if (FLAGS_num_cams >= 1) cam_topics[FLAGS_topic_cam0] = 0;
      if (FLAGS_num_cams >= 2) cam_topics[FLAGS_topic_cam1] = 1;
      if (FLAGS_num_cams >= 3) cam_topics[FLAGS_topic_cam2] = 2;
      if (FLAGS_num_cams >= 4) cam_topics[FLAGS_topic_cam3] = 3;
      data_provider.reset(
            new DataProviderRosbag(FLAGS_bag_filename,
                                   cam_topics));
      break;
    }
    default:
    {
      LOG(FATAL) << "Data source not known.";
      break;
    }
  }

  return data_provider;
}

} // namespace event_camera_simulator
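For orientation (an illustrative example, not taken from the repository; the executable name "esim_node" and the paths are hypothetical, the flag names are the ones defined above), the flags would typically be passed on the command line of whatever binary calls loadDataProviderFromGflags():

rosrun esim_ros esim_node \
  --data_source=3 \
  --bag_filename=/path/to/dataset.bag \
  --topic_cam0=/cam0/image_raw \
  --num_cams=1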
@ -0,0 +1,88 @@
#include <esim/data_provider/data_provider_from_folder.hpp>
#include <ze/common/file_utils.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>

namespace event_camera_simulator {

DataProviderFromFolder::DataProviderFromFolder(const std::string& path_to_data_folder)
  : DataProviderBase(DataProviderType::Folder),
    path_to_data_folder_(path_to_data_folder)
{
  // Load CSV image file
  images_in_str_.open(ze::joinPath(path_to_data_folder, "images.csv"));
  CHECK(images_in_str_.is_open());

  // Load camera rig
  camera_rig_ = ze::cameraRigFromYaml(ze::joinPath(path_to_data_folder, "calib.yaml"));
  CHECK(camera_rig_);
  CHECK_EQ(camera_rig_->size(), 1u) << "Only one camera in the rig is supported at the moment";

  // Allocate memory for image data
  sim_data_.images.emplace_back(ImagePtr(new Image(
                                  cv::Size(camera_rig_->at(0).width(),
                                           camera_rig_->at(0).height()))));

  sim_data_.camera_rig = camera_rig_;
  sim_data_.images_updated = true;
  sim_data_.depthmaps_updated = false;
  sim_data_.optic_flows_updated = false;
  sim_data_.twists_updated = false;
  sim_data_.poses_updated = false;
  sim_data_.imu_updated = false;
}

int64_t DataProviderFromFolder::getTimeStamp(const std::string& ts_str) const
{
  return std::stoll(ts_str);
}

size_t DataProviderFromFolder::numCameras() const
{
  return camera_rig_->size();
}

bool DataProviderFromFolder::spinOnce()
{
  std::string line;
  if(!getline(images_in_str_, line))
  {
    return false;
  }

  if('%' != line.at(0) && '#' != line.at(0))
  {
    std::vector<std::string> items = ze::splitString(line, delimiter_);
    CHECK_GE(items.size(), num_tokens_in_line_);
    int64_t stamp = getTimeStamp(items[0]);

    const std::string& path_to_image = ze::joinPath(path_to_data_folder_, "frame", "cam_0", items[1]);
    cv::Mat image = cv::imread(path_to_image, 0);
    CHECK(image.data) << "Could not load image from file: " << path_to_image;
    CHECK(image.rows == camera_rig_->at(0).height()
          && image.cols == camera_rig_->at(0).width()) << "The image size in the data folder and the image size "
                                                          "specified in the camera rig do not match";

    VLOG(3) << "Read image from file: " << path_to_image;
    image.convertTo(*sim_data_.images[0], cv::DataType<ImageFloatType>::type, 1./255.);

    if(callback_)
    {
      sim_data_.timestamp = static_cast<Time>(stamp);
      callback_(sim_data_);
    }
  }

  return true;
}

bool DataProviderFromFolder::ok() const
{
  if (!running_)
  {
    VLOG(1) << "Data Provider was paused/terminated.";
    return false;
  }
  return true;
}

} // namespace event_camera_simulator
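A sketch of the folder layout that DataProviderFromFolder appears to expect, inferred from the parsing code above (file names and the depth-file extension are illustrative; only the second token of each CSV line is actually read, and the timestamp is assumed to be in nanoseconds since the other providers use ze::secToNanosec):

<data_folder>/calib.yaml            # camera rig YAML, exactly one camera
<data_folder>/images.csv            # one line per frame: stamp,image,depth
<data_folder>/frame/cam_0/<image>   # images referenced by the second token

# images.csv (lines starting with '#' or '%' are skipped)
# timestamp[ns], image, depth
0,frame_00000000.png,depth_00000000.exr
1000000,frame_00000001.png,depth_00000001.exr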
@ -0,0 +1,420 @@
#include <esim/data_provider/data_provider_online_render.hpp>
#include <esim/trajectory/trajectory_factory.hpp>
#include <esim/trajectory/imu_factory.hpp>
#include <esim/data_provider/renderer_factory.hpp>
#include <esim/common/utils.hpp>

#include <ze/cameras/camera_rig.hpp>
#include <ze/common/time_conversions.hpp>
#include <ze/common/timer_collection.hpp>

DECLARE_TIMER(TimerDataProvider, timers_data_provider_,
              render,
              optic_flow,
              sample_frame,
              sample_imu
              );

DEFINE_double(simulation_post_gaussian_blur_sigma, 0,
              "If sigma > 0, Gaussian blur the rendered images "
              "with a Gaussian filter standard deviation sigma.");

namespace event_camera_simulator {

DataProviderOnlineMoving3DCameraRig::DataProviderOnlineMoving3DCameraRig(ze::real_t simulation_minimum_framerate,
                                                                         ze::real_t simulation_imu_rate,
                                                                         int simulation_adaptive_sampling_method,
                                                                         ze::real_t simulation_adaptive_sampling_lambda)
  : DataProviderBase(DataProviderType::RendererOnline),
    simulation_minimum_framerate_(simulation_minimum_framerate),
    simulation_imu_rate_(simulation_imu_rate),
    simulation_adaptive_sampling_method_(simulation_adaptive_sampling_method),
    simulation_adaptive_sampling_lambda_(simulation_adaptive_sampling_lambda),
    dt_imu_(1./simulation_imu_rate),
    dt_frame_(1./simulation_minimum_framerate)
{
  CHECK(simulation_adaptive_sampling_method == 0
        || simulation_adaptive_sampling_method == 1);

  std::tie(trajectory_, trajectory_dyn_obj_) = loadTrajectorySimulatorFromGflags();
  imu_ = loadImuSimulatorFromGflags(trajectory_);
  camera_rig_ = ze::cameraRigFromGflags();

  // Compute Field-of-Views for information
  const ze::Camera::Ptr camera = camera_rig_->atShared(0);
  const int width = camera->width();
  const int height = camera->height();

  const ze::real_t horizontal_fov =
      180.0 / CV_PI *
      std::acos(camera->backProject(ze::Keypoint(0,height/2)).dot(
                  camera->backProject(ze::Keypoint(width-1,height/2))));

  const ze::real_t vertical_fov =
      180.0 / CV_PI *
      std::acos(camera->backProject(ze::Keypoint(width/2,0)).dot(
                  camera->backProject(ze::Keypoint(width/2,height-1))));

  const ze::real_t diagonal_fov =
      180.0 / CV_PI *
      std::acos(camera->backProject(ze::Keypoint(0,0)).dot(
                  camera->backProject(ze::Keypoint(width-1,height-1))));

  LOG(INFO) << "Horizontal FOV: " << horizontal_fov << " deg";
  LOG(INFO) << "Vertical FOV: " << vertical_fov << " deg";
  LOG(INFO) << "Diagonal FOV: " << diagonal_fov << " deg";

  for(size_t i=0; i<camera_rig_->size(); ++i)
  {
    renderers_.push_back(std::move(loadRendererFromGflags()));
    renderers_[i]->setCamera(camera_rig_->atShared(i));

    optic_flow_helpers_.emplace_back(std::make_shared<OpticFlowHelper>(camera_rig_->atShared(i)));
  }

  const size_t num_cameras = camera_rig_->size();

  sim_data_.groundtruth.T_W_Cs.resize(num_cameras);
  sim_data_.groundtruth.angular_velocities_.resize(num_cameras);
  sim_data_.groundtruth.linear_velocities_.resize(num_cameras);
  for(size_t i=0; i<num_cameras; ++i)
  {
    const cv::Size size = cv::Size(camera_rig_->at(i).width(),
                                   camera_rig_->at(i).height());

    sim_data_.images.emplace_back(ImagePtr(new Image(size)));
    sim_data_.depthmaps.emplace_back(DepthmapPtr(new Depthmap(size)));
    sim_data_.optic_flows.emplace_back(OpticFlowPtr(new OpticFlow(size)));

    sim_data_.images[i]->setTo(0);
    sim_data_.depthmaps[i]->setTo(0);
    sim_data_.optic_flows[i]->setTo(0);
  }

  for(size_t i=0; i<trajectory_dyn_obj_.size(); i++)
  {
    sim_data_.groundtruth.T_W_OBJ_.push_back(Transformation());
    sim_data_.groundtruth.linear_velocity_obj_.push_back(LinearVelocity());
    sim_data_.groundtruth.angular_velocity_obj_.push_back(AngularVelocity());
  }

  sim_data_.camera_rig = camera_rig_;
  t_ = trajectory_->start();

  // At the initial time, we sample everything (IMU + frames)
  sampleImu();
  sampleFrame();
  setAllUpdated();
  if(callback_)
  {
    callback_(sim_data_);
  }
}

DataProviderOnlineMoving3DCameraRig::~DataProviderOnlineMoving3DCameraRig()
{
  timers_data_provider_.saveToFile("/tmp", "data_provider_online_render.csv");
}

size_t DataProviderOnlineMoving3DCameraRig::numCameras() const
{
  if(camera_rig_)
  {
    return camera_rig_->size();
  }
  else
  {
    return 0;
  }
}

void DataProviderOnlineMoving3DCameraRig::updateGroundtruth()
{
  const Transformation T_W_B = trajectory_->T_W_B(t_);
  sim_data_.groundtruth.T_W_B = T_W_B;

  const AngularVelocity omega_B = trajectory_->angularVelocity_B(t_);
  const LinearVelocity v_B_W = trajectory_->velocity_W(t_);
  for(size_t i=0; i<camera_rig_->size(); ++i)
  {
    sim_data_.groundtruth.T_W_Cs[i] =
        sim_data_.groundtruth.T_W_B * camera_rig_->T_B_C(i);

    const LinearVelocity v_W = v_B_W + T_W_B.getRotation().rotate(
          ze::skewSymmetric(omega_B) * camera_rig_->T_B_C(i).getPosition());

    const AngularVelocity omega_C = camera_rig_->T_C_B(i).getRotation().rotate(omega_B);
    const LinearVelocity v_C = (camera_rig_->T_C_B(i) * T_W_B.inverse()).getRotation().rotate(v_W);

    sim_data_.groundtruth.angular_velocities_[i] = omega_C;
    sim_data_.groundtruth.linear_velocities_[i] = v_C;
  }

  // update poses of dynamic objects
  for (size_t i = 0; i < trajectory_dyn_obj_.size(); i++)
  {
    sim_data_.groundtruth.T_W_OBJ_[i] = trajectory_dyn_obj_[i]->T_W_B(t_);
    sim_data_.groundtruth.linear_velocity_obj_[i] = trajectory_dyn_obj_[i]->velocity_W(t_);
    sim_data_.groundtruth.angular_velocity_obj_[i] = sim_data_.groundtruth.T_W_OBJ_[i].getRotation().rotate(trajectory_dyn_obj_[i]->angularVelocity_B(t_));
  }
}

void DataProviderOnlineMoving3DCameraRig::sampleImu()
{
  // Sample new IMU (+ poses, twists, etc.) values,
  // and fill in the appropriate data in sim_data
  auto t = timers_data_provider_[::TimerDataProvider::sample_imu].timeScope();

  if(t_ > trajectory_->end())
  {
    return;
  }

  updateGroundtruth();
  sim_data_.specific_force_corrupted = imu_->specificForceCorrupted(t_);
  sim_data_.angular_velocity_corrupted = imu_->angularVelocityCorrupted(t_);
  sim_data_.groundtruth.acc_bias = imu_->bias()->accelerometer(t_);
  sim_data_.groundtruth.gyr_bias = imu_->bias()->gyroscope(t_);

  last_t_imu_ = t_;
}

void DataProviderOnlineMoving3DCameraRig::sampleFrame()
{
  // Sample (i.e. render) a new frame (+ depth map),
  // and fill in the appropriate data in sim_data.
  // Compute the optic flow and the next sampling time, in order
  // to guarantee that the displacement is bounded by simulation_max_displacement_successive_frames
  auto t = timers_data_provider_[::TimerDataProvider::sample_frame].timeScope();

  if(t_ > trajectory_->end())
  {
    return;
  }

  updateGroundtruth();

  {
    auto t = timers_data_provider_[::TimerDataProvider::render].timeScope();

    for(size_t i=0; i<camera_rig_->size(); ++i)
    {
      CHECK(renderers_[i]);

      // If the renderer provides a function to compute the optic
      // flow (for example, the OpenGL renderer, which implements
      // optic flow computation efficiently using a shader), use that.
      // Otherwise, compute the optic flow ourselves using the renderer depthmap.
      if(renderers_[i]->canComputeOpticFlow())
      {
        renderers_[i]->renderWithFlow(sim_data_.groundtruth.T_W_B * camera_rig_->T_B_C(i),
                                      sim_data_.groundtruth.linear_velocities_[i],
                                      sim_data_.groundtruth.angular_velocities_[i],
                                      sim_data_.groundtruth.T_W_OBJ_,
                                      sim_data_.groundtruth.linear_velocity_obj_,
                                      sim_data_.groundtruth.angular_velocity_obj_,
                                      sim_data_.images[i],
                                      sim_data_.depthmaps[i],
                                      sim_data_.optic_flows[i]);
      }
      else
      {
        renderers_[i]->render(sim_data_.groundtruth.T_W_B * camera_rig_->T_B_C(i),
                              sim_data_.groundtruth.T_W_OBJ_,
                              sim_data_.images[i],
                              sim_data_.depthmaps[i]);
      }

      // Optionally blur the rendered images slightly
      if(FLAGS_simulation_post_gaussian_blur_sigma > 0)
      {
        gaussianBlur(sim_data_.images[i], FLAGS_simulation_post_gaussian_blur_sigma);
      }
    }
  }

  {
    auto t = timers_data_provider_[::TimerDataProvider::optic_flow].timeScope();
    for(size_t i=0; i<camera_rig_->size(); ++i)
    {
      CHECK(optic_flow_helpers_[i]);
      if(!renderers_[i]->canComputeOpticFlow())
      {
        optic_flow_helpers_[i]->computeFromDepthAndTwist(sim_data_.groundtruth.angular_velocities_[i],
                                                         sim_data_.groundtruth.linear_velocities_[i],
                                                         sim_data_.depthmaps[i], sim_data_.optic_flows[i]);
      }
    }
  }

  // Adaptive sampling scheme based on predicted brightness change
  if(simulation_adaptive_sampling_method_ == 0)
  {
    // Predict brightness change based on image gradient and optic flow
    std::vector<FloatType> max_dLdts;
    for(size_t i=0; i<camera_rig_->size(); ++i)
    {
      max_dLdts.push_back(
            maxPredictedAbsBrightnessChange(sim_data_.images[i],
                                            sim_data_.optic_flows[i]));
    }
    const FloatType max_dLdt = *std::max_element(max_dLdts.begin(),
                                                 max_dLdts.end());

    VLOG(1) << "max(|dLdt|) = " << max_dLdt << " logDN/s";

    // Compute next sampling time
    // t_{k+1} = t_k + delta_t where
    // delta_t = lambda / max(|dL/dt|)
    const ze::real_t delta_t = simulation_adaptive_sampling_lambda_ / max_dLdt;
    VLOG(1) << "deltaT = " << 1000.0 * delta_t << " ms";

    next_t_flow_ = t_ + delta_t;
  }

  // Adaptive sampling scheme based on optic flow
  else {
    std::vector<FloatType> max_flow_magnitudes;
    for(size_t i=0; i<camera_rig_->size(); ++i)
    {
      max_flow_magnitudes.push_back(maxMagnitudeOpticFlow(sim_data_.optic_flows[i]));
    }
    const FloatType max_flow_magnitude = *std::max_element(max_flow_magnitudes.begin(), max_flow_magnitudes.end());

    VLOG(1) << "max(||optic_flow||) = " << max_flow_magnitude << " px/s";

    // Compute next sampling time
    // t_{k+1} = t_k + delta_t where
    // delta_t = lambda / max(||optic_flow||)
    const ze::real_t delta_t = simulation_adaptive_sampling_lambda_ / max_flow_magnitude;
    VLOG(1) << "deltaT = " << 1000.0 * delta_t << " ms";

    next_t_flow_ = t_ + delta_t;
  }

  last_t_frame_ = t_;
}

void DataProviderOnlineMoving3DCameraRig::setImuUpdated()
{
  // Set all the IMU-related flags to true, and all the frame-related flags to false
  sim_data_.imu_updated = true;
  sim_data_.twists_updated = true;
  sim_data_.poses_updated = true;
  sim_data_.images_updated = false;
  sim_data_.depthmaps_updated = false;
  sim_data_.optic_flows_updated = false;
}

void DataProviderOnlineMoving3DCameraRig::setFrameUpdated()
{
  // Set all the frame-related flags to true, and all the IMU-related flags to false
  sim_data_.imu_updated = false;
  sim_data_.twists_updated = true;
  sim_data_.poses_updated = true;
  sim_data_.images_updated = true;
  sim_data_.depthmaps_updated = true;
  sim_data_.optic_flows_updated = true;
}

void DataProviderOnlineMoving3DCameraRig::setAllUpdated()
{
  // Set all the flags to true to indicate that everything has been changed
  sim_data_.imu_updated = true;
  sim_data_.twists_updated = true;
  sim_data_.poses_updated = true;
  sim_data_.images_updated = true;
  sim_data_.depthmaps_updated = true;
  sim_data_.optic_flows_updated = true;
}

bool DataProviderOnlineMoving3DCameraRig::spinOnce()
{
  /* At what time do we need to sample "something" (and what "something")?
     We choose the next sampling time by considering the following constraints:

     1. The IMU sampling rate must be constant (typically, from 200 Hz to 1 kHz).
     2. The frame sampling rate must be greater than a minimum value (typically, from 20 Hz to 100 Hz).
     3. The pixel displacement between two successive frames must be lower than a threshold.

     * If the next sample needs to be an IMU sample, we just sample a new IMU value, without regenerating a frame,
       and transmit only the new IMU (+ poses, twists, IMU bias) to the publisher by setting the appropriate "data_changed" flags in the
       sim_data structure.

     * If the next sample needs to be a frame sample, we render a new frame (+ depth map + optic flow), but not a new IMU value.
       This can happen either because
       (i) a new frame must be rendered in order to guarantee that the displacement between frames is bounded, or
       (ii) the frame rate should be higher than a minimum (user-defined) value.

     At the beginning of time (t_ = trajectory_->start()), we sample everything (IMU + frame).
  */

  const ze::real_t next_t_imu = last_t_imu_ + dt_imu_;
  const ze::real_t next_t_frame = last_t_frame_ + dt_frame_;

  VLOG(2) << "t = " << t_;
  VLOG(2) << "next_t_imu = " << next_t_imu;
  VLOG(2) << "next_t_frame = " << next_t_frame;
  VLOG(2) << "next_t_flow = " << next_t_flow_;

  if(next_t_imu < next_t_frame && next_t_imu < next_t_flow_)
  {
    VLOG(2) << "Sample IMU";
    t_ = next_t_imu;
    sampleImu();
    setImuUpdated();
  }
  else if(next_t_flow_ < next_t_imu && next_t_flow_ < next_t_frame)
  {
    VLOG(2) << "Sample frame (because of optic flow)";
    t_ = next_t_flow_;
    sampleFrame();
    setFrameUpdated();
  }
  else if(next_t_frame < next_t_imu && next_t_frame < next_t_flow_)
  {
    VLOG(2) << "Sample frame (because of minimum framerate)";
    t_ = next_t_frame;
    sampleFrame();
    setFrameUpdated();
  }
  else
  {
    VLOG(2) << "Sample IMU and frame";
    t_ = next_t_frame;
    // In that case, we sample everything
    sampleImu();
    sampleFrame();
    setAllUpdated();
  }

  if(t_ > trajectory_->end())
  {
    running_ = false;
    return false;
  }

  if(callback_)
  {
    sim_data_.timestamp = static_cast<Time>(ze::secToNanosec(t_));
    callback_(sim_data_);
  }
  else
  {
    LOG_FIRST_N(WARNING, 1) << "No camera callback registered but measurements available.";
  }
  return true;
}

bool DataProviderOnlineMoving3DCameraRig::ok() const
{
  if (!running_)
  {
    VLOG(1) << "Data Provider was paused/terminated.";
    return false;
  }
  return true;
}

} // namespace event_camera_simulator
@ -0,0 +1,177 @@
#include <esim/data_provider/data_provider_online_simple.hpp>
#include <ze/common/time_conversions.hpp>
#include <esim/data_provider/renderer_factory.hpp>
#include <esim/common/utils.hpp>

DECLARE_double(simulation_post_gaussian_blur_sigma);

namespace event_camera_simulator {

DataProviderOnlineSimple::DataProviderOnlineSimple(ze::real_t simulation_minimum_framerate,
                                                   int simulation_adaptive_sampling_method,
                                                   ze::real_t simulation_adaptive_sampling_lambda)
  : DataProviderBase(DataProviderType::RendererOnline),
    simulation_minimum_framerate_(simulation_minimum_framerate),
    simulation_adaptive_sampling_method_(simulation_adaptive_sampling_method),
    simulation_adaptive_sampling_lambda_(simulation_adaptive_sampling_lambda),
    dt_frame_(1./simulation_minimum_framerate)
{
  CHECK(simulation_adaptive_sampling_method == 0
        || simulation_adaptive_sampling_method == 1);

  renderer_ = loadSimpleRendererFromGflags();

  const size_t num_cameras = 1u;

  for(size_t i=0; i<num_cameras; ++i)
  {
    const cv::Size size = cv::Size(renderer_->getWidth(),
                                   renderer_->getHeight());

    sim_data_.images.emplace_back(ImagePtr(new Image(size)));
    sim_data_.optic_flows.emplace_back(OpticFlowPtr(new OpticFlow(size)));

    sim_data_.images[i]->setTo(0);
    sim_data_.optic_flows[i]->setTo(0);
  }

  // At the initial time, we sample a frame + optic flow map
  t_ = 0.;
  sampleFrame();
  setFrameUpdated();
  if(callback_)
  {
    callback_(sim_data_);
  }
}

DataProviderOnlineSimple::~DataProviderOnlineSimple()
{
}

size_t DataProviderOnlineSimple::numCameras() const
{
  return 1u;
}

bool DataProviderOnlineSimple::sampleFrame()
{
  // Sample (i.e. render) a new frame (+ optic flow map),
  // and fill in the appropriate data in sim_data.
  // Compute the optic flow and the next sampling time, in order
  // to guarantee that the displacement is bounded by simulation_max_displacement_successive_frames
  CHECK(renderer_);
  bool is_finished = renderer_->render(ze::secToNanosec(t_),
                                       sim_data_.images[0],
                                       sim_data_.optic_flows[0]);

  if(is_finished)
  {
    return true;
  }

  // Optionally blur the rendered images slightly
  if(FLAGS_simulation_post_gaussian_blur_sigma > 0)
  {
    gaussianBlur(sim_data_.images[0], FLAGS_simulation_post_gaussian_blur_sigma);
  }

  // Adaptive sampling scheme based on predicted brightness change
  if(simulation_adaptive_sampling_method_ == 0)
  {
    // Predict brightness change based on image gradient and optic flow
    const FloatType max_dLdt = maxPredictedAbsBrightnessChange(sim_data_.images[0],
                                                               sim_data_.optic_flows[0]);

    VLOG(1) << "max(|dLdt|) = " << max_dLdt << " logDN/s";

    // Compute next sampling time
    // t_{k+1} = t_k + delta_t where
    // delta_t = lambda / max(|dL/dt|)
    const ze::real_t delta_t = simulation_adaptive_sampling_lambda_ / max_dLdt;
    VLOG(1) << "deltaT = " << 1000.0 * delta_t << " ms";

    next_t_adaptive_ = t_ + delta_t;
  }

  // Adaptive sampling scheme based on optic flow
  else {
    const FloatType max_flow_magnitude = maxMagnitudeOpticFlow(sim_data_.optic_flows[0]);

    VLOG(1) << "max(||optic_flow||) = " << max_flow_magnitude << " px/s";

    // Compute next sampling time
    // t_{k+1} = t_k + delta_t where
    // delta_t = lambda / max(||optic_flow||)
    const ze::real_t delta_t = simulation_adaptive_sampling_lambda_ / max_flow_magnitude;
    VLOG(1) << "deltaT = " << 1000.0 * delta_t << " ms";

    next_t_adaptive_ = t_ + delta_t;
  }

  last_t_frame_ = t_;

  return false;
}

void DataProviderOnlineSimple::setFrameUpdated()
{
  // Set all the frame-related flags to true, and all the IMU-related flags to false
  sim_data_.imu_updated = false;
  sim_data_.twists_updated = false;
  sim_data_.poses_updated = false;
  sim_data_.images_updated = true;
  sim_data_.depthmaps_updated = false;
  sim_data_.optic_flows_updated = true;
}

bool DataProviderOnlineSimple::spinOnce()
{
  const ze::real_t next_t_frame = last_t_frame_ + dt_frame_;

  VLOG(2) << "t = " << t_;
  VLOG(2) << "next_t_frame = " << next_t_frame;
  VLOG(2) << "next_t_flow = " << next_t_adaptive_;

  if(next_t_adaptive_ < next_t_frame)
  {
    VLOG(2) << "Sample frame (because of optic flow)";
    t_ = next_t_adaptive_;
  }
  else
  {
    VLOG(2) << "Sample frame (because of minimum framerate)";
    t_ = next_t_frame;
  }
  bool is_finished = sampleFrame();
  setFrameUpdated();

  if(is_finished)
  {
    running_ = false;
    return false;
  }

  if(callback_)
  {
    sim_data_.timestamp = static_cast<Time>(ze::secToNanosec(t_));
    callback_(sim_data_);
  }
  else
  {
    LOG_FIRST_N(WARNING, 1) << "No camera callback registered but measurements available.";
  }
  return true;
}

bool DataProviderOnlineSimple::ok() const
{
  if (!running_)
  {
    VLOG(1) << "Data Provider was paused/terminated.";
    return false;
  }
  return true;
}

} // namespace event_camera_simulator
@ -0,0 +1,242 @@
|
|||||||
|
#include <esim/data_provider/data_provider_rosbag.hpp>
|
||||||
|
|
||||||
|
#include <ze/common/logging.hpp>
|
||||||
|
#include <rosbag/query.h>
|
||||||
|
#include <cv_bridge/cv_bridge.h>
|
||||||
|
#include <sensor_msgs/image_encodings.h>
|
||||||
|
|
||||||
|
#include <ze/common/time_conversions.hpp>
|
||||||
|
#include <ze/common/string_utils.hpp>
|
||||||
|
#include <ze/common/path_utils.hpp>
|
||||||
|
|
||||||
|
DEFINE_int32(data_source_stop_after_n_frames, -1,
|
||||||
|
"How many frames should be processed?");
|
||||||
|
DEFINE_double(data_source_start_time_s, 0.0,
|
||||||
|
"Start time in seconds");
|
||||||
|
DEFINE_double(data_source_stop_time_s, 0.0,
|
||||||
|
"Stop time in seconds");
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
DataProviderRosbag::DataProviderRosbag(
|
||||||
|
const std::string& bag_filename,
|
||||||
|
const std::map<std::string, size_t>& img_topic_camidx_map)
|
||||||
|
: DataProviderBase(DataProviderType::Rosbag)
|
||||||
|
, img_topic_camidx_map_(img_topic_camidx_map)
|
||||||
|
{
|
||||||
|
loadRosbag(bag_filename);
|
||||||
|
|
||||||
|
std::vector<std::string> topics;
|
||||||
|
for (auto it : img_topic_camidx_map_)
|
||||||
|
{
|
||||||
|
VLOG(1) << "Subscribing to: " << it.first;
|
||||||
|
topics.push_back(it.first);
|
||||||
|
sim_data_.images.emplace_back(ImagePtr(new Image()));
|
||||||
|
}
|
||||||
|
|
||||||
|
initBagView(topics);
|
||||||
|
}
|
||||||
|
|
||||||
|
void DataProviderRosbag::loadRosbag(const std::string& bag_filename)
|
||||||
|
{
|
||||||
|
CHECK(ze::fileExists(bag_filename)) << "File does not exist: " << bag_filename;
|
||||||
|
VLOG(1) << "Opening rosbag: " << bag_filename << " ...";
|
||||||
|
bag_.reset(new rosbag::Bag);
|
||||||
|
try
|
||||||
|
{
|
||||||
|
bag_->open(bag_filename, rosbag::bagmode::Read);
|
||||||
|
}
|
||||||
|
catch (const std::exception e)
|
||||||
|
{
|
||||||
|
LOG(FATAL) << "Could not open rosbag " << bag_filename << ": " << e.what();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void DataProviderRosbag::initBagView(const std::vector<std::string>& topics)
|
||||||
|
{
|
||||||
|
bag_view_.reset(new rosbag::View(*bag_, rosbag::TopicQuery(topics)));
|
||||||
|
if (FLAGS_data_source_start_time_s != 0.0 ||
|
||||||
|
FLAGS_data_source_stop_time_s != 0.0)
|
||||||
|
{
|
||||||
|
CHECK_GE(FLAGS_data_source_start_time_s, 0);
|
||||||
|
CHECK_GE(FLAGS_data_source_stop_time_s, 0);
|
||||||
|
|
||||||
|
// Retrieve begin and end times from the bag file (given the topic query).
|
||||||
|
const ros::Time absolute_time_offset = bag_view_->getBeginTime();
|
||||||
|
VLOG(2) << "Bag begin time: " << absolute_time_offset;
|
||||||
|
const ros::Time absolute_end_time = bag_view_->getEndTime();
|
||||||
|
VLOG(2) << "Bag end time: " << absolute_end_time;
|
||||||
|
if (absolute_end_time < absolute_time_offset)
|
||||||
|
{
|
||||||
|
LOG(FATAL) << "Invalid bag end time: "
|
||||||
|
<< absolute_end_time
|
||||||
|
<< ". Check that the bag file is properly indexed"
|
||||||
|
<< " by running 'rosbag reindex file.bag'.";
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute start and stop time.
|
||||||
|
LOG(INFO) << "Starting rosbag at time: " << FLAGS_data_source_start_time_s;
|
||||||
|
const ros::Duration data_source_start_time(FLAGS_data_source_start_time_s);
|
||||||
|
const ros::Time absolute_start_time =
|
||||||
|
data_source_start_time.isZero() ?
|
||||||
|
absolute_time_offset : absolute_time_offset + data_source_start_time;
|
||||||
|
const ros::Duration data_source_stop_time(FLAGS_data_source_stop_time_s);
|
||||||
|
const ros::Time absolute_stop_time =
|
||||||
|
data_source_stop_time.isZero() ?
|
||||||
|
absolute_end_time : absolute_time_offset + data_source_stop_time;
|
||||||
|
|
||||||
|
// Ensure that the provided stop time is valid.
|
||||||
|
// When a bag file is corrupted / invalid the bag end time
|
||||||
|
// cannot be retrieved. Run rosbag info to check if the bag file
|
||||||
|
// is properly indexed.
|
||||||
|
if (absolute_stop_time < absolute_start_time)
|
||||||
|
{
|
||||||
|
LOG(ERROR) << "Provided stop time is less than bag begin time. "
|
||||||
|
<< "Please make sure to provide a valid stop time and "
|
||||||
|
<< "check that the bag file is properly indexed "
|
||||||
|
<< "by running 'rosbag reindex file.bag'.";
|
||||||
|
}
|
||||||
|
else if (absolute_stop_time > absolute_end_time)
|
||||||
|
{
|
||||||
|
LOG(ERROR) << "Provided stop time is greater than bag end time. "
|
||||||
|
<< "Please make sure to provide a valid stop time and "
|
||||||
|
<< "check that the bag file is properly indexed "
|
||||||
|
<< "by running 'rosbag reindex file.bag'.";
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
VLOG(1) << "Absolute start time set to " << absolute_start_time;
|
||||||
|
VLOG(1) << "Absolute stop time set to " << absolute_stop_time;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset the bag View
|
||||||
|
CHECK_GT(absolute_stop_time, absolute_start_time);
|
||||||
|
CHECK_LE(absolute_stop_time, absolute_end_time);
|
||||||
|
bag_view_.reset(new rosbag::View(*bag_, rosbag::TopicQuery(topics),
|
||||||
|
absolute_start_time, absolute_stop_time));
|
||||||
|
}
|
||||||
|
bag_view_it_ = bag_view_->begin();
|
||||||
|
|
||||||
|
// Ensure that topics exist
|
||||||
|
// The connection info only contains topics that are available in the bag
|
||||||
|
// If a topic is requested that is not avaiable, it does not show up in the info.
|
||||||
|
std::vector<const rosbag::ConnectionInfo*> connection_infos =
|
||||||
|
bag_view_->getConnections();
|
||||||
|
if (topics.size() != connection_infos.size())
|
||||||
|
{
|
||||||
|
LOG(ERROR) << "Successfully connected to " << connection_infos.size() << " topics:";
|
||||||
|
for (const rosbag::ConnectionInfo* info : connection_infos)
|
||||||
|
{
|
||||||
|
LOG(ERROR) << "*) " << info->topic;
|
||||||
|
}
|
||||||
|
LOG(ERROR) << "Requested " << topics.size() << " topics:";
|
||||||
|
for (const std::string topic : topics)
|
||||||
|
{
|
||||||
|
LOG(ERROR) << "*) " << topic;
|
||||||
|
}
|
||||||
|
LOG(FATAL) << "Not all requested topics founds in bagfile. "
|
||||||
|
<< "Is topic_cam0, topic_imu0, etc. set correctly? "
|
||||||
|
<< "Maybe removing/adding a slash as prefix solves the problem.";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t DataProviderRosbag::numCameras() const
|
||||||
|
{
|
||||||
|
return img_topic_camidx_map_.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool DataProviderRosbag::spinOnce()
|
||||||
|
{
|
||||||
|
if (bag_view_it_ != bag_view_->end())
|
||||||
|
{
|
||||||
|
const rosbag::MessageInstance m = *bag_view_it_;
|
||||||
|
|
||||||
|
// Camera Messages:
|
||||||
|
const sensor_msgs::ImageConstPtr m_img = m.instantiate<sensor_msgs::Image>();
|
||||||
|
if (m_img && callback_)
|
||||||
|
{
|
||||||
|
if (!cameraSpin(m_img, m))
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
LOG_FIRST_N(WARNING, 1) << "No camera callback registered but measurements available";
|
||||||
|
}
|
||||||
|
|
||||||
|
++bag_view_it_;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool DataProviderRosbag::cameraSpin(sensor_msgs::ImageConstPtr m_img,
|
||||||
|
const rosbag::MessageInstance& m)
|
||||||
|
{
|
||||||
|
auto it = img_topic_camidx_map_.find(m.getTopic());
|
||||||
|
if (it != img_topic_camidx_map_.end())
|
||||||
|
{
|
||||||
|
++n_processed_images_;
|
||||||
|
if (FLAGS_data_source_stop_after_n_frames > 0 &&
|
||||||
|
n_processed_images_ > FLAGS_data_source_stop_after_n_frames)
|
||||||
|
{
|
||||||
|
LOG(WARNING) << "Data source has reached max number of desired frames.";
|
||||||
|
running_ = false;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
cv_bridge::CvImagePtr cv_ptr;
|
||||||
|
|
||||||
|
try
|
||||||
|
{
|
||||||
|
cv_ptr = cv_bridge::toCvCopy(m_img, sensor_msgs::image_encodings::MONO8);
|
||||||
|
}
|
||||||
|
catch (cv_bridge::Exception& e)
|
||||||
|
{
|
||||||
|
LOG(WARNING) << "cv_bridge exception: %s", e.what();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
cv_ptr->image.convertTo(*(sim_data_.images[0]), cv::DataType<ImageFloatType>::type, 1./255.);
|
||||||
|
sim_data_.timestamp = static_cast<Time>(m_img->header.stamp.toNSec());
|
||||||
|
|
||||||
|
sim_data_.imu_updated = false;
|
||||||
|
sim_data_.images_updated = true;
|
||||||
|
sim_data_.depthmaps_updated = false;
|
||||||
|
sim_data_.optic_flows_updated = false;
|
||||||
|
sim_data_.twists_updated = false;
|
||||||
|
sim_data_.poses_updated = false;
|
||||||
|
|
||||||
|
callback_(sim_data_);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
LOG_FIRST_N(WARNING, 1) << "Topic in bag that is not subscribed: " << m.getTopic();
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool DataProviderRosbag::ok() const
|
||||||
|
{
|
||||||
|
if (!running_)
|
||||||
|
{
|
||||||
|
VLOG(1) << "Data Provider was paused/terminated.";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (bag_view_it_ == bag_view_->end())
|
||||||
|
{
|
||||||
|
VLOG(1) << "All data processed.";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t DataProviderRosbag::size() const
|
||||||
|
{
|
||||||
|
CHECK(bag_view_);
|
||||||
|
return bag_view_->size();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
|
@ -0,0 +1,199 @@
|
|||||||
|
#include <esim/common/utils.hpp>
|
||||||
|
#include <esim/data_provider/renderer_factory.hpp>
|
||||||
|
#include <esim/imp_planar_renderer/planar_renderer.hpp>
|
||||||
|
#include <esim/imp_panorama_renderer/panorama_renderer.hpp>
|
||||||
|
#include <esim/imp_opengl_renderer/opengl_renderer.hpp>
|
||||||
|
#include <esim/imp_unrealcv_renderer/unrealcv_renderer.hpp>
|
||||||
|
#include <esim/imp_multi_objects_2d/imp_multi_objects_2d_renderer.hpp>
|
||||||
|
#include <ze/cameras/camera_models.hpp>
|
||||||
|
#include <ze/cameras/camera_impl.hpp>
|
||||||
|
#include <ze/common/logging.hpp>
|
||||||
|
#include <opencv2/imgcodecs/imgcodecs.hpp>
|
||||||
|
#include <opencv2/imgproc/imgproc.hpp>
|
||||||
|
|
||||||
|
DEFINE_int32(renderer_type, 0, " 0: Planar renderer, 1: Panorama renderer, 2: OpenGL renderer, 3: UnrealCV renderer");
|
||||||
|
|
||||||
|
DEFINE_string(renderer_texture, "",
|
||||||
|
"Path to image which will be used to texture the plane");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_hfov_cam_source_deg, 130.0,
|
||||||
|
"Horizontal FoV of the source camera (that captured the image on the plane)");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_preprocess_median_blur, 0,
|
||||||
|
"Kernel size of the preprocessing median blur.");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_preprocess_gaussian_blur, 0,
|
||||||
|
"Amount of preprocessing Gaussian blur.");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_plane_x, 0.0,
|
||||||
|
"x position of the center of the plane, in world coordinates");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_plane_y, 0.0,
|
||||||
|
"y position of the center of the plane, in world coordinates");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_plane_z, -1.0,
|
||||||
|
"z position of the center of the plane, in world coordinates");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_plane_qw, 0.0,
|
||||||
|
"w component of the quaternion q_W_P (orientation of the plane with respect to the world)");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_plane_qx, 1.0,
|
||||||
|
"x component of the quaternion q_W_P (orientation of the plane with respect to the world)");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_plane_qy, 0.0,
|
||||||
|
"y component of the quaternion q_W_P (orientation of the plane with respect to the world)");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_plane_qz, 0.0,
|
||||||
|
"z component of the quaternion q_W_P (orientation of the plane with respect to the world)");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_z_min, 0.01,
|
||||||
|
"Minimum clipping distance.");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_z_max, 10.0,
|
||||||
|
"Maximum clipping distance.");
|
||||||
|
|
||||||
|
DEFINE_bool(renderer_extend_border, false,
|
||||||
|
"Whether to extend the borders of the plane to infinity or not.");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_panorama_qw, 0.0,
|
||||||
|
"w component of the quaternion q_W_P (orientation of the panorama with respect to the world)");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_panorama_qx, 1.0,
|
||||||
|
"x component of the quaternion q_W_P (orientation of the panorama with respect to the world)");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_panorama_qy, 0.0,
|
||||||
|
"y component of the quaternion q_W_P (orientation of the panorama with respect to the world)");
|
||||||
|
|
||||||
|
DEFINE_double(renderer_panorama_qz, 0.0,
|
||||||
|
"z component of the quaternion q_W_P (orientation of the panorama with respect to the world)");
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
bool loadPreprocessedImage(const std::string& path_to_img, cv::Mat* img)
|
||||||
|
{
|
||||||
|
CHECK(img);
|
||||||
|
VLOG(1) << "Loading texture file from file: " << FLAGS_renderer_texture << ".";
|
||||||
|
*img = cv::imread(path_to_img, 0);
|
||||||
|
if(!img->data)
|
||||||
|
{
|
||||||
|
LOG(FATAL) << "Could not open image at: " << FLAGS_renderer_texture << ".";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if(FLAGS_renderer_preprocess_median_blur > 1)
|
||||||
|
{
|
||||||
|
VLOG(1) << "Pre-filtering the texture with median filter of size: "
|
||||||
|
<< FLAGS_renderer_preprocess_median_blur << ".";
|
||||||
|
cv::medianBlur(*img, *img, FLAGS_renderer_preprocess_median_blur);
|
||||||
|
}
|
||||||
|
|
||||||
|
if(FLAGS_renderer_preprocess_gaussian_blur > 0)
|
||||||
|
{
|
||||||
|
VLOG(1) << "Pre-filtering the texture with gaussian filter of size: "
|
||||||
|
<< FLAGS_renderer_preprocess_gaussian_blur << ".";
|
||||||
|
cv::GaussianBlur(*img, *img, cv::Size(21,21), FLAGS_renderer_preprocess_gaussian_blur);
|
||||||
|
}
|
||||||
|
|
||||||
|
img->convertTo(*img, cv::DataType<ImageFloatType>::type, 1.0/255.0);
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
Renderer::Ptr loadRendererFromGflags()
|
||||||
|
{
|
||||||
|
Renderer::Ptr renderer;
|
||||||
|
|
||||||
|
switch (FLAGS_renderer_type)
|
||||||
|
{
|
||||||
|
case 0: // Planar renderer
|
||||||
|
{
|
||||||
|
cv::Mat img_src;
|
||||||
|
if(!loadPreprocessedImage(FLAGS_renderer_texture, &img_src))
|
||||||
|
{
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
double f_src = hfovToFocalLength(FLAGS_renderer_hfov_cam_source_deg, img_src.cols);
|
||||||
|
Camera::Ptr cam_src = std::make_shared<ze::PinholeCamera>(
|
||||||
|
img_src.cols, img_src.rows, ze::CameraType::Pinhole,
|
||||||
|
(Vector4() << f_src, f_src, 0.5 * img_src.cols, 0.5 * img_src.rows).finished(),
|
||||||
|
ze::VectorX());
|
||||||
|
|
||||||
|
Transformation T_W_P;
|
||||||
|
T_W_P.getPosition() = ze::Position(FLAGS_renderer_plane_x,
|
||||||
|
FLAGS_renderer_plane_y,
|
||||||
|
FLAGS_renderer_plane_z);
|
||||||
|
|
||||||
|
T_W_P.getRotation() = ze::Quaternion(FLAGS_renderer_plane_qw,
|
||||||
|
FLAGS_renderer_plane_qx,
|
||||||
|
FLAGS_renderer_plane_qy,
|
||||||
|
FLAGS_renderer_plane_qz);
|
||||||
|
|
||||||
|
renderer.reset(new PlanarRenderer(img_src, cam_src,
|
||||||
|
T_W_P,
|
||||||
|
FLAGS_renderer_z_min,
|
||||||
|
FLAGS_renderer_z_max,
|
||||||
|
FLAGS_renderer_extend_border));
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case 1: // Panorama renderer
|
||||||
|
{
|
||||||
|
cv::Mat img_src;
|
||||||
|
if(!loadPreprocessedImage(FLAGS_renderer_texture, &img_src))
|
||||||
|
{
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
Transformation::Rotation R_W_P;
|
||||||
|
R_W_P = ze::Quaternion(FLAGS_renderer_panorama_qw,
|
||||||
|
FLAGS_renderer_panorama_qx,
|
||||||
|
FLAGS_renderer_panorama_qy,
|
||||||
|
FLAGS_renderer_panorama_qz);
|
||||||
|
|
||||||
|
renderer.reset(new PanoramaRenderer(img_src, R_W_P));
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case 2: // OpenGL renderer
|
||||||
|
{
|
||||||
|
renderer.reset(new OpenGLRenderer());
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case 3: // UnrealCV renderer
|
||||||
|
{
|
||||||
|
renderer.reset(new UnrealCvRenderer());
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
{
|
||||||
|
LOG(FATAL) << "Renderer type is not known.";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return renderer;
|
||||||
|
}
|
||||||
|
|
||||||
|
SimpleRenderer::Ptr loadSimpleRendererFromGflags()
|
||||||
|
{
|
||||||
|
SimpleRenderer::Ptr renderer;
|
||||||
|
|
||||||
|
switch (FLAGS_renderer_type)
|
||||||
|
{
|
||||||
|
case 0: // Multi-objects 2D renderer
|
||||||
|
{
|
||||||
|
renderer.reset(new MultiObject2DRenderer());
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
{
|
||||||
|
LOG(FATAL) << "Renderer type is not known.";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return renderer;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
|
8
event_camera_simulator/esim_msgs/CMakeLists.txt
Normal file
8
event_camera_simulator/esim_msgs/CMakeLists.txt
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
cmake_minimum_required(VERSION 2.8.3)
|
||||||
|
project("esim_msgs" CXX)
|
||||||
|
|
||||||
|
find_package(catkin_simple REQUIRED)
|
||||||
|
catkin_simple(ALL_DEPS_REQUIRED)
|
||||||
|
|
||||||
|
cs_install()
|
||||||
|
cs_export()
|
11
event_camera_simulator/esim_msgs/msg/OpticFlow.msg
Normal file
11
event_camera_simulator/esim_msgs/msg/OpticFlow.msg
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
# This message contains an optic flow map
|
||||||
|
# (0, 0) is at top-left corner of image
|
||||||
|
# The optic flow x and y components are expressed in px/s and encoded with floating point accuracy.
|
||||||
|
# The memory layout of the data is row-major.
|
||||||
|
|
||||||
|
Header header
|
||||||
|
uint32 height
|
||||||
|
uint32 width
|
||||||
|
|
||||||
|
float32[] flow_x
|
||||||
|
float32[] flow_y
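Since the message documents a row-major layout with flow expressed in px/s, a subscriber can index the flow of a single pixel directly. The following is a minimal sketch, assuming the C++ type generated from this definition (esim_msgs/OpticFlow.h); the helper name flowAt is illustrative and not part of the package:

#include <esim_msgs/OpticFlow.h>
#include <cstddef>
#include <utility>

// Returns the optic flow (in px/s) at pixel (x, y), using the row-major
// layout documented in the message definition above.
inline std::pair<float, float> flowAt(const esim_msgs::OpticFlow& msg,
                                      uint32_t x, uint32_t y)
{
  const std::size_t idx = static_cast<std::size_t>(y) * msg.width + x;
  return {msg.flow_x[idx], msg.flow_y[idx]};
}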
|
19
event_camera_simulator/esim_msgs/package.xml
Normal file
19
event_camera_simulator/esim_msgs/package.xml
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
<?xml version="1.0"?>
|
||||||
|
<package format="2">
|
||||||
|
<name>esim_msgs</name>
|
||||||
|
<version>0.0.0</version>
|
||||||
|
<description>ROS message definitions for the event camera simulator.</description>
|
||||||
|
<maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>
|
||||||
|
<license>GNU GPL</license>
|
||||||
|
|
||||||
|
<buildtool_depend>catkin</buildtool_depend>
|
||||||
|
<buildtool_depend>catkin_simple</buildtool_depend>
|
||||||
|
|
||||||
|
<depend>roscpp</depend>
|
||||||
|
<depend>std_msgs</depend>
|
||||||
|
<depend>sensor_msgs</depend>
|
||||||
|
|
||||||
|
<build_depend>message_generation</build_depend>
|
||||||
|
<exec_depend>message_runtime</exec_depend>
|
||||||
|
|
||||||
|
</package>
|
22
event_camera_simulator/esim_rendering/CMakeLists.txt
Normal file
22
event_camera_simulator/esim_rendering/CMakeLists.txt
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
cmake_minimum_required(VERSION 2.8.3)
|
||||||
|
project(esim_rendering)
|
||||||
|
|
||||||
|
find_package(catkin_simple REQUIRED)
|
||||||
|
find_package(OpenCV REQUIRED)
|
||||||
|
include_directories(${OpenCV_INCLUDE_DIRS})
|
||||||
|
catkin_simple()
|
||||||
|
|
||||||
|
set(HEADERS
|
||||||
|
include/esim/rendering/renderer_base.hpp
|
||||||
|
include/esim/rendering/simple_renderer_base.hpp
|
||||||
|
)
|
||||||
|
|
||||||
|
set(SOURCES
|
||||||
|
src/renderer_base.cpp
|
||||||
|
src/simple_renderer_base.cpp
|
||||||
|
)
|
||||||
|
|
||||||
|
cs_add_library(${PROJECT_NAME} ${SOURCES} ${HEADERS})
|
||||||
|
|
||||||
|
cs_install()
|
||||||
|
cs_export()
|
@ -0,0 +1,49 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <ze/common/macros.hpp>
|
||||||
|
#include <esim/common/types.hpp>
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
//! Represents a rendering engine that generates images (and other outputs,
|
||||||
|
//! such as depth maps, or optical flow maps) given a scene and a camera position.
|
||||||
|
class Renderer
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
ZE_POINTER_TYPEDEFS(Renderer);
|
||||||
|
|
||||||
|
Renderer() {}
|
||||||
|
|
||||||
|
//! Render an image and a depth map at a given pose.
|
||||||
|
virtual void render(const Transformation& T_W_C,
|
||||||
|
const std::vector<Transformation>& T_W_OBJ,
|
||||||
|
const ImagePtr& out_image,
|
||||||
|
const DepthmapPtr& out_depthmap) const = 0;
|
||||||
|
|
||||||
|
|
||||||
|
//! Returns true if the rendering engine can compute optic flow, false otherwise
|
||||||
|
virtual bool canComputeOpticFlow() const = 0;
|
||||||
|
|
||||||
|
//! Render an image + depth map + optic flow map at a given pose,
|
||||||
|
//! given the camera linear and angular velocity
|
||||||
|
virtual void renderWithFlow(const Transformation& T_W_C,
|
||||||
|
const LinearVelocity& v_WC,
|
||||||
|
const AngularVelocity& w_WC,
|
||||||
|
const std::vector<Transformation>& T_W_OBJ,
|
||||||
|
const std::vector<LinearVelocity>& linear_velocity_obj,
|
||||||
|
const std::vector<AngularVelocity>& angular_velocity_obj,
|
||||||
|
const ImagePtr& out_image,
|
||||||
|
const DepthmapPtr& out_depthmap,
|
||||||
|
const OpticFlowPtr& optic_flow_map) const {}
|
||||||
|
|
||||||
|
//! Sets the camera
|
||||||
|
virtual void setCamera(const ze::Camera::Ptr& camera) = 0;
|
||||||
|
|
||||||
|
//! Get the camera
|
||||||
|
const ze::Camera::Ptr& getCamera() const { return camera_; }
|
||||||
|
|
||||||
|
protected:
|
||||||
|
ze::Camera::Ptr camera_;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
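For orientation, the interface above is meant to be used by first handing the renderer a camera and then querying it per pose. A minimal usage sketch, assuming a concrete Renderer `renderer`, a calibrated ze::Camera::Ptr `camera`, a pose `T_W_C`, and pre-allocated `image` / `depthmap` buffers of the camera's resolution (ImagePtr / DepthmapPtr from esim/common/types.hpp); all of these names are placeholders, not part of this header:

// Register the camera once, then render as often as needed.
renderer->setCamera(camera);

// Render the scene from pose T_W_C; no independently moving objects in this sketch.
renderer->render(T_W_C, std::vector<Transformation>(), image, depthmap);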
|
@ -0,0 +1,32 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <ze/common/macros.hpp>
|
||||||
|
#include <esim/common/types.hpp>
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
//! Represents a rendering engine that generates images + optic flow maps
|
||||||
|
//! The rendering engine takes care of managing the environment and camera trajectory in the environment
|
||||||
|
class SimpleRenderer
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
ZE_POINTER_TYPEDEFS(SimpleRenderer);
|
||||||
|
|
||||||
|
SimpleRenderer() {}
|
||||||
|
|
||||||
|
//! Render an image + optic flow map at a given time t.
|
||||||
|
//! The rendering engine takes care of generating the camera trajectory, etc.
|
||||||
|
virtual bool render(const Time t,
|
||||||
|
const ImagePtr& out_image,
|
||||||
|
const OpticFlowPtr& optic_flow_map) const = 0;
|
||||||
|
|
||||||
|
//! Get the width of the image plane
|
||||||
|
virtual int getWidth() const = 0;
|
||||||
|
|
||||||
|
//! Get the height of the image plane
|
||||||
|
virtual int getHeight() const = 0;
|
||||||
|
|
||||||
|
protected:
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
|
20
event_camera_simulator/esim_rendering/package.xml
Normal file
20
event_camera_simulator/esim_rendering/package.xml
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
<?xml version="1.0"?>
|
||||||
|
<package format="2">
|
||||||
|
<name>esim_rendering</name>
|
||||||
|
<version>0.0.0</version>
|
||||||
|
<description>Rendering engines for the event camera simulator.</description>
|
||||||
|
|
||||||
|
<maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>
|
||||||
|
|
||||||
|
<license>GPLv3</license>
|
||||||
|
|
||||||
|
<buildtool_depend>catkin</buildtool_depend>
|
||||||
|
<buildtool_depend>catkin_simple</buildtool_depend>
|
||||||
|
|
||||||
|
<depend>esim_common</depend>
|
||||||
|
<depend>ze_common</depend>
|
||||||
|
<depend>ze_cameras</depend>
|
||||||
|
<depend>gflags_catkin</depend>
|
||||||
|
<depend>glog_catkin</depend>
|
||||||
|
|
||||||
|
</package>
|
@ -0,0 +1,5 @@
|
|||||||
|
#include <esim/rendering/renderer_base.hpp>
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
|
@ -0,0 +1,5 @@
|
|||||||
|
#include <esim/rendering/simple_renderer_base.hpp>
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
|
23
event_camera_simulator/esim_ros/CMakeLists.txt
Normal file
23
event_camera_simulator/esim_ros/CMakeLists.txt
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
cmake_minimum_required(VERSION 2.8.3)
|
||||||
|
project(esim_ros)
|
||||||
|
|
||||||
|
find_package(catkin_simple REQUIRED)
|
||||||
|
find_package(OpenCV REQUIRED)
|
||||||
|
catkin_simple()
|
||||||
|
|
||||||
|
set(HEADERS
|
||||||
|
)
|
||||||
|
|
||||||
|
set(SOURCES
|
||||||
|
src/esim_node.cpp
|
||||||
|
)
|
||||||
|
|
||||||
|
cs_add_library(${PROJECT_NAME} ${SOURCES} ${HEADERS})
|
||||||
|
|
||||||
|
# make the executable
|
||||||
|
cs_add_executable(esim_node src/esim_node.cpp)
|
||||||
|
|
||||||
|
# link the executable to the necessary libs
|
||||||
|
target_link_libraries(esim_node ${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBRARIES})
|
||||||
|
|
||||||
|
cs_install()
|
27
event_camera_simulator/esim_ros/cfg/calib/example.conf
Normal file
27
event_camera_simulator/esim_ros/cfg/calib/example.conf
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
label: "simulated_camera"
|
||||||
|
id: 433585ebda8d1431223927a14788a127
|
||||||
|
cameras:
|
||||||
|
- camera:
|
||||||
|
label: dvs0
|
||||||
|
id: a0fba5412e961934d842d3a2a78e5cba
|
||||||
|
line-delay-nanoseconds: 0
|
||||||
|
image_height: 180
|
||||||
|
image_width: 240
|
||||||
|
type: pinhole
|
||||||
|
intrinsics:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [198.245521288, 198.277025706, 120.0, 90.0]
|
||||||
|
distortion:
|
||||||
|
type: none
|
||||||
|
parameters:
|
||||||
|
cols: 1
|
||||||
|
rows: 0
|
||||||
|
data: []
|
||||||
|
T_B_C:
|
||||||
|
cols: 4
|
||||||
|
rows: 4
|
||||||
|
data: [1, 0, 0, 0,
|
||||||
|
0, 0, 1, 0,
|
||||||
|
0, -1, 0, 0,
|
||||||
|
0, 0, 0, 1]
|
27
event_camera_simulator/esim_ros/cfg/calib/ijrr.yaml
Normal file
27
event_camera_simulator/esim_ros/cfg/calib/ijrr.yaml
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
label: "DAVIS-IJRR17"
|
||||||
|
id: a0652606b3d9fd6b62f5448a9e1304be
|
||||||
|
cameras:
|
||||||
|
- camera:
|
||||||
|
label: dvs
|
||||||
|
id: 07e806d3367b4a6fa18a52178e4bbaf9
|
||||||
|
line-delay-nanoseconds: 0
|
||||||
|
image_height: 180
|
||||||
|
image_width: 240
|
||||||
|
type: pinhole
|
||||||
|
intrinsics:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [199.37143467278824, 199.7078674353677, 133.75438071590816, 113.99216995027632]
|
||||||
|
distortion:
|
||||||
|
type: radial-tangential
|
||||||
|
parameters:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [-0.38289915571515265, 0.18933636521343228, -0.0010024743349031492, -0.0005637766302076818]
|
||||||
|
T_B_C:
|
||||||
|
cols: 4
|
||||||
|
rows: 4
|
||||||
|
data: [0.999905117246, -0.0122374970355, 0.00632456886338, 0.00674696422488,
|
||||||
|
0.0121780171243, 0.999882045134, 0.00935904469797, 0.0007279224709,
|
||||||
|
-0.00643835413146, -0.00928113597812, 0.99993620202, 0.0342573613538,
|
||||||
|
0.0, 0.0, 0.0, 1.0]
|
27
event_camera_simulator/esim_ros/cfg/calib/pinhole_mono.yaml
Normal file
27
event_camera_simulator/esim_ros/cfg/calib/pinhole_mono.yaml
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
label: "simulated_camera"
|
||||||
|
id: 433585ebda8d1431223927a14788a127
|
||||||
|
cameras:
|
||||||
|
- camera:
|
||||||
|
label: dvs0
|
||||||
|
id: a0fba5412e961934d842d3a2a78e5cba
|
||||||
|
line-delay-nanoseconds: 0
|
||||||
|
image_height: 180
|
||||||
|
image_width: 240
|
||||||
|
type: pinhole
|
||||||
|
intrinsics:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [198.245521288, 198.277025706, 142.064861206, 100.903484508]
|
||||||
|
distortion:
|
||||||
|
type: equidistant
|
||||||
|
parameters:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [-0.0506755889541, 0.0456313630037, -0.0825742639337, 0.0557104403236]
|
||||||
|
T_B_C:
|
||||||
|
cols: 4
|
||||||
|
rows: 4
|
||||||
|
data: [1, 0, 0, 0,
|
||||||
|
0, -1, 0, 0,
|
||||||
|
0, 0, -1, 0,
|
||||||
|
0, 0, 0, 1]
|
@ -0,0 +1,27 @@
|
|||||||
|
label: "simulated_camera"
|
||||||
|
id: 433585ebda8d1431223927a14788a127
|
||||||
|
cameras:
|
||||||
|
- camera:
|
||||||
|
label: dvs0
|
||||||
|
id: a0fba5412e961934d842d3a2a78e5cba
|
||||||
|
line-delay-nanoseconds: 0
|
||||||
|
image_height: 180
|
||||||
|
image_width: 240
|
||||||
|
type: pinhole
|
||||||
|
intrinsics:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [200.0, 200.0, 120.0, 90.0]
|
||||||
|
distortion:
|
||||||
|
type: none
|
||||||
|
parameters:
|
||||||
|
cols: 1
|
||||||
|
rows: 0
|
||||||
|
data: []
|
||||||
|
T_B_C:
|
||||||
|
cols: 4
|
||||||
|
rows: 4
|
||||||
|
data: [1, 0, 0, 0,
|
||||||
|
0, -1, 0, 0,
|
||||||
|
0, 0, -1, 0,
|
||||||
|
0, 0, 0, 1]
|
@ -0,0 +1,27 @@
|
|||||||
|
label: "simulated_camera"
|
||||||
|
id: 433585ebda8d1431223927a14788a127
|
||||||
|
cameras:
|
||||||
|
- camera:
|
||||||
|
label: dvs0
|
||||||
|
id: a0fba5412e961934d842d3a2a78e5cba
|
||||||
|
line-delay-nanoseconds: 0
|
||||||
|
image_height: 180
|
||||||
|
image_width: 240
|
||||||
|
type: pinhole
|
||||||
|
intrinsics:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [200.0, 200.0, 120.0, 90.0]
|
||||||
|
distortion:
|
||||||
|
type: none
|
||||||
|
parameters:
|
||||||
|
cols: 1
|
||||||
|
rows: 0
|
||||||
|
data: []
|
||||||
|
T_B_C:
|
||||||
|
cols: 4
|
||||||
|
rows: 4
|
||||||
|
data: [-1, 0, 0, 0,
|
||||||
|
0, 0, -1, 0,
|
||||||
|
0, -1, 0, 0,
|
||||||
|
0, 0, 0, 1]
|
@ -0,0 +1,27 @@
|
|||||||
|
label: "simulated_camera"
|
||||||
|
id: 433585ebda8d1431223927a14788a127
|
||||||
|
cameras:
|
||||||
|
- camera:
|
||||||
|
label: dvs0
|
||||||
|
id: a0fba5412e961934d842d3a2a78e5cba
|
||||||
|
line-delay-nanoseconds: 0
|
||||||
|
image_height: 260
|
||||||
|
image_width: 346
|
||||||
|
type: pinhole
|
||||||
|
intrinsics:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [235.90001575, 235.84153994, 170.45862273, 130.11022812]
|
||||||
|
distortion:
|
||||||
|
type: equidistant
|
||||||
|
parameters:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [-0.13853153, -0.04153486, 0.00805515, 0.00423045]
|
||||||
|
T_B_C:
|
||||||
|
cols: 4
|
||||||
|
rows: 4
|
||||||
|
data: [1, 0, 0, 0,
|
||||||
|
0, 0, 1, 0,
|
||||||
|
0, -1, 0, 0,
|
||||||
|
0, 0, 0, 1]
|
27
event_camera_simulator/esim_ros/cfg/calib/quadrotor.yaml
Normal file
27
event_camera_simulator/esim_ros/cfg/calib/quadrotor.yaml
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
label: "simulated_camera"
|
||||||
|
id: 433585ebda8d1431223927a14788a127
|
||||||
|
cameras:
|
||||||
|
- camera:
|
||||||
|
label: dvs0
|
||||||
|
id: a0fba5412e961934d842d3a2a78e5cba
|
||||||
|
line-delay-nanoseconds: 0
|
||||||
|
image_height: 180
|
||||||
|
image_width: 240
|
||||||
|
type: pinhole
|
||||||
|
intrinsics:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [250.0, 250.0, 120.0, 90.0]
|
||||||
|
distortion:
|
||||||
|
type: radial-tangential
|
||||||
|
parameters:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [0.0,0.0,0.0,0.0]
|
||||||
|
T_B_C:
|
||||||
|
cols: 4
|
||||||
|
rows: 4
|
||||||
|
data: [1, 0, 0, 0,
|
||||||
|
0, -1, 0, 0,
|
||||||
|
0, 0, -1, 0,
|
||||||
|
0, 0, 0, 1]
|
54
event_camera_simulator/esim_ros/cfg/example.conf
Normal file
54
event_camera_simulator/esim_ros/cfg/example.conf
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
--vmodule=data_provider_online_render=0
|
||||||
|
--random_seed=50
|
||||||
|
--data_source=0
|
||||||
|
#--path_to_output_bag=/tmp/out.bag
|
||||||
|
|
||||||
|
--contrast_threshold_pos=0.5
|
||||||
|
--contrast_threshold_neg=0.5
|
||||||
|
--contrast_threshold_sigma_pos=0
|
||||||
|
--contrast_threshold_sigma_neg=0
|
||||||
|
|
||||||
|
--exposure_time_ms=12.0
|
||||||
|
--use_log_image=1
|
||||||
|
--log_eps=0.001
|
||||||
|
|
||||||
|
--calib_filename=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/esim_ros/cfg/calib/pinhole_mono_nodistort.yaml
|
||||||
|
|
||||||
|
--renderer_type=0
|
||||||
|
--renderer_texture=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/imp/imp_planar_renderer/textures/office.jpg
|
||||||
|
--renderer_hfov_cam_source_deg=85.0
|
||||||
|
--renderer_preprocess_gaussian_blur=2.0
|
||||||
|
--renderer_preprocess_median_blur=13
|
||||||
|
--renderer_plane_x=0.0
|
||||||
|
--renderer_plane_y=0.0
|
||||||
|
--renderer_plane_z=-1.0
|
||||||
|
--renderer_plane_qw=0.0
|
||||||
|
--renderer_plane_qx=1.0
|
||||||
|
--renderer_plane_qy=0.0
|
||||||
|
--renderer_plane_qz=0.0
|
||||||
|
--renderer_extend_border=1
|
||||||
|
--renderer_zmin=1.0
|
||||||
|
|
||||||
|
--trajectory_type=0
|
||||||
|
--trajectory_length_s=100.0
|
||||||
|
--trajectory_sampling_frequency_hz=5
|
||||||
|
--trajectory_spline_order=5
|
||||||
|
--trajectory_num_spline_segments=100
|
||||||
|
--trajectory_lambda=0.1
|
||||||
|
--trajectory_multiplier_x=0.5
|
||||||
|
--trajectory_multiplier_y=0.5
|
||||||
|
--trajectory_multiplier_z=0.25
|
||||||
|
--trajectory_multiplier_wx=0.15
|
||||||
|
--trajectory_multiplier_wy=0.15
|
||||||
|
--trajectory_multiplier_wz=0.3
|
||||||
|
|
||||||
|
--simulation_minimum_framerate=50.0
|
||||||
|
--simulation_imu_rate=1000.0
|
||||||
|
--simulation_adaptive_sampling_method=0
|
||||||
|
--simulation_adaptive_sampling_lambda=0.5
|
||||||
|
|
||||||
|
--ros_publisher_frame_rate=30
|
||||||
|
--ros_publisher_depth_rate=10
|
||||||
|
--ros_publisher_optic_flow_rate=10
|
||||||
|
--ros_publisher_pointcloud_rate=10
|
||||||
|
--ros_publisher_camera_info_rate=10
|
29
event_camera_simulator/esim_ros/cfg/multi_objects.conf
Normal file
29
event_camera_simulator/esim_ros/cfg/multi_objects.conf
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
--vmodule=data_provider_online_simple=0
|
||||||
|
--data_source=1
|
||||||
|
--path_to_output_bag=/tmp/out.bag
|
||||||
|
--path_to_sequence_file=/home/user/esim_ws/src/event_camera_simulator/event_camera_simulator/imp/imp_multi_objects_2d/scenes/example.scene
|
||||||
|
|
||||||
|
--contrast_threshold_pos=0.5
|
||||||
|
--contrast_threshold_neg=0.5
|
||||||
|
--contrast_threshold_sigma_pos=0
|
||||||
|
--contrast_threshold_sigma_neg=0
|
||||||
|
|
||||||
|
--exposure_time_ms=12.0
|
||||||
|
--use_log_image=1
|
||||||
|
--log_eps=0.001
|
||||||
|
|
||||||
|
--renderer_type=0
|
||||||
|
--renderer_preprocess_median_blur=11
|
||||||
|
--renderer_preprocess_gaussian_blur=1.0
|
||||||
|
|
||||||
|
--simulation_minimum_framerate=20.0
|
||||||
|
--simulation_imu_rate=1000.0
|
||||||
|
--simulation_adaptive_sampling_method=1
|
||||||
|
--simulation_adaptive_sampling_lambda=0.5
|
||||||
|
|
||||||
|
--ros_publisher_frame_rate=40
|
||||||
|
--ros_publisher_depth_rate=10
|
||||||
|
--ros_publisher_optic_flow_rate=40
|
||||||
|
--ros_publisher_pointcloud_rate=10
|
||||||
|
--ros_publisher_camera_info_rate=10
|
||||||
|
|
48
event_camera_simulator/esim_ros/cfg/obstacle.conf
Normal file
48
event_camera_simulator/esim_ros/cfg/obstacle.conf
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
--v=0
|
||||||
|
--random_seed=2
|
||||||
|
--data_source=0
|
||||||
|
--path_to_output_bag=/tmp/out.bag
|
||||||
|
|
||||||
|
--contrast_threshold_pos=0.5
|
||||||
|
--contrast_threshold_neg=0.5
|
||||||
|
--contrast_threshold_sigma_pos=0
|
||||||
|
--contrast_threshold_sigma_neg=0
|
||||||
|
|
||||||
|
--exposure_time_ms=12.0
|
||||||
|
--use_log_image=1
|
||||||
|
--log_eps=0.001
|
||||||
|
|
||||||
|
--calib_filename=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/esim_ros/cfg/calib/pinhole_mono_nodistort.yaml
|
||||||
|
--renderer_scene=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/imp/imp_opengl_renderer/resources/objects/flying_room/flying_room.obj
|
||||||
|
|
||||||
|
--renderer_type=2
|
||||||
|
--renderer_zmin=0.1
|
||||||
|
--renderer_zmax=40.0
|
||||||
|
|
||||||
|
--renderer_dynamic_objects_dir=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/imp/imp_opengl_renderer/resources/objects/dynamic_objects
|
||||||
|
--renderer_dynamic_objects=americanfootball.obj
|
||||||
|
|
||||||
|
--trajectory_type=1
|
||||||
|
--trajectory_spline_order=3
|
||||||
|
--trajectory_num_spline_segments=10
|
||||||
|
--trajectory_lambda=0.1
|
||||||
|
--trajectory_csv_file=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/imp/imp_opengl_renderer/resources/objects/flying_room/camera_trajectory.csv
|
||||||
|
|
||||||
|
--trajectory_dynamic_objects_type=1
|
||||||
|
--trajectory_dynamic_objects_spline_order=3
|
||||||
|
--trajectory_dynamic_objects_num_spline_segments=1
|
||||||
|
--trajectory_dynamic_objects_lambda=0.1
|
||||||
|
--trajectory_dynamic_objects_csv_dir=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/imp/imp_opengl_renderer/resources/objects/dynamic_objects
|
||||||
|
--trajectory_dynamic_objects_csv_file=americanfootball.csv
|
||||||
|
|
||||||
|
--simulation_minimum_framerate=20.0
|
||||||
|
--simulation_imu_rate=1000.0
|
||||||
|
--simulation_adaptive_sampling_method=1
|
||||||
|
--simulation_adaptive_sampling_lambda=0.5
|
||||||
|
--simulation_post_gaussian_blur_sigma=0.15
|
||||||
|
|
||||||
|
--ros_publisher_frame_rate=30.0
|
||||||
|
--ros_publisher_depth_rate=30.0
|
||||||
|
--ros_publisher_optic_flow_rate=30.0
|
||||||
|
--ros_publisher_pointcloud_rate=1000
|
||||||
|
--ros_publisher_camera_info_rate=10
|
47
event_camera_simulator/esim_ros/cfg/opengl.conf
Normal file
47
event_camera_simulator/esim_ros/cfg/opengl.conf
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
--v=0
|
||||||
|
--random_seed=2
|
||||||
|
--data_source=0
|
||||||
|
--path_to_output_bag=/tmp/out.bag
|
||||||
|
|
||||||
|
--contrast_threshold_pos=0.5
|
||||||
|
--contrast_threshold_neg=0.5
|
||||||
|
--contrast_threshold_sigma_pos=0
|
||||||
|
--contrast_threshold_sigma_neg=0
|
||||||
|
|
||||||
|
--exposure_time_ms=12.0
|
||||||
|
--use_log_image=1
|
||||||
|
--log_eps=0.001
|
||||||
|
|
||||||
|
--calib_filename=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/esim_ros/cfg/calib/pinhole_mono_nodistort.yaml
|
||||||
|
--renderer_scene=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/imp/imp_opengl_renderer/resources/objects/flying_room/flying_room.obj
|
||||||
|
--renderer_zmax=20
|
||||||
|
|
||||||
|
--renderer_type=2
|
||||||
|
--renderer_zmin=0.3
|
||||||
|
--renderer_zmax=40.0
|
||||||
|
|
||||||
|
--trajectory_type=0
|
||||||
|
--trajectory_length_s=10.0
|
||||||
|
--trajectory_sampling_frequency_hz=5
|
||||||
|
--trajectory_spline_order=5
|
||||||
|
--trajectory_num_spline_segments=10
|
||||||
|
--trajectory_lambda=0.01
|
||||||
|
--trajectory_multiplier_x=1.8
|
||||||
|
--trajectory_multiplier_y=1.8
|
||||||
|
--trajectory_multiplier_z=0.5
|
||||||
|
--trajectory_multiplier_wx=0.80
|
||||||
|
--trajectory_multiplier_wy=0.80
|
||||||
|
--trajectory_multiplier_wz=0.80
|
||||||
|
--trajectory_offset_z=0.0
|
||||||
|
|
||||||
|
--simulation_minimum_framerate=20.0
|
||||||
|
--simulation_imu_rate=1000.0
|
||||||
|
--simulation_adaptive_sampling_method=1
|
||||||
|
--simulation_adaptive_sampling_lambda=0.5
|
||||||
|
--simulation_post_gaussian_blur_sigma=0.3
|
||||||
|
|
||||||
|
--ros_publisher_frame_rate=30.0
|
||||||
|
--ros_publisher_depth_rate=30.0
|
||||||
|
--ros_publisher_optic_flow_rate=30.0
|
||||||
|
--ros_publisher_pointcloud_rate=10
|
||||||
|
--ros_publisher_camera_info_rate=10
|
48
event_camera_simulator/esim_ros/cfg/panorama.conf
Normal file
48
event_camera_simulator/esim_ros/cfg/panorama.conf
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
--v=0
|
||||||
|
--data_source=0
|
||||||
|
--random_seed=0
|
||||||
|
--path_to_output_bag=/tmp/test_panorama.bag
|
||||||
|
|
||||||
|
--contrast_threshold_pos=0.4
|
||||||
|
--contrast_threshold_neg=0.4
|
||||||
|
--contrast_threshold_sigma_pos=0
|
||||||
|
--contrast_threshold_sigma_neg=0
|
||||||
|
|
||||||
|
--exposure_time_ms=10.0
|
||||||
|
--use_log_image=1
|
||||||
|
--log_eps=0.001
|
||||||
|
|
||||||
|
--calib_filename=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/esim_ros/cfg/calib/pinhole_wide_fov.yaml
|
||||||
|
|
||||||
|
--renderer_type=1
|
||||||
|
--renderer_texture=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/imp/imp_panorama_renderer/textures/bicycle_parking.jpg
|
||||||
|
--renderer_preprocess_gaussian_blur=0.1
|
||||||
|
--renderer_preprocess_median_blur=5
|
||||||
|
--renderer_panorama_qw=0.707106781187
|
||||||
|
--renderer_panorama_qx=-0.707106781187
|
||||||
|
--renderer_panorama_qy=0.0
|
||||||
|
--renderer_panorama_qz=0.0
|
||||||
|
|
||||||
|
--trajectory_type=0
|
||||||
|
--trajectory_length_s=100.0
|
||||||
|
--trajectory_sampling_frequency_hz=5
|
||||||
|
--trajectory_spline_order=5
|
||||||
|
--trajectory_num_spline_segments=100
|
||||||
|
--trajectory_lambda=0.01
|
||||||
|
--trajectory_multiplier_x=0.0
|
||||||
|
--trajectory_multiplier_y=0
|
||||||
|
--trajectory_multiplier_z=0
|
||||||
|
--trajectory_multiplier_wx=1.0
|
||||||
|
--trajectory_multiplier_wy=0.0
|
||||||
|
--trajectory_multiplier_wz=4.0
|
||||||
|
|
||||||
|
--simulation_minimum_framerate=30
|
||||||
|
--simulation_imu_rate=1000.0
|
||||||
|
--simulation_adaptive_sampling_method=1
|
||||||
|
--simulation_adaptive_sampling_lambda=0.5
|
||||||
|
|
||||||
|
--ros_publisher_frame_rate=30
|
||||||
|
--ros_publisher_depth_rate=1
|
||||||
|
--ros_publisher_optic_flow_rate=1
|
||||||
|
--ros_publisher_pointcloud_rate=1
|
||||||
|
--ros_publisher_camera_info_rate=10
|
37
event_camera_simulator/esim_ros/cfg/sponza.conf
Normal file
37
event_camera_simulator/esim_ros/cfg/sponza.conf
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
--vmodule=model=0
|
||||||
|
--data_source=0
|
||||||
|
--path_to_output_bag=/tmp/out.bag
|
||||||
|
|
||||||
|
--contrast_threshold_pos=0.1
|
||||||
|
--contrast_threshold_neg=0.1
|
||||||
|
--contrast_threshold_sigma_pos=0
|
||||||
|
--contrast_threshold_sigma_neg=0
|
||||||
|
|
||||||
|
--exposure_time_ms=12.0
|
||||||
|
--use_log_image=1
|
||||||
|
--log_eps=0.001
|
||||||
|
|
||||||
|
--calib_filename=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/esim_ros/cfg/calib/pinhole_mono_nodistort.yaml
|
||||||
|
--renderer_scene=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/imp/imp_opengl_renderer/resources/objects/sponza/sponza.obj
|
||||||
|
--renderer_zmax=20
|
||||||
|
|
||||||
|
--renderer_type=2
|
||||||
|
--renderer_zmin=1.0
|
||||||
|
--renderer_zmax=40.0
|
||||||
|
|
||||||
|
--trajectory_type=1
|
||||||
|
--trajectory_spline_order=5
|
||||||
|
--trajectory_num_spline_segments=50
|
||||||
|
--trajectory_lambda=0
|
||||||
|
--trajectory_csv_file=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/imp/imp_opengl_renderer/resources/objects/sponza/camera_trajectory.csv
|
||||||
|
|
||||||
|
--simulation_minimum_framerate=20.0
|
||||||
|
--simulation_imu_rate=1000.0
|
||||||
|
--simulation_adaptive_sampling_method=1
|
||||||
|
--simulation_adaptive_sampling_lambda=0.5
|
||||||
|
|
||||||
|
--ros_publisher_frame_rate=30.0
|
||||||
|
--ros_publisher_depth_rate=30.0
|
||||||
|
--ros_publisher_optic_flow_rate=30.0
|
||||||
|
--ros_publisher_pointcloud_rate=10
|
||||||
|
--ros_publisher_camera_info_rate=10
|
11611
event_camera_simulator/esim_ros/cfg/traj/poster_6dof.csv
Normal file
11611
event_camera_simulator/esim_ros/cfg/traj/poster_6dof.csv
Normal file
File diff suppressed because it is too large
5000
event_camera_simulator/esim_ros/cfg/traj/quadrotor_circles.csv
Normal file
5000
event_camera_simulator/esim_ros/cfg/traj/quadrotor_circles.csv
Normal file
File diff suppressed because it is too large
44
event_camera_simulator/esim_ros/cfg/unrealcv.conf
Normal file
44
event_camera_simulator/esim_ros/cfg/unrealcv.conf
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
--data_source=0
|
||||||
|
--vmodule=unrealcv_renderer=0
|
||||||
|
--path_to_output_bag=/tmp/out.bag
|
||||||
|
|
||||||
|
--contrast_threshold_pos=0.6
|
||||||
|
--contrast_threshold_neg=0.6
|
||||||
|
--contrast_threshold_sigma_pos=0
|
||||||
|
--contrast_threshold_sigma_neg=0
|
||||||
|
|
||||||
|
--exposure_time_ms=5.0
|
||||||
|
--use_log_image=1
|
||||||
|
--log_eps=0.001
|
||||||
|
|
||||||
|
--calib_filename=/home/user/esim_ws/src/rpg_esim/event_camera_simulator/esim_ros/cfg/calib/pinhole_mono_nodistort_forward.yaml
|
||||||
|
|
||||||
|
--renderer_type=3
|
||||||
|
|
||||||
|
--trajectory_type=0
|
||||||
|
--trajectory_length_s=100.0
|
||||||
|
--trajectory_sampling_frequency_hz=5
|
||||||
|
--trajectory_spline_order=5
|
||||||
|
--trajectory_num_spline_segments=100
|
||||||
|
--trajectory_lambda=0.1
|
||||||
|
--trajectory_multiplier_x=3.0
|
||||||
|
--trajectory_multiplier_y=3.0
|
||||||
|
--trajectory_multiplier_z=0.5
|
||||||
|
--trajectory_multiplier_wx=0.15
|
||||||
|
--trajectory_multiplier_wy=0.15
|
||||||
|
--trajectory_multiplier_wz=1.
|
||||||
|
|
||||||
|
--x_offset=0.0
|
||||||
|
--y_offset=0.0
|
||||||
|
--z_offset=1.0
|
||||||
|
|
||||||
|
--simulation_minimum_framerate=5.0
|
||||||
|
--simulation_imu_rate=1000.0
|
||||||
|
--simulation_adaptive_sampling_method=1
|
||||||
|
--simulation_adaptive_sampling_lambda=1.0
|
||||||
|
|
||||||
|
--ros_publisher_frame_rate=300
|
||||||
|
--ros_publisher_depth_rate=300
|
||||||
|
--ros_publisher_optic_flow_rate=200
|
||||||
|
--ros_publisher_pointcloud_rate=50
|
||||||
|
--ros_publisher_camera_info_rate=10
|
15
event_camera_simulator/esim_ros/launch/esim.launch
Normal file
15
event_camera_simulator/esim_ros/launch/esim.launch
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
<launch>
|
||||||
|
|
||||||
|
<arg name="config" />
|
||||||
|
|
||||||
|
<!-- Event camera simulator -->
|
||||||
|
<node name="esim_node" pkg="esim_ros" type="esim_node" args="
|
||||||
|
--v=1
|
||||||
|
--vmodule=data_provider_from_folder=10
|
||||||
|
--flagfile=$(find esim_ros)/$(arg config)
|
||||||
|
" output="screen"/>
|
||||||
|
|
||||||
|
<include file="$(find esim_ros)/launch/visualization.launch" />
|
||||||
|
|
||||||
|
|
||||||
|
</launch>
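Note that the config argument above is resolved relative to the esim_ros package via $(find esim_ros); a typical invocation would therefore look like, e.g., roslaunch esim_ros esim.launch config:=cfg/example.conf, where cfg/example.conf is one of the configuration files shipped in esim_ros/cfg.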
|
27
event_camera_simulator/esim_ros/launch/visualization.launch
Normal file
27
event_camera_simulator/esim_ros/launch/visualization.launch
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
<launch>
|
||||||
|
|
||||||
|
<!-- Visualization -->
|
||||||
|
<node name="dvs_renderer" pkg="dvs_renderer" type="dvs_renderer" output="screen" required="false">
|
||||||
|
<remap from="events" to="/cam0/events" />
|
||||||
|
<remap from="image" to="/cam0/image_corrupted" />
|
||||||
|
<remap from="dvs_rendering" to="dvs_rendering" />
|
||||||
|
</node>
|
||||||
|
|
||||||
|
<node name="optic_flow_viz" pkg="esim_visualization" type="optic_flow_converter.py" output="screen" required="false">
|
||||||
|
<param name="arrows_step" value="7" />
|
||||||
|
|
||||||
|
<param name="arrows_scale" value="0.07" />
|
||||||
|
<param name="arrows_upsample_factor" value="1" />
|
||||||
|
<param name="publish_rate" value="100" />
|
||||||
|
|
||||||
|
<remap from="flow" to="/cam0/optic_flow" />
|
||||||
|
</node>
|
||||||
|
|
||||||
|
<node pkg="hector_trajectory_server" type="hector_trajectory_server" name="hector_trajectory_server" output="screen" required="false">
|
||||||
|
<param name="target_frame_name" value="map"/>
|
||||||
|
<param name="source_frame_name" value="cam0"/>
|
||||||
|
<param name="trajectory_publish_rate" value="15"/>
|
||||||
|
<param name="trajectory_update_rate" value="15"/>
|
||||||
|
</node>
|
||||||
|
|
||||||
|
</launch>
|
22
event_camera_simulator/esim_ros/package.xml
Normal file
22
event_camera_simulator/esim_ros/package.xml
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
<?xml version="1.0"?>
|
||||||
|
<package format="2">
|
||||||
|
<name>esim_ros</name>
|
||||||
|
<version>0.0.0</version>
|
||||||
|
<description>The event_camera_simulator package</description>
|
||||||
|
|
||||||
|
<maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>
|
||||||
|
|
||||||
|
<license>GPLv3</license>
|
||||||
|
|
||||||
|
<buildtool_depend>catkin</buildtool_depend>
|
||||||
|
<buildtool_depend>catkin_simple</buildtool_depend>
|
||||||
|
|
||||||
|
<depend>esim_common</depend>
|
||||||
|
<depend>esim</depend>
|
||||||
|
<depend>esim_data_provider</depend>
|
||||||
|
<depend>esim_visualization</depend>
|
||||||
|
<depend>gflags_catkin</depend>
|
||||||
|
<depend>glog_catkin</depend>
|
||||||
|
<depend>dvs_renderer_advanced</depend>
|
||||||
|
|
||||||
|
</package>
|
124
event_camera_simulator/esim_ros/scripts/test_depth_formula.py
Normal file
124
event_camera_simulator/esim_ros/scripts/test_depth_formula.py
Normal file
@ -0,0 +1,124 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
Check the formula used to compute the depth map from a rotation, translation
|
||||||
|
and plane (parameterized by normal + distance).
|
||||||
|
"""
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
from matplotlib import pyplot as plt
|
||||||
|
from math import tan, pi
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
np.set_printoptions(suppress=True)
|
||||||
|
|
||||||
|
|
||||||
|
def skew(v):
|
||||||
|
"""Returns the skew-symmetric matrix of a vector"""
|
||||||
|
return np.array([[0, -v[2], v[1]],
|
||||||
|
[v[2], 0, -v[0]],
|
||||||
|
[-v[1], v[0], 0]], dtype=np.float64)
|
||||||
|
|
||||||
|
|
||||||
|
def calibrationMatrixFromHFOV(hfov_deg, W, H):
|
||||||
|
f = 0.5 * W / tan(0.5 * hfov_deg * pi / 180.0)
|
||||||
|
K = np.array([[f, 0, 0.5 * W],
|
||||||
|
[0, f, 0.5 * H],
|
||||||
|
[0, 0, 1]]).astype(np.float64)
|
||||||
|
K_inv = np.linalg.inv(K)
|
||||||
|
return K, K_inv
|
||||||
|
|
||||||
|
|
||||||
|
def computeDepthmapAnalytic(R_01, t_01, n, d, K1, width, height):
|
||||||
|
K1_inv = np.linalg.inv(K1)
|
||||||
|
depth = np.zeros((height,width), dtype=np.float64)
|
||||||
|
for x in range(width):
|
||||||
|
for y in range(height):
|
||||||
|
X1 = np.array([x,y,1]).reshape((3,1))
|
||||||
|
X1 = K1_inv.dot(X1)
|
||||||
|
|
||||||
|
z = -(d+n.T.dot(t_01))/(n.T.dot(R_01).dot(X1))
|
||||||
|
depth[y,x] = z[0,0]
|
||||||
|
|
||||||
|
return depth
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
|
||||||
|
# Index 1 refers to cam (destination image)
|
||||||
|
# Index 0 refers to world (source image)
|
||||||
|
|
||||||
|
plt.close('all')
|
||||||
|
|
||||||
|
img = cv2.imread('../textures/carpet.jpg', 0).astype(np.float32)
|
||||||
|
img = img.astype(np.float32) / 255.0
|
||||||
|
|
||||||
|
hfov_plane_deg = 130.0
|
||||||
|
hfov_camera_deg = 90
|
||||||
|
|
||||||
|
H0, W0 = img.shape
|
||||||
|
K0, K0_inv = calibrationMatrixFromHFOV(hfov_plane_deg, W0, H0)
|
||||||
|
H1, W1 = 260, 346
|
||||||
|
K1, K1_inv = calibrationMatrixFromHFOV(hfov_camera_deg, W1, H1)
|
||||||
|
|
||||||
|
K2, K2_inv = K1, K1_inv
|
||||||
|
W2, H2 = W1, H1
|
||||||
|
|
||||||
|
n = np.array([-0.12,-0.05,1.0]).reshape((3,1))
|
||||||
|
n = n / np.linalg.norm(n)
|
||||||
|
d = -1.0
|
||||||
|
|
||||||
|
w_01 = (np.array([15.0, 5.0, -10.0]) * pi / 180.0).reshape((3,1)).astype(np.float64)
|
||||||
|
t_01 = np.array([-1.0,0.4,-0.1]).reshape((3,1))
|
||||||
|
R_01, _ = cv2.Rodrigues(w_01)
|
||||||
|
|
||||||
|
R_10 = R_01.T
|
||||||
|
t_10 = -R_01.T.dot(t_01)
|
||||||
|
|
||||||
|
R = R_10
|
||||||
|
t = t_10
|
||||||
|
C = -R.T.dot(t)
|
||||||
|
Hn_10 = R-1/d*t.dot(n.T)
|
||||||
|
|
||||||
|
Hn_01 = np.linalg.inv(Hn_10)
|
||||||
|
Hn_01_analytic = (np.eye(3) - 1.0/(d+n.T.dot(C))*C.dot(n.T)).dot(R.T) # analytic inverse
|
||||||
|
|
||||||
|
print('Test analytic inverse of H_01: {}'.format(np.allclose(Hn_01, Hn_01_analytic)))
|
||||||
|
|
||||||
|
H_10 = K1.dot(Hn_10).dot(K0_inv)
|
||||||
|
H_01 = np.linalg.inv(H_10)
|
||||||
|
|
||||||
|
warped = cv2.warpPerspective(img, H_01, dsize=(W1,H1), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
|
||||||
|
depth = computeDepthmapAnalytic(R_01, t_01, n, d, K1, W1, H1)
|
||||||
|
|
||||||
|
plt.figure()
|
||||||
|
plt.imshow(depth)
|
||||||
|
plt.colorbar()
|
||||||
|
plt.title('Analytical depth map')
|
||||||
|
|
||||||
|
x1,y1 = np.random.randint(0,W1), np.random.randint(0,H1)
|
||||||
|
X1 = np.array([x1,y1,1]).reshape((3,1))
|
||||||
|
X = K1_inv.dot(X1)
|
||||||
|
|
||||||
|
X0 = H_01.dot(X1)
|
||||||
|
X0[...] /= X0[2]
|
||||||
|
|
||||||
|
plt.figure()
|
||||||
|
plt.subplot(121)
|
||||||
|
plt.imshow(warped, cmap='gray')
|
||||||
|
plt.scatter(X1[0], X1[1])
|
||||||
|
plt.title('Warped image')
|
||||||
|
|
||||||
|
plt.subplot(122)
|
||||||
|
plt.imshow(img, cmap='gray')
|
||||||
|
plt.scatter(X0[0], X0[1], color='b')
|
||||||
|
plt.title('Source image')
|
||||||
|
|
||||||
|
# Check that the predicted depth indeed works to project X1 on image 0
|
||||||
|
z1 = depth[y1,x1]
|
||||||
|
P1 = z1 * K1_inv.dot(X1)
|
||||||
|
P0 = R_01.dot(P1) + t_01
|
||||||
|
X0_depth = K0.dot(P0)
|
||||||
|
X0_depth[...] /= X0_depth[2]
|
||||||
|
|
||||||
|
print('Test reprojection with analytical depth: {}'.format(np.allclose(X0, X0_depth)))
|
||||||
|
|
88
event_camera_simulator/esim_ros/src/esim_node.cpp
Normal file
88
event_camera_simulator/esim_ros/src/esim_node.cpp
Normal file
@ -0,0 +1,88 @@
|
|||||||
|
#include <esim/esim/simulator.hpp>
|
||||||
|
#include <esim/visualization/ros_publisher.hpp>
|
||||||
|
#include <esim/visualization/rosbag_writer.hpp>
|
||||||
|
#include <esim/visualization/adaptive_sampling_benchmark_publisher.hpp>
|
||||||
|
#include <esim/visualization/synthetic_optic_flow_publisher.hpp>
|
||||||
|
#include <esim/data_provider/data_provider_factory.hpp>
|
||||||
|
|
||||||
|
#include <glog/logging.h>
|
||||||
|
#include <gflags/gflags.h>
|
||||||
|
|
||||||
|
DEFINE_double(contrast_threshold_pos, 1.0,
|
||||||
|
"Contrast threshold (positive)");
|
||||||
|
|
||||||
|
DEFINE_double(contrast_threshold_neg, 1.0,
|
||||||
|
"Contrast threshold (negative))");
|
||||||
|
|
||||||
|
DEFINE_double(contrast_threshold_sigma_pos, 0.021,
|
||||||
|
"Standard deviation of contrast threshold (positive)");
|
||||||
|
|
||||||
|
DEFINE_double(contrast_threshold_sigma_neg, 0.021,
|
||||||
|
"Standard deviation of contrast threshold (negative))");
|
||||||
|
|
||||||
|
DEFINE_int64(refractory_period_ns, 100000,
|
||||||
|
"Refractory period (time during which a pixel cannot fire events just after it fired one), in nanoseconds");
|
||||||
|
|
||||||
|
DEFINE_double(exposure_time_ms, 10.0,
|
||||||
|
"Exposure time in milliseconds, used to simulate motion blur");
|
||||||
|
|
||||||
|
DEFINE_bool(use_log_image, true,
|
||||||
|
"Whether to convert images to log images in the preprocessing step.");
|
||||||
|
|
||||||
|
DEFINE_double(log_eps, 0.001,
|
||||||
|
"Epsilon value used to convert images to log: L = log(eps + I / 255.0).");
|
||||||
|
|
||||||
|
DEFINE_int32(random_seed, 0,
|
||||||
|
"Random seed used to generate the trajectories. If set to 0 the current time(0) is taken as seed.");
|
||||||
|
|
||||||
|
using namespace event_camera_simulator;
|
||||||
|
|
||||||
|
int main(int argc, char** argv)
|
||||||
|
{
|
||||||
|
google::InitGoogleLogging(argv[0]);
|
||||||
|
google::ParseCommandLineFlags(&argc, &argv, true);
|
||||||
|
google::InstallFailureSignalHandler();
|
||||||
|
FLAGS_alsologtostderr = true;
|
||||||
|
FLAGS_colorlogtostderr = true;
|
||||||
|
|
||||||
|
if (FLAGS_random_seed == 0) FLAGS_random_seed = (unsigned int) time(0);
|
||||||
|
srand(FLAGS_random_seed);
|
||||||
|
|
||||||
|
DataProviderBase::Ptr data_provider_ =
|
||||||
|
loadDataProviderFromGflags();
|
||||||
|
CHECK(data_provider_);
|
||||||
|
|
||||||
|
EventSimulator::Config event_sim_config;
|
||||||
|
event_sim_config.Cp = FLAGS_contrast_threshold_pos;
|
||||||
|
event_sim_config.Cm = FLAGS_contrast_threshold_neg;
|
||||||
|
event_sim_config.sigma_Cp = FLAGS_contrast_threshold_sigma_pos;
|
||||||
|
event_sim_config.sigma_Cm = FLAGS_contrast_threshold_sigma_neg;
|
||||||
|
event_sim_config.refractory_period_ns = FLAGS_refractory_period_ns;
|
||||||
|
event_sim_config.use_log_image = FLAGS_use_log_image;
|
||||||
|
event_sim_config.log_eps = FLAGS_log_eps;
|
||||||
|
std::shared_ptr<Simulator> sim;
|
||||||
|
sim.reset(new Simulator(data_provider_->numCameras(),
|
||||||
|
event_sim_config,
|
||||||
|
FLAGS_exposure_time_ms));
|
||||||
|
CHECK(sim);
|
||||||
|
|
||||||
|
Publisher::Ptr ros_publisher = std::make_shared<RosPublisher>(data_provider_->numCameras());
|
||||||
|
Publisher::Ptr rosbag_writer = RosbagWriter::createBagWriterFromGflags(data_provider_->numCameras());
|
||||||
|
Publisher::Ptr adaptive_sampling_benchmark_publisher
|
||||||
|
= AdaptiveSamplingBenchmarkPublisher::createFromGflags();
|
||||||
|
|
||||||
|
Publisher::Ptr synthetic_optic_flow_publisher
|
||||||
|
= SyntheticOpticFlowPublisher::createFromGflags();
|
||||||
|
|
||||||
|
if(ros_publisher) sim->addPublisher(ros_publisher);
|
||||||
|
if(rosbag_writer) sim->addPublisher(rosbag_writer);
|
||||||
|
if(adaptive_sampling_benchmark_publisher) sim->addPublisher(adaptive_sampling_benchmark_publisher);
|
||||||
|
if(synthetic_optic_flow_publisher) sim->addPublisher(synthetic_optic_flow_publisher);
|
||||||
|
|
||||||
|
data_provider_->registerCallback(
|
||||||
|
std::bind(&Simulator::dataProviderCallback, sim.get(),
|
||||||
|
std::placeholders::_1));
|
||||||
|
|
||||||
|
data_provider_->spin();
|
||||||
|
|
||||||
|
}
|
@ -0,0 +1,27 @@
|
|||||||
|
label: "simulated_camera"
|
||||||
|
id: 433585ebda8d1431223927a14788a127
|
||||||
|
cameras:
|
||||||
|
- camera:
|
||||||
|
label: dvs0
|
||||||
|
id: a0fba5412e961934d842d3a2a78e5cba
|
||||||
|
line-delay-nanoseconds: 0
|
||||||
|
image_height: 180
|
||||||
|
image_width: 240
|
||||||
|
type: pinhole
|
||||||
|
intrinsics:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [198.245521288, 198.277025706, 142.064861206, 100.903484508]
|
||||||
|
distortion:
|
||||||
|
type: equidistant
|
||||||
|
parameters:
|
||||||
|
cols: 1
|
||||||
|
rows: 4
|
||||||
|
data: [-0.0506755889541, 0.0456313630037, -0.0825742639337, 0.0557104403236]
|
||||||
|
T_B_C:
|
||||||
|
cols: 4
|
||||||
|
rows: 4
|
||||||
|
data: [1, 0, 0, 0,
|
||||||
|
0, -1, 0, 0,
|
||||||
|
0, 0, -1, 0,
|
||||||
|
0, 0, 0, 1]
|
20
event_camera_simulator/esim_trajectory/CMakeLists.txt
Normal file
20
event_camera_simulator/esim_trajectory/CMakeLists.txt
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
cmake_minimum_required(VERSION 2.8.3)
|
||||||
|
project(esim_trajectory)
|
||||||
|
|
||||||
|
find_package(catkin_simple REQUIRED)
|
||||||
|
catkin_simple()
|
||||||
|
|
||||||
|
set(HEADERS
|
||||||
|
include/esim/trajectory/trajectory_factory.hpp
|
||||||
|
include/esim/trajectory/imu_factory.hpp
|
||||||
|
)
|
||||||
|
|
||||||
|
set(SOURCES
|
||||||
|
src/trajectory_factory.cpp
|
||||||
|
src/imu_factory.cpp
|
||||||
|
)
|
||||||
|
|
||||||
|
cs_add_library(${PROJECT_NAME} ${SOURCES} ${HEADERS})
|
||||||
|
|
||||||
|
cs_install()
|
||||||
|
cs_export()
|
@ -0,0 +1,11 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <gflags/gflags.h>
|
||||||
|
#include <ze/vi_simulation/trajectory_simulator.hpp>
|
||||||
|
#include <ze/vi_simulation/imu_simulator.hpp>
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
ze::ImuSimulator::Ptr loadImuSimulatorFromGflags(const ze::TrajectorySimulator::Ptr &trajectory);
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
|
@ -0,0 +1,10 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <gflags/gflags.h>
|
||||||
|
#include <ze/vi_simulation/trajectory_simulator.hpp>
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
std::tuple<ze::TrajectorySimulator::Ptr, std::vector<ze::TrajectorySimulator::Ptr>> loadTrajectorySimulatorFromGflags();
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
|
19
event_camera_simulator/esim_trajectory/package.xml
Normal file
19
event_camera_simulator/esim_trajectory/package.xml
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
<?xml version="1.0"?>
|
||||||
|
<package format="2">
|
||||||
|
<name>esim_trajectory</name>
|
||||||
|
<version>0.0.0</version>
|
||||||
|
<description>Trajectory generators for the event camera simulator.</description>
|
||||||
|
|
||||||
|
<maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>
|
||||||
|
|
||||||
|
<license>GPLv3</license>
|
||||||
|
|
||||||
|
<buildtool_depend>catkin</buildtool_depend>
|
||||||
|
<buildtool_depend>catkin_simple</buildtool_depend>
|
||||||
|
|
||||||
|
<depend>esim_common</depend>
|
||||||
|
<depend>ze_vi_simulation</depend>
|
||||||
|
<depend>gflags_catkin</depend>
|
||||||
|
<depend>glog_catkin</depend>
|
||||||
|
|
||||||
|
</package>
|
50
event_camera_simulator/esim_trajectory/src/imu_factory.cpp
Normal file
50
event_camera_simulator/esim_trajectory/src/imu_factory.cpp
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
#include <ze/common/logging.hpp>
|
||||||
|
#include <esim/trajectory/imu_factory.hpp>
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
ze::ImuSimulator::Ptr loadImuSimulatorFromGflags(const ze::TrajectorySimulator::Ptr& trajectory)
|
||||||
|
{
|
||||||
|
ze::ImuSimulator::Ptr imu;
|
||||||
|
|
||||||
|
const ze::real_t gyr_bias_noise_sigma = 0.0000266;
|
||||||
|
const ze::real_t acc_bias_noise_sigma = 0.000433;
|
||||||
|
const ze::real_t gyr_noise_sigma = 0.000186;
|
||||||
|
const ze::real_t acc_noise_sigma = 0.00186;
|
||||||
|
const uint32_t imu_bandwidth_hz = 200;
|
||||||
|
const ze::real_t gravity_magnitude = 9.81;
|
||||||
|
|
||||||
|
ze::ImuBiasSimulator::Ptr bias;
|
||||||
|
try
|
||||||
|
{
|
||||||
|
VLOG(1) << "Initialize bias ...";
|
||||||
|
bias = std::make_shared<ze::ContinuousBiasSimulator>(
|
||||||
|
ze::Vector3::Constant(gyr_bias_noise_sigma),
|
||||||
|
ze::Vector3::Constant(acc_bias_noise_sigma),
|
||||||
|
trajectory->start(),
|
||||||
|
trajectory->end(),
|
||||||
|
100); // Low bias sampling rate: using imu_bandwidth_hz here would allocate (trajectory->end() - trajectory->start()) * imu_bandwidth_hz samples, which can exhaust memory.
|
||||||
|
VLOG(1) << "done.";
|
||||||
|
}
|
||||||
|
catch (const std::bad_alloc& e)
|
||||||
|
{
|
||||||
|
LOG(FATAL) << "Could not create bias because number of samples is too high."
|
||||||
|
<< " Allocation failed: " << e.what();
|
||||||
|
}
|
||||||
|
|
||||||
|
VLOG(1) << "Initialize IMU ...";
|
||||||
|
imu = std::make_shared<ze::ImuSimulator>(
|
||||||
|
trajectory,
|
||||||
|
bias,
|
||||||
|
ze::RandomVectorSampler<3>::sigmas(ze::Vector3::Constant(acc_noise_sigma)),
|
||||||
|
ze::RandomVectorSampler<3>::sigmas(ze::Vector3::Constant(gyr_noise_sigma)),
|
||||||
|
imu_bandwidth_hz,
|
||||||
|
imu_bandwidth_hz,
|
||||||
|
gravity_magnitude);
|
||||||
|
VLOG(1) << "done.";
|
||||||
|
|
||||||
|
return imu;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
|
||||||
|
|
@ -0,0 +1,236 @@
|
|||||||
|
#include <esim/trajectory/trajectory_factory.hpp>
|
||||||
|
#include <ze/common/csv_trajectory.hpp>
|
||||||
|
#include <ze/common/logging.hpp>
|
||||||
|
|
||||||
|
DEFINE_int32(trajectory_type, 0, " 0: Random spline trajectory, 1: Load trajectory from CSV file");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_length_s, 100.0,
|
||||||
|
"Length of the trajectory, in seconds"
|
||||||
|
"(used when the trajectory type is random spline)");
|
||||||
|
|
||||||
|
DEFINE_int32(trajectory_spline_order, 5,
|
||||||
|
"Spline order of the trajectory");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_sampling_frequency_hz, 5,
|
||||||
|
"Sampling frequency of the spline trajectory, i.e."
|
||||||
|
"number of random poses generated per second along the trajectory");
|
||||||
|
|
||||||
|
DEFINE_int32(trajectory_num_spline_segments, 100,
|
||||||
|
"Number of spline segments used for the trajectory");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_lambda, 0.1,
|
||||||
|
"Smoothing factor for the spline trajectories."
|
||||||
|
"Low value = less smooth, high value = more smooth");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_multiplier_x, 1.0,
|
||||||
|
"Scaling factor for the X camera axis");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_multiplier_y, 1.0,
|
||||||
|
"Scaling factor for the y camera axis");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_multiplier_z, 1.0,
|
||||||
|
"Scaling factor for the z camera axis");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_multiplier_wx, 1.0,
|
||||||
|
"Scaling factor for the x orientation axis");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_multiplier_wy, 1.0,
|
||||||
|
"Scaling factor for the y orientation axis");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_multiplier_wz, 1.0,
|
||||||
|
"Scaling factor for the z orientation axis");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_offset_x, 0.0,
|
||||||
|
"Offset for the X camera axis");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_offset_y, 0.0,
|
||||||
|
"Offset for the y camera axis");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_offset_z, 0.0,
|
||||||
|
"Offset for the z camera axis");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_offset_wx, 0.0,
|
||||||
|
"Offset for the x orientation axis");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_offset_wy, 0.0,
|
||||||
|
"Offset for the y orientation axis");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_offset_wz, 0.0,
|
||||||
|
"Offset for the z orientation axis");
|
||||||
|
|
||||||
|
DEFINE_string(trajectory_csv_file, "",
|
||||||
|
"CSV file containing the trajectory");
|
||||||
|
|
||||||
|
DEFINE_int32(trajectory_dynamic_objects_type, 1, " 0: Random spline trajectory, 1: Load trajectory from CSV file");
|
||||||
|
|
||||||
|
DEFINE_int32(trajectory_dynamic_objects_spline_order, 5,
|
||||||
|
"Spline order of the trajectory");
|
||||||
|
|
||||||
|
DEFINE_int32(trajectory_dynamic_objects_num_spline_segments, 100,
|
||||||
|
"Number of spline segments used for the trajectory");
|
||||||
|
|
||||||
|
DEFINE_double(trajectory_dynamic_objects_lambda, 0.1,
|
||||||
|
"Smoothing factor for the spline trajectories."
|
||||||
|
"Low value = less smooth, high value = more smooth");
|
||||||
|
|
||||||
|
DEFINE_string(trajectory_dynamic_objects_csv_dir, "",
|
||||||
|
"Directory containing CSV file of trajectory for dynamic objects");
|
||||||
|
|
||||||
|
DEFINE_string(trajectory_dynamic_objects_csv_file, "",
|
||||||
|
"CSV file containing the trajectory");
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
std::tuple<ze::TrajectorySimulator::Ptr, std::vector<ze::TrajectorySimulator::Ptr>> loadTrajectorySimulatorFromGflags()
|
||||||
|
{
|
||||||
|
ze::TrajectorySimulator::Ptr trajectory;
|
||||||
|
|
||||||
|
std::vector<ze::TrajectorySimulator::Ptr> trajectories_dynamic_objects;
|
||||||
|
bool dynamic_object = false;
|
||||||
|
|
||||||
|
size_t p_start, p_end;
|
||||||
|
p_start = 0;
|
||||||
|
while(1)
|
||||||
|
{
|
||||||
|
int trajectory_type;
|
||||||
|
if (dynamic_object)
|
||||||
|
{
|
||||||
|
trajectory_type = FLAGS_trajectory_dynamic_objects_type;
|
||||||
|
if (trajectory_type != 1)
|
||||||
|
{
|
||||||
|
LOG(FATAL) << "Only supporting trajectories of type 1 for dynamic objects!";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
trajectory_type = FLAGS_trajectory_type;
|
||||||
|
}
|
||||||
|
|
||||||
|
// set path for dynamic objects
|
||||||
|
std::string csv_path;
|
||||||
|
bool should_break = false;
|
||||||
|
|
||||||
|
if (dynamic_object)
|
||||||
|
{
|
||||||
|
if ((p_end = FLAGS_trajectory_dynamic_objects_csv_file.find(";",p_start)) != std::string::npos)
|
||||||
|
{
|
||||||
|
csv_path = FLAGS_trajectory_dynamic_objects_csv_dir + "/" + FLAGS_trajectory_dynamic_objects_csv_file.substr(p_start, p_end - p_start);
|
||||||
|
|
||||||
|
p_start = p_end + 1;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
csv_path = FLAGS_trajectory_dynamic_objects_csv_dir + "/" + FLAGS_trajectory_dynamic_objects_csv_file.substr(p_start, p_end - p_start);
|
||||||
|
should_break = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
csv_path = FLAGS_trajectory_csv_file;
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (trajectory_type)
|
||||||
|
{
|
||||||
|
case 0: // Random spline
|
||||||
|
{
|
||||||
|
std::shared_ptr<ze::BSplinePoseMinimalRotationVector> pbs =
|
||||||
|
std::make_shared<ze::BSplinePoseMinimalRotationVector>(FLAGS_trajectory_spline_order);
|
||||||
|
ze::MatrixX poses;
|
||||||
|
ze::VectorX times = ze::VectorX::LinSpaced(FLAGS_trajectory_sampling_frequency_hz * FLAGS_trajectory_length_s,
|
||||||
|
0,
|
||||||
|
FLAGS_trajectory_length_s);
|
||||||
|
poses.resize(6, times.size());
|
||||||
|
poses.setRandom();
|
||||||
|
poses.row(0) *= FLAGS_trajectory_multiplier_x;
|
||||||
|
poses.row(1) *= FLAGS_trajectory_multiplier_y;
|
||||||
|
poses.row(2) *= FLAGS_trajectory_multiplier_z;
|
||||||
|
poses.row(3) *= FLAGS_trajectory_multiplier_wx;
|
||||||
|
poses.row(4) *= FLAGS_trajectory_multiplier_wy;
|
||||||
|
poses.row(5) *= FLAGS_trajectory_multiplier_wz;
|
||||||
|
poses.row(0).array() += FLAGS_trajectory_offset_x;
|
||||||
|
poses.row(1).array() += FLAGS_trajectory_offset_y;
|
||||||
|
poses.row(2).array() += FLAGS_trajectory_offset_z;
|
||||||
|
poses.row(3).array() += FLAGS_trajectory_offset_wx;
|
||||||
|
poses.row(4).array() += FLAGS_trajectory_offset_wy;
|
||||||
|
poses.row(5).array() += FLAGS_trajectory_offset_wz;
|
||||||
|
pbs->initPoseSpline3(times, poses, FLAGS_trajectory_num_spline_segments, FLAGS_trajectory_lambda);
|
||||||
|
trajectory.reset(new ze::SplineTrajectorySimulator(pbs));
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case 1: // Load from CSV file
|
||||||
|
{
|
||||||
|
// Create trajectory:
|
||||||
|
ze::PoseSeries pose_series;
|
||||||
|
|
||||||
|
LOG(INFO) << "Reading trajectory from CSV file: " << csv_path;
|
||||||
|
pose_series.load(csv_path);
|
||||||
|
|
||||||
|
ze::StampedTransformationVector poses = pose_series.getStampedTransformationVector();
|
||||||
|
|
||||||
|
// Set start time of trajectory to zero.
|
||||||
|
const int64_t offset = poses.at(0).first;
|
||||||
|
for (ze::StampedTransformation& it : poses)
|
||||||
|
{
|
||||||
|
it.first -= offset;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG(INFO) << "Initializing spline from trajectory (this may take some sime)...";
|
||||||
|
|
||||||
|
int spline_order, num_spline_segments;
|
||||||
|
double lambda;
|
||||||
|
if (dynamic_object)
|
||||||
|
{
|
||||||
|
spline_order = FLAGS_trajectory_dynamic_objects_spline_order;
|
||||||
|
num_spline_segments = FLAGS_trajectory_dynamic_objects_num_spline_segments;
|
||||||
|
lambda = FLAGS_trajectory_dynamic_objects_lambda;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
spline_order = FLAGS_trajectory_spline_order;
|
||||||
|
num_spline_segments = FLAGS_trajectory_num_spline_segments;
|
||||||
|
lambda = FLAGS_trajectory_lambda;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::shared_ptr<ze::BSplinePoseMinimalRotationVector> bs =
|
||||||
|
std::make_shared<ze::BSplinePoseMinimalRotationVector>(spline_order);
|
||||||
|
bs->initPoseSplinePoses(poses, num_spline_segments, lambda);
|
||||||
|
if (dynamic_object)
|
||||||
|
{
|
||||||
|
trajectories_dynamic_objects.push_back(std::make_shared<ze::SplineTrajectorySimulator>(bs));
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
trajectory = std::make_shared<ze::SplineTrajectorySimulator>(bs);
|
||||||
|
}
|
||||||
|
LOG(INFO) << "Done!";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
{
|
||||||
|
LOG(FATAL) << "Trajectory type is not known.";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!dynamic_object)
|
||||||
|
{
|
||||||
|
if (!FLAGS_trajectory_dynamic_objects_csv_dir.empty() && !FLAGS_trajectory_dynamic_objects_csv_file.empty())
|
||||||
|
{
|
||||||
|
dynamic_object = true;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (should_break)
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return std::make_tuple(trajectory, trajectories_dynamic_objects);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
|
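For reference, a minimal standalone sketch (not part of the source; the helper name splitCsvList is hypothetical) of what the p_start/p_end loop in loadTrajectorySimulatorFromGflags() does with --trajectory_dynamic_objects_csv_dir and a semicolon-separated --trajectory_dynamic_objects_csv_file such as "obj1.csv;obj2.csv":

#include <string>
#include <vector>

// Splits "a.csv;b.csv" into {dir + "/a.csv", dir + "/b.csv"}.
std::vector<std::string> splitCsvList(const std::string& dir, const std::string& list)
{
  std::vector<std::string> paths;
  size_t start = 0;
  while (true)
  {
    const size_t end = list.find(';', start);
    paths.push_back(dir + "/" + list.substr(start, end - start));
    if (end == std::string::npos)
      break;
    start = end + 1;
  }
  return paths;
}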
31
event_camera_simulator/esim_unrealcv_bridge/CMakeLists.txt
Normal file
31
event_camera_simulator/esim_unrealcv_bridge/CMakeLists.txt
Normal file
@ -0,0 +1,31 @@
cmake_minimum_required(VERSION 2.8.3)
project(esim_unrealcv_bridge)

find_package(catkin_simple REQUIRED)
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
catkin_simple()

add_definitions(-std=c++11)

set(HEADERS
  include/esim/unrealcv_bridge/unrealcv_bridge.hpp
)

set(SOURCES
  src/unrealcv_bridge.cpp
)

cs_add_library(${PROJECT_NAME} ${SOURCES} ${HEADERS})
target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBRARIES} ${CMAKE_DL_LIBS} -lpthread -lboost_filesystem -lboost_system)

##########
# TESTS #
##########

cs_add_executable(test_unrealcv_bridge test/test_unrealcv_bridge.cpp)
target_link_libraries(test_unrealcv_bridge ${PROJECT_NAME})


cs_install()
cs_export()
@ -0,0 +1,77 @@
#pragma once

#include <boost/asio.hpp>
#include <boost/format.hpp>
#include <boost/algorithm/string/predicate.hpp>

#include <string>
#include <functional>
#include <vector>

#include <iostream>
#include <istream>
#include <ostream>

#include <chrono>
#include <thread>

#include <opencv2/core/core.hpp>

namespace event_camera_simulator {

using boost::asio::ip::tcp;

struct CameraData {
  uint32_t id;
  double_t pitch;
  double_t yaw;
  double_t roll;
  double_t x;
  double_t y;
  double_t z;
};


class UnrealCvClient {
public:
  UnrealCvClient(std::string host, std::string port);
  ~UnrealCvClient();

  void saveImage(uint32_t camid, std::string path);
  cv::Mat getImage(uint32_t camid);
  cv::Mat getDepth(uint32_t camid);
  void setCamera(const CameraData & camera);
  void setCameraFOV(float hfov_deg);
  void setCameraSize(int width, int height);

protected:

  void sendCommand(std::string command);

  template<typename Result>
  Result sendCommand(std::string command, std::function<Result(std::istream&, uint32_t)> mapf);

  void send(std::string msg, uint32_t counter);

  template<typename Result>
  Result receive(std::function<Result(std::istream&, uint32_t)> parser);

  // must be declared before socket_ because of C++ member initialization order
  boost::asio::io_service io_service_;
  tcp::socket socket_;
  mutable uint32_t counter_;
  uint32_t delay_;
  boost::format angular_format_;

private:
  void sleep(uint32_t delay);
  void handleError(boost::system::error_code ec);
  std::string istreamToString(std::istream& stream, uint32_t size);

  void parse_npy_header(unsigned char* buffer,
                        size_t& word_size,
                        std::vector<size_t>& shape,
                        bool& fortran_order);

};

} // namespace event_camera_simulator
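The wire protocol implemented in unrealcv_bridge.cpp (below) frames every request and reply as a 4-byte magic value 0x9E2B83C1, a 4-byte payload length, and then the payload string "<counter>:<command>"; replies are expected to start with "<counter>:ok" (or "<counter>:" followed by binary data for vget commands), and counter_ is incremented for every command sent.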
17
event_camera_simulator/esim_unrealcv_bridge/package.xml
Normal file
17
event_camera_simulator/esim_unrealcv_bridge/package.xml
Normal file
@ -0,0 +1,17 @@
<?xml version="1.0"?>
<package format="2">
  <name>esim_unrealcv_bridge</name>
  <version>0.0.0</version>
  <description>C++ client for UnrealCV.</description>

  <maintainer email="rptheiler@gmail.com">Raffael Theiler</maintainer>
  <maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>

  <license>GPLv3</license>

  <buildtool_depend>catkin</buildtool_depend>
  <buildtool_depend>catkin_simple</buildtool_depend>

  <depend>glog_catkin</depend>

</package>
@ -0,0 +1,368 @@
|
|||||||
|
#include <esim/unrealcv_bridge/unrealcv_bridge.hpp>
|
||||||
|
#include <opencv2/imgcodecs/imgcodecs.hpp>
|
||||||
|
#include <glog/logging.h>
|
||||||
|
|
||||||
|
#include <cmath>
|
||||||
|
#include <regex>
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
using boost::asio::ip::tcp;
|
||||||
|
|
||||||
|
// from: https://www.boost.org/doc/libs/1_47_0/doc/html/boost_asio/reference/connect/overload6.html
|
||||||
|
struct unrealcv_server_connect_condition
|
||||||
|
{
|
||||||
|
template <typename Iterator>
|
||||||
|
Iterator operator()(
|
||||||
|
const boost::system::error_code& ec,
|
||||||
|
Iterator next)
|
||||||
|
{
|
||||||
|
if(ec)
|
||||||
|
{
|
||||||
|
LOG(ERROR) << ec.message();
|
||||||
|
}
|
||||||
|
LOG(INFO) << "Trying: " << next->endpoint();
|
||||||
|
return next;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
UnrealCvClient::UnrealCvClient(std::string host, std::string port)
|
||||||
|
: io_service_(),
|
||||||
|
socket_(io_service_),
|
||||||
|
counter_(0),
|
||||||
|
delay_(30){
|
||||||
|
|
||||||
|
tcp::resolver resolver(io_service_);
|
||||||
|
tcp::resolver::query query(host, port);
|
||||||
|
tcp::resolver::iterator endpoint_iterator = resolver.resolve(query);
|
||||||
|
|
||||||
|
boost::system::error_code ec;
|
||||||
|
boost::asio::connect(socket_, endpoint_iterator, unrealcv_server_connect_condition(), ec);
|
||||||
|
if(ec)
|
||||||
|
{
|
||||||
|
LOG(FATAL) << "Could not connect to UnrealCV server";
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
sleep(500); // long sleep to let the connection initialize
|
||||||
|
|
||||||
|
// receive the first "we are connected" string
|
||||||
|
receive<std::string>([this] (std::istream& stream, uint32_t size) -> std::string {
|
||||||
|
return this->istreamToString(stream, size);
|
||||||
|
});
|
||||||
|
|
||||||
|
sleep(delay_);
|
||||||
|
sendCommand("vrun r.AmbientOcclusionLevels 0");
|
||||||
|
sendCommand("vrun r.LensFlareQuality 0");
|
||||||
|
sendCommand("vrun r.DefaultFeature.AntiAliasing 2");
|
||||||
|
sendCommand("vrun r.DefaultFeature.MotionBlur 0");
|
||||||
|
sendCommand("vrun r.PostProcessAAQuality 6");
|
||||||
|
}
|
||||||
|
|
||||||
|
UnrealCvClient::~UnrealCvClient() {
|
||||||
|
socket_.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
void UnrealCvClient::saveImage(uint32_t camid, std::string path)
|
||||||
|
{
|
||||||
|
std::string req = (boost::format("vget /camera/%i/lit %s") % camid % path).str();
|
||||||
|
sendCommand(req);
|
||||||
|
}
|
||||||
|
|
||||||
|
cv::Mat UnrealCvClient::getImage(uint32_t camid)
|
||||||
|
{
|
||||||
|
std::string req = (boost::format("vget /camera/%i/lit png") % camid).str();
|
||||||
|
|
||||||
|
return sendCommand<cv::Mat>(req, [](std::istream& stream, uint32_t size){
|
||||||
|
std::vector<char> data(size);
|
||||||
|
stream.read(data.data(), size);
|
||||||
|
cv::Mat matrixPng = cv::imdecode(cv::Mat(data), 1);
|
||||||
|
return matrixPng.clone();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
cv::Mat UnrealCvClient::getDepth(uint32_t camid)
|
||||||
|
{
|
||||||
|
std::string req = (boost::format("vget /camera/%i/depth npy") % camid).str();
|
||||||
|
|
||||||
|
return sendCommand<cv::Mat>(req, [this](std::istream& stream, uint32_t size){
|
||||||
|
|
||||||
|
std::vector<char> data(size);
|
||||||
|
stream.read(data.data(), size);
|
||||||
|
unsigned char* buffer = (unsigned char *)data.data();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Gather data from the header
|
||||||
|
*/
|
||||||
|
std::vector<size_t> img_dims; //if appending, the shape of existing + new data
|
||||||
|
size_t word_size;
|
||||||
|
bool fortran_order;
|
||||||
|
parse_npy_header(buffer, word_size, img_dims, fortran_order);
|
||||||
|
|
||||||
|
// https://docs.scipy.org/doc/numpy/neps/npy-format.html
|
||||||
|
std::string npy_magic(reinterpret_cast<char*>(buffer),6);
|
||||||
|
uint8_t major_version = *reinterpret_cast<uint8_t*>(buffer+6);
|
||||||
|
uint8_t minor_version = *reinterpret_cast<uint8_t*>(buffer+7);
|
||||||
|
uint16_t header_str_len = *reinterpret_cast<uint16_t*>(buffer+8);
|
||||||
|
std::string header(reinterpret_cast<char*>(buffer+9),header_str_len);
|
||||||
|
|
||||||
|
uint16_t header_total_len = 10 + header_str_len;
|
||||||
|
uint32_t data_length = data.size() - header_total_len;
|
||||||
|
uint32_t num_pixel = img_dims.at(0) * img_dims.at(1);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Ensure that the data is okay
|
||||||
|
*/
|
||||||
|
if(!(major_version == 1 &&
|
||||||
|
minor_version == 0 &&
|
||||||
|
npy_magic.find("NUMPY") != std::string::npos)){
|
||||||
|
throw std::runtime_error("Npy header data not supported");
|
||||||
|
}
|
||||||
|
|
||||||
|
if(!(data_length == (num_pixel * sizeof(float_t)))) {
|
||||||
|
throw std::runtime_error("Npy array data shape does not match the image size");
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Read and convert the data
|
||||||
|
*/
|
||||||
|
cv::Mat matrix_f32 = cv::Mat(img_dims.at(0), img_dims.at(1),
|
||||||
|
CV_32F, buffer + header_total_len).clone();
|
||||||
|
|
||||||
|
return matrix_f32;
|
||||||
|
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void UnrealCvClient::setCamera(const CameraData & camera)
|
||||||
|
{
|
||||||
|
std::string cam_pose_s = (boost::format("vset /camera/%i/pose %.5f %.5f %.5f %.5f %.5f %.5f") %
|
||||||
|
camera.id %
|
||||||
|
camera.x %
|
||||||
|
camera.y %
|
||||||
|
camera.z %
|
||||||
|
camera.pitch %
|
||||||
|
camera.yaw %
|
||||||
|
camera.roll).str();
|
||||||
|
|
||||||
|
sendCommand(cam_pose_s);
|
||||||
|
}
|
||||||
|
|
||||||
|
void UnrealCvClient::setCameraSize(int width, int height)
|
||||||
|
{
|
||||||
|
VLOG(1) << "Setting the camera size to: " << width << "x" << height;
|
||||||
|
std::string req_size = (boost::format("vrun r.setres %dx%d") %
|
||||||
|
width %
|
||||||
|
height).str();
|
||||||
|
sendCommand(req_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
void UnrealCvClient::setCameraFOV(float hfov_deg)
|
||||||
|
{
|
||||||
|
VLOG(1) << "Setting the camera horizontal field of view to: " << hfov_deg << " deg";
|
||||||
|
const int cam_id = 0;
|
||||||
|
std::string req_fov = (boost::format("vset /camera/%i/horizontal_fieldofview %.5f") %
|
||||||
|
cam_id %
|
||||||
|
hfov_deg).str();
|
||||||
|
sendCommand(req_fov);
|
||||||
|
}
|
||||||
|
|
||||||
|
void UnrealCvClient::sendCommand(std::string command)
|
||||||
|
{
|
||||||
|
if (!(boost::starts_with(command, "vset") || boost::starts_with(command, "vrun"))) {
|
||||||
|
throw std::runtime_error(
|
||||||
|
"invalid command: command must start with vget or (vset, vrun)");
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t tmpc = counter_++;
|
||||||
|
VLOG(1) << "SET:" << tmpc << " " << command;
|
||||||
|
send(command, tmpc);
|
||||||
|
sleep(delay_);
|
||||||
|
|
||||||
|
std::string result_prefix = std::to_string(tmpc) + ":";
|
||||||
|
|
||||||
|
/*
|
||||||
|
* is set command: we never expect something else than "ok",
|
||||||
|
* we do not use mapf
|
||||||
|
*/
|
||||||
|
|
||||||
|
std::string result = receive<std::string>(
|
||||||
|
[this] (std::istream& stream, uint32_t size) -> std::string {
|
||||||
|
return this->istreamToString(stream, size);
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!boost::starts_with(result, result_prefix + "ok")) {
|
||||||
|
throw std::runtime_error("response identifier is incorrect");
|
||||||
|
} else {
|
||||||
|
VLOG(1) << "GET:" << tmpc << " " << "ok";
|
||||||
|
}
|
||||||
|
|
||||||
|
sleep(delay_);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
template<typename Result>
|
||||||
|
Result UnrealCvClient::sendCommand(std::string command, std::function<Result(std::istream&, uint32_t)> mapf)
|
||||||
|
{
|
||||||
|
if (!(boost::starts_with(command, "vget")))
|
||||||
|
{
|
||||||
|
throw std::runtime_error(
|
||||||
|
"invalid command: command must start with vget or (vset, vrun)");
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t tmpc = counter_++;
|
||||||
|
VLOG(1) << "SET:" << tmpc << " " << command;
|
||||||
|
send(command, tmpc);
|
||||||
|
sleep(delay_);
|
||||||
|
|
||||||
|
std::string result_prefix = std::to_string(tmpc) + ":";
|
||||||
|
|
||||||
|
/*
|
||||||
|
* is get command: we expect a response that can
|
||||||
|
* be a string or binary data
|
||||||
|
*/
|
||||||
|
|
||||||
|
Result result = receive<Result>(
|
||||||
|
[this, result_prefix, mapf] (std::istream& stream, uint32_t size) -> Result {
|
||||||
|
|
||||||
|
std::string prefix = istreamToString(stream, result_prefix.size());
|
||||||
|
size-=result_prefix.size();
|
||||||
|
|
||||||
|
if(!boost::starts_with(prefix, result_prefix)) {
|
||||||
|
throw std::runtime_error("response identifier is incorrect");
|
||||||
|
}
|
||||||
|
|
||||||
|
return mapf(stream, size);
|
||||||
|
});
|
||||||
|
|
||||||
|
sleep(delay_);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
void UnrealCvClient::send(std::string msg, uint32_t counter)
|
||||||
|
{
|
||||||
|
std::string out = std::to_string(counter) + ":" + msg;
|
||||||
|
|
||||||
|
uint32_t magic = 0x9E2B83C1;
|
||||||
|
uint32_t size = out.length();
|
||||||
|
|
||||||
|
boost::asio::streambuf request;
|
||||||
|
std::ostream request_stream(&request);
|
||||||
|
boost::system::error_code ec;
|
||||||
|
|
||||||
|
request_stream.write((char*) &magic, sizeof(magic));
|
||||||
|
request_stream.write((char*) &size, sizeof(size));
|
||||||
|
|
||||||
|
request_stream << out;
|
||||||
|
// Send the request.
|
||||||
|
boost::asio::write(socket_,
|
||||||
|
request,
|
||||||
|
boost::asio::transfer_exactly(request.size() + sizeof(magic) + sizeof(size)),
|
||||||
|
ec);
|
||||||
|
}
|
||||||
|
|
||||||
|
template<typename Result>
|
||||||
|
Result UnrealCvClient::receive(std::function<Result(std::istream&, uint32_t)> parser)
|
||||||
|
{
|
||||||
|
|
||||||
|
boost::system::error_code ec;
|
||||||
|
boost::asio::streambuf response;
|
||||||
|
|
||||||
|
//first read the 8 byte header
|
||||||
|
boost::asio::read(socket_, response, boost::asio::transfer_exactly(8), ec);
|
||||||
|
handleError(ec);
|
||||||
|
|
||||||
|
uint32_t magic;
|
||||||
|
uint32_t size;
|
||||||
|
|
||||||
|
// Check that response is OK.
|
||||||
|
std::istream response_stream(&response);
|
||||||
|
response_stream.read((char*)&magic, sizeof(magic));
|
||||||
|
response_stream.read((char*)&size, sizeof(size));
|
||||||
|
|
||||||
|
|
||||||
|
boost::asio::read(socket_, response, boost::asio::transfer_exactly(size), ec);
|
||||||
|
handleError(ec);
|
||||||
|
|
||||||
|
Result res = parser(response_stream, size);
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
|
void UnrealCvClient::handleError(boost::system::error_code ec)
|
||||||
|
{
|
||||||
|
if (ec == boost::asio::error::eof) {
|
||||||
|
throw boost::system::system_error(ec); // Connection closed by peer.
|
||||||
|
} else if (ec) {
|
||||||
|
throw boost::system::system_error(ec); // Some other error.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void UnrealCvClient::sleep(uint32_t delay) {
|
||||||
|
std::this_thread::sleep_for(std::chrono::milliseconds(delay));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
std::string UnrealCvClient::istreamToString(
|
||||||
|
std::istream& stream, uint32_t size)
|
||||||
|
{
|
||||||
|
|
||||||
|
// Construct the string with an explicit length: the received buffer is not
// null-terminated, so streaming it with operator<< could read past the data.
std::vector<char> buffer(size);
stream.read(buffer.data(), size);
return std::string(buffer.data(), size);
|
||||||
|
}
|
||||||
|
|
||||||
|
// from cnpy: https://github.com/rogersce/cnpy/blob/master/cnpy.cpp
|
||||||
|
void UnrealCvClient::parse_npy_header(unsigned char* buffer,
|
||||||
|
size_t& word_size,
|
||||||
|
std::vector<size_t>& shape,
|
||||||
|
bool& fortran_order)
|
||||||
|
{
|
||||||
|
|
||||||
|
//std::string magic_string(buffer,6);
|
||||||
|
uint8_t major_version = *reinterpret_cast<uint8_t*>(buffer+6);
|
||||||
|
uint8_t minor_version = *reinterpret_cast<uint8_t*>(buffer+7);
|
||||||
|
uint16_t header_len = *reinterpret_cast<uint16_t*>(buffer+8);
|
||||||
|
std::string header(reinterpret_cast<char*>(buffer+9),header_len);
|
||||||
|
|
||||||
|
size_t loc1, loc2;
|
||||||
|
|
||||||
|
//fortran order
|
||||||
|
loc1 = header.find("fortran_order")+16;
|
||||||
|
fortran_order = (header.substr(loc1,4) == "True" ? true : false);
|
||||||
|
|
||||||
|
//shape
|
||||||
|
loc1 = header.find("(");
|
||||||
|
loc2 = header.find(")");
|
||||||
|
|
||||||
|
std::regex num_regex("[0-9][0-9]*");
|
||||||
|
std::smatch sm;
|
||||||
|
shape.clear();
|
||||||
|
|
||||||
|
std::string str_shape = header.substr(loc1+1,loc2-loc1-1);
|
||||||
|
while(std::regex_search(str_shape, sm, num_regex))
|
||||||
|
{
|
||||||
|
shape.push_back(std::stoi(sm[0].str()));
|
||||||
|
str_shape = sm.suffix().str();
|
||||||
|
}
|
||||||
|
|
||||||
|
//endian, word size, data type
|
||||||
|
//byte order code | stands for not applicable.
|
||||||
|
//not sure when this applies except for byte array
|
||||||
|
loc1 = header.find("descr")+9;
|
||||||
|
bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
|
||||||
|
assert(littleEndian);
|
||||||
|
|
||||||
|
//char type = header[loc1+1];
|
||||||
|
//assert(type == map_type(T));
|
||||||
|
|
||||||
|
std::string str_ws = header.substr(loc1+2);
|
||||||
|
loc2 = str_ws.find("'");
|
||||||
|
word_size = atoi(str_ws.substr(0,loc2).c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
|
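getDepth() above relies on the NPY v1.0 layout; a small sketch (assumptions: format version 1.0, little-endian host; the function name npyDataOffset is illustrative only) of how the data offset used as header_total_len can be computed:

#include <cstdint>
#include <cstring>

// NPY v1.0 layout: 6-byte magic "\x93NUMPY", 1-byte major, 1-byte minor,
// 2-byte little-endian header length, ASCII header dict, then raw array data.
inline std::size_t npyDataOffset(const unsigned char* buffer)
{
  std::uint16_t header_len = 0;
  std::memcpy(&header_len, buffer + 8, sizeof(header_len));
  return 10 + header_len; // matches header_total_len in UnrealCvClient::getDepth()
}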
@ -0,0 +1,35 @@
#include <esim/unrealcv_bridge/unrealcv_bridge.hpp>
#include <opencv2/highgui/highgui.hpp>

using boost::asio::ip::tcp;
using namespace std;

int main() {
  event_camera_simulator::UnrealCvClient client("localhost", "9000");

  cv::namedWindow("Image", cv::WINDOW_AUTOSIZE );
  cv::namedWindow("Depthmap", cv::WINDOW_AUTOSIZE );

  for(double y = 0.0; y<100.0; y+=10.0)
  {
    event_camera_simulator::CameraData test = {0,
                                               0.0,
                                               0.0,
                                               0.0,
                                               0.0,
                                               y,
                                               100.0};

    client.setCamera(test);
    cv::Mat img = client.getImage(0);
    cv::imshow("Image", img);

    cv::Mat depthmap = client.getDepth(0);
    cv::normalize(depthmap, depthmap, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::imshow("Depthmap", depthmap);

    cv::waitKey(10);
  }

  return 0;
}
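Running test_unrealcv_bridge assumes an UnrealCV-enabled Unreal Engine instance is already listening on localhost:9000 (the arguments passed to UnrealCvClient above); the loop then sweeps camera 0 along the y axis and displays the rendered image and the min-max-normalized depth map.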
31
event_camera_simulator/esim_visualization/CMakeLists.txt
Normal file
31
event_camera_simulator/esim_visualization/CMakeLists.txt
Normal file
@ -0,0 +1,31 @@
cmake_minimum_required(VERSION 2.8.3)
project(esim_visualization)

find_package(catkin_simple REQUIRED)
catkin_simple()

# This macro ensures modules and global scripts declared therein get installed
# See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
catkin_python_setup()

set(HEADERS
  include/esim/visualization/publisher_interface.hpp
  include/esim/visualization/ros_utils.hpp
  include/esim/visualization/ros_publisher.hpp
  include/esim/visualization/rosbag_writer.hpp
  include/esim/visualization/adaptive_sampling_benchmark_publisher.hpp
  include/esim/visualization/synthetic_optic_flow_publisher.hpp
)

set(SOURCES
  src/ros_utils.cpp
  src/ros_publisher.cpp
  src/rosbag_writer.cpp
  src/adaptive_sampling_benchmark_publisher.cpp
  src/synthetic_optic_flow_publisher.cpp
)

cs_add_library(${PROJECT_NAME} ${SOURCES} ${HEADERS})

cs_install()
cs_export()
570
event_camera_simulator/esim_visualization/cfg/esim.perspective
Normal file
570
event_camera_simulator/esim_visualization/cfg/esim.perspective
Normal file
@ -0,0 +1,570 @@
|
|||||||
|
{
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"pluginmanager": {
|
||||||
|
"keys": {
|
||||||
|
"running-plugins": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "{u'rqt_plot/Plot': [2], u'rqt_image_view/ImageView': [4, 3, 1, 2]}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {
|
||||||
|
"plugin__rqt_image_view__ImageView__6": {
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"dock_widget__ImageViewWidget": {
|
||||||
|
"keys": {
|
||||||
|
"dockable": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'true'"
|
||||||
|
},
|
||||||
|
"parent": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "None"
|
||||||
|
},
|
||||||
|
"dock_widget_title": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'Image View (6)'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
},
|
||||||
|
"plugin": {
|
||||||
|
"keys": {
|
||||||
|
"max_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'10'"
|
||||||
|
},
|
||||||
|
"mouse_pub_topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/dvs/image_raw_mouse_left'"
|
||||||
|
},
|
||||||
|
"num_gridlines": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'0'"
|
||||||
|
},
|
||||||
|
"toolbar_hidden": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'false'"
|
||||||
|
},
|
||||||
|
"zoom1": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'false'"
|
||||||
|
},
|
||||||
|
"dynamic_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'false'"
|
||||||
|
},
|
||||||
|
"topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/dvs/image_raw'"
|
||||||
|
},
|
||||||
|
"publish_click_location": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'false'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"plugin__rqt_image_view__ImageView__4": {
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"dock_widget__ImageViewWidget": {
|
||||||
|
"keys": {
|
||||||
|
"dockable": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "True"
|
||||||
|
},
|
||||||
|
"parent": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "None"
|
||||||
|
},
|
||||||
|
"dock_widget_title": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'Image View (4)'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
},
|
||||||
|
"plugin": {
|
||||||
|
"keys": {
|
||||||
|
"max_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "10.0"
|
||||||
|
},
|
||||||
|
"mouse_pub_topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/flow_arrows_mouse_left'"
|
||||||
|
},
|
||||||
|
"num_gridlines": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "0"
|
||||||
|
},
|
||||||
|
"toolbar_hidden": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
},
|
||||||
|
"zoom1": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
},
|
||||||
|
"dynamic_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "True"
|
||||||
|
},
|
||||||
|
"topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/flow_arrows'"
|
||||||
|
},
|
||||||
|
"publish_click_location": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"plugin__rqt_image_view__ImageView__5": {
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"dock_widget__ImageViewWidget": {
|
||||||
|
"keys": {
|
||||||
|
"dockable": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'true'"
|
||||||
|
},
|
||||||
|
"parent": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "None"
|
||||||
|
},
|
||||||
|
"dock_widget_title": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'Image View (5)'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
},
|
||||||
|
"plugin": {
|
||||||
|
"keys": {
|
||||||
|
"max_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'10'"
|
||||||
|
},
|
||||||
|
"mouse_pub_topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/davis/left/rendering/undistorted/pos_mouse_left'"
|
||||||
|
},
|
||||||
|
"num_gridlines": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'0'"
|
||||||
|
},
|
||||||
|
"toolbar_hidden": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'false'"
|
||||||
|
},
|
||||||
|
"zoom1": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'false'"
|
||||||
|
},
|
||||||
|
"dynamic_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'false'"
|
||||||
|
},
|
||||||
|
"topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/davis/left/rendering/undistorted/pos'"
|
||||||
|
},
|
||||||
|
"publish_click_location": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'false'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"plugin__rqt_image_view__ImageView__2": {
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"dock_widget__ImageViewWidget": {
|
||||||
|
"keys": {
|
||||||
|
"dockable": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "True"
|
||||||
|
},
|
||||||
|
"parent": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "None"
|
||||||
|
},
|
||||||
|
"dock_widget_title": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'Image View (2)'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
},
|
||||||
|
"plugin": {
|
||||||
|
"keys": {
|
||||||
|
"max_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "14.0"
|
||||||
|
},
|
||||||
|
"mouse_pub_topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/dvs_rendering_mouse_left'"
|
||||||
|
},
|
||||||
|
"num_gridlines": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "0"
|
||||||
|
},
|
||||||
|
"toolbar_hidden": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
},
|
||||||
|
"zoom1": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
},
|
||||||
|
"dynamic_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
},
|
||||||
|
"topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/dvs_rendering'"
|
||||||
|
},
|
||||||
|
"publish_click_location": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"plugin__rqt_image_view__ImageView__3": {
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"dock_widget__ImageViewWidget": {
|
||||||
|
"keys": {
|
||||||
|
"dockable": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "True"
|
||||||
|
},
|
||||||
|
"parent": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "None"
|
||||||
|
},
|
||||||
|
"dock_widget_title": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'Image View (3)'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
},
|
||||||
|
"plugin": {
|
||||||
|
"keys": {
|
||||||
|
"max_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "10.0"
|
||||||
|
},
|
||||||
|
"mouse_pub_topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/cam0/image_raw_mouse_left'"
|
||||||
|
},
|
||||||
|
"num_gridlines": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "0"
|
||||||
|
},
|
||||||
|
"toolbar_hidden": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
},
|
||||||
|
"zoom1": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
},
|
||||||
|
"dynamic_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
},
|
||||||
|
"topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/cam0/image_raw'"
|
||||||
|
},
|
||||||
|
"publish_click_location": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"plugin__rqt_image_view__ImageView__1": {
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"dock_widget__ImageViewWidget": {
|
||||||
|
"keys": {
|
||||||
|
"dockable": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "True"
|
||||||
|
},
|
||||||
|
"parent": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "None"
|
||||||
|
},
|
||||||
|
"dock_widget_title": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'Image View'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
},
|
||||||
|
"plugin": {
|
||||||
|
"keys": {
|
||||||
|
"max_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "10.0"
|
||||||
|
},
|
||||||
|
"mouse_pub_topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/flow_color_mouse_left'"
|
||||||
|
},
|
||||||
|
"num_gridlines": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "0"
|
||||||
|
},
|
||||||
|
"toolbar_hidden": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
},
|
||||||
|
"zoom1": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
},
|
||||||
|
"dynamic_range": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
},
|
||||||
|
"topic": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'/flow_color'"
|
||||||
|
},
|
||||||
|
"publish_click_location": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "False"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"plugin__rqt_plot__Plot__1": {
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"dock_widget__DataPlotWidget": {
|
||||||
|
"keys": {
|
||||||
|
"dockable": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'true'"
|
||||||
|
},
|
||||||
|
"parent": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "None"
|
||||||
|
},
|
||||||
|
"dock_widget_title": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'MatPlot'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
},
|
||||||
|
"plugin": {
|
||||||
|
"keys": {
|
||||||
|
"autoscroll": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'true'"
|
||||||
|
},
|
||||||
|
"plot_type": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'1'"
|
||||||
|
},
|
||||||
|
"topics": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "[u'/dvs/imu/linear_acceleration/z', u'/dvs/imu/linear_acceleration/x', u'/dvs/imu/linear_acceleration/y']"
|
||||||
|
},
|
||||||
|
"y_limits": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "[u'-13.594160308518013', u'26.558581681023206']"
|
||||||
|
},
|
||||||
|
"x_limits": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "[u'-1522917844.8259192', u'-1522917843.8259192']"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"plugin__rqt_plot__Plot__2": {
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"dock_widget__DataPlotWidget": {
|
||||||
|
"keys": {
|
||||||
|
"dockable": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "True"
|
||||||
|
},
|
||||||
|
"parent": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "None"
|
||||||
|
},
|
||||||
|
"dock_widget_title": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'MatPlot (2)'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
},
|
||||||
|
"plugin": {
|
||||||
|
"keys": {
|
||||||
|
"autoscroll": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "True"
|
||||||
|
},
|
||||||
|
"plot_type": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "1"
|
||||||
|
},
|
||||||
|
"topics": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "[u'/imu/linear_acceleration/z', u'/imu/linear_acceleration/x', u'/imu/linear_acceleration/y']"
|
||||||
|
},
|
||||||
|
"y_limits": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "[-24.496623122229014, 26.176058546767358]"
|
||||||
|
},
|
||||||
|
"x_limits": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "[-1523004547.6889791, -1523004546.6889791]"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"plugin__rqt_publisher__Publisher__1": {
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"plugin": {
|
||||||
|
"keys": {
|
||||||
|
"publishers": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'[]'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"plugin__rqt_service_caller__ServiceCaller__1": {
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"dock_widget__ServiceCallerWidget": {
|
||||||
|
"keys": {
|
||||||
|
"dockable": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'true'"
|
||||||
|
},
|
||||||
|
"parent": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "None"
|
||||||
|
},
|
||||||
|
"dock_widget_title": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'Service Caller'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
},
|
||||||
|
"plugin": {
|
||||||
|
"keys": {
|
||||||
|
"splitter_orientation": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'2'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"plugin__rqt_reconfigure__Param__1": {
|
||||||
|
"keys": {},
|
||||||
|
"groups": {
|
||||||
|
"dock_widget___plugincontainer_top_widget": {
|
||||||
|
"keys": {
|
||||||
|
"dockable": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'true'"
|
||||||
|
},
|
||||||
|
"parent": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "None"
|
||||||
|
},
|
||||||
|
"dock_widget_title": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "u'Dynamic Reconfigure'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
},
|
||||||
|
"plugin": {
|
||||||
|
"keys": {
|
||||||
|
"splitter": {
|
||||||
|
"type": "repr(QByteArray.hex)",
|
||||||
|
"repr(QByteArray.hex)": "QtCore.QByteArray('000000ff0000000100000002000000ae0000006401ffffffff010000000100')",
|
||||||
|
"pretty-print": " d "
|
||||||
|
},
|
||||||
|
"_splitter": {
|
||||||
|
"type": "repr(QByteArray.hex)",
|
||||||
|
"repr(QByteArray.hex)": "QtCore.QByteArray('000000ff00000001000000020000012c000000640100000009010000000200')",
|
||||||
|
"pretty-print": " , d "
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mainwindow": {
|
||||||
|
"keys": {
|
||||||
|
"geometry": {
|
||||||
|
"type": "repr(QByteArray.hex)",
|
||||||
|
"repr(QByteArray.hex)": "QtCore.QByteArray('01d9d0cb0002000000000041000000180000077f0000043700000041000000180000077f0000043700000001000000000780')",
|
||||||
|
"pretty-print": " A 7 A 7 "
|
||||||
|
},
|
||||||
|
"state": {
|
||||||
|
"type": "repr(QByteArray.hex)",
|
||||||
|
"repr(QByteArray.hex)": "QtCore.QByteArray('000000ff00000000fd00000001000000030000073f0000040afc0100000006fc000000000000019b0000000000fffffffc0200000001fb0000005a007200710074005f0069006d006100670065005f0076006900650077005f005f0049006d0061006700650056006900650077005f005f0035005f005f0049006d00610067006500560069006500770057006900640067006500740100000000000002020000000000000000fb00000058007200710074005f007000750062006c00690073006800650072005f005f005000750062006c00690073006800650072005f005f0031005f005f005000750062006c0069007300680065007200570069006400670065007401000001b0000002650000000000000000fc00000000000003ad000001c000fffffffc0100000002fc00000000000001d3000000dd00fffffffc0200000002fb0000005a007200710074005f0069006d006100670065005f0076006900650077005f005f0049006d0061006700650056006900650077005f005f0033005f005f0049006d00610067006500560069006500770057006900640067006500740100000000000001ff0000005100fffffffb0000005a007200710074005f0069006d006100670065005f0076006900650077005f005f0049006d0061006700650056006900650077005f005f0031005f005f0049006d00610067006500560069006500770057006900640067006500740100000205000002050000005100fffffffc000001d9000001d4000000dd00fffffffc0200000002fb0000005a007200710074005f0069006d006100670065005f0076006900650077005f005f0049006d0061006700650056006900650077005f005f0034005f005f0049006d00610067006500560069006500770057006900640067006500740100000000000001ff0000005100fffffffb0000005a007200710074005f0069006d006100670065005f0076006900650077005f005f0049006d0061006700650056006900650077005f005f0032005f005f0049006d00610067006500560069006500770057006900640067006500740100000205000002050000005100fffffffb0000005a007200710074005f0069006d006100670065005f0076006900650077005f005f0049006d0061006700650056006900650077005f005f0036005f005f0049006d006100670065005600690065007700570069006400670065007401000005a40000019b0000000000000000fb00000042007200710074005f0070006c006f0074005f005f0050006c006f0074005f005f0031005f005f00440061007400610050006c006f0074005700690064006700650074010000029a000002220000000000000000fb00000042007200710074005f0070006c006f0074005f005f0050006c006f0074005f005f0032005f005f00440061007400610050006c006f007400570069006400670065007401000003b30000038c0000017800ffffff0000073f0000000000000004000000040000000800000008fc00000001000000030000000100000036004d0069006e0069006d0069007a006500640044006f0063006b00570069006400670065007400730054006f006f006c0062006100720000000000ffffffff0000000000000000')",
|
||||||
|
"pretty-print": " e Q Q Zrqt_image_view__ImageView__4__ImageViewWidget Zrqt_image_view__ImageView__2__ImageViewWidget Zrqt_image_view__ImageView__6__ImageViewWidget Brqt_plot__Plot__1__DataPlotWidget Brqt_plot__Plot__2__DataPlotWidget 6MinimizedDockWidgetsToolbar "
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {
|
||||||
|
"toolbar_areas": {
|
||||||
|
"keys": {
|
||||||
|
"MinimizedDockWidgetsToolbar": {
|
||||||
|
"type": "repr",
|
||||||
|
"repr": "8"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"groups": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
207
event_camera_simulator/esim_visualization/cfg/esim.rviz
Normal file
207
event_camera_simulator/esim_visualization/cfg/esim.rviz
Normal file
@ -0,0 +1,207 @@
|
|||||||
|
Panels:
|
||||||
|
- Class: rviz/Displays
|
||||||
|
Help Height: 78
|
||||||
|
Name: Displays
|
||||||
|
Property Tree Widget:
|
||||||
|
Expanded:
|
||||||
|
- /Grid1/Offset1
|
||||||
|
- /TF1/Status1
|
||||||
|
- /TF1/Frames1
|
||||||
|
- /PointCloud21
|
||||||
|
- /PointCloud21/Autocompute Value Bounds1
|
||||||
|
Splitter Ratio: 0.504322767
|
||||||
|
Tree Height: 449
|
||||||
|
- Class: rviz/Selection
|
||||||
|
Name: Selection
|
||||||
|
- Class: rviz/Tool Properties
|
||||||
|
Expanded:
|
||||||
|
- /2D Pose Estimate1
|
||||||
|
- /2D Nav Goal1
|
||||||
|
- /Publish Point1
|
||||||
|
Name: Tool Properties
|
||||||
|
Splitter Ratio: 0.588679016
|
||||||
|
- Class: rviz/Views
|
||||||
|
Expanded:
|
||||||
|
- /Current View1
|
||||||
|
Name: Views
|
||||||
|
Splitter Ratio: 0.5
|
||||||
|
- Class: rviz/Time
|
||||||
|
Experimental: false
|
||||||
|
Name: Time
|
||||||
|
SyncMode: 0
|
||||||
|
SyncSource: Image
|
||||||
|
Visualization Manager:
|
||||||
|
Class: ""
|
||||||
|
Displays:
|
||||||
|
- Alpha: 0.5
|
||||||
|
Cell Size: 1
|
||||||
|
Class: rviz/Grid
|
||||||
|
Color: 160; 160; 164
|
||||||
|
Enabled: true
|
||||||
|
Line Style:
|
||||||
|
Line Width: 0.0299999993
|
||||||
|
Value: Lines
|
||||||
|
Name: Grid
|
||||||
|
Normal Cell Count: 0
|
||||||
|
Offset:
|
||||||
|
X: 0
|
||||||
|
Y: 0
|
||||||
|
Z: -1
|
||||||
|
Plane: XY
|
||||||
|
Plane Cell Count: 10
|
||||||
|
Reference Frame: <Fixed Frame>
|
||||||
|
Value: true
|
||||||
|
- Class: rviz/TF
|
||||||
|
Enabled: true
|
||||||
|
Frame Timeout: 15
|
||||||
|
Frames:
|
||||||
|
All Enabled: false
|
||||||
|
body:
|
||||||
|
Value: false
|
||||||
|
cam0:
|
||||||
|
Value: true
|
||||||
|
map:
|
||||||
|
Value: false
|
||||||
|
Marker Scale: 1
|
||||||
|
Name: TF
|
||||||
|
Show Arrows: false
|
||||||
|
Show Axes: true
|
||||||
|
Show Names: false
|
||||||
|
Tree:
|
||||||
|
map:
|
||||||
|
body:
|
||||||
|
{}
|
||||||
|
cam0:
|
||||||
|
{}
|
||||||
|
Update Interval: 0
|
||||||
|
Value: true
|
||||||
|
- Class: rviz/Image
|
||||||
|
Enabled: true
|
||||||
|
Image Topic: /dvs_rendering
|
||||||
|
Max Value: 1
|
||||||
|
Median window: 5
|
||||||
|
Min Value: 0
|
||||||
|
Name: Image
|
||||||
|
Normalize Range: true
|
||||||
|
Queue Size: 2
|
||||||
|
Transport Hint: raw
|
||||||
|
Unreliable: false
|
||||||
|
Value: true
|
||||||
|
- Alpha: 1
|
||||||
|
Autocompute Intensity Bounds: true
|
||||||
|
Autocompute Value Bounds:
|
||||||
|
Max Value: 1.51484442
|
||||||
|
Min Value: 1.28932905
|
||||||
|
Value: true
|
||||||
|
Axis: Z
|
||||||
|
Channel Name: intensity
|
||||||
|
Class: rviz/PointCloud2
|
||||||
|
Color: 132; 132; 132
|
||||||
|
Color Transformer: RGB8
|
||||||
|
Decay Time: 100
|
||||||
|
Enabled: true
|
||||||
|
Invert Rainbow: false
|
||||||
|
Max Color: 255; 255; 255
|
||||||
|
Max Intensity: 1
|
||||||
|
Min Color: 0; 0; 0
|
||||||
|
Min Intensity: 0
|
||||||
|
Name: PointCloud2
|
||||||
|
Position Transformer: XYZ
|
||||||
|
Queue Size: 10
|
||||||
|
Selectable: true
|
||||||
|
Size (Pixels): 1
|
||||||
|
Size (m): 0.00999999978
|
||||||
|
Style: Points
|
||||||
|
Topic: /cam0/pointcloud
|
||||||
|
Unreliable: false
|
||||||
|
Use Fixed Frame: false
|
||||||
|
Use rainbow: true
|
||||||
|
Value: true
|
||||||
|
- Alpha: 1
|
||||||
|
Buffer Length: 1
|
||||||
|
Class: rviz/Path
|
||||||
|
Color: 25; 255; 0
|
||||||
|
Enabled: true
|
||||||
|
Head Diameter: 0.300000012
|
||||||
|
Head Length: 0.200000003
|
||||||
|
Length: 0.100000001
|
||||||
|
Line Style: Billboards
|
||||||
|
Line Width: 0.0199999996
|
||||||
|
Name: Path
|
||||||
|
Offset:
|
||||||
|
X: 0
|
||||||
|
Y: 0
|
||||||
|
Z: 0
|
||||||
|
Pose Color: 255; 85; 255
|
||||||
|
Pose Style: None
|
||||||
|
Radius: 0.0199999996
|
||||||
|
Shaft Diameter: 0.100000001
|
||||||
|
Shaft Length: 0.100000001
|
||||||
|
Topic: /trajectory
|
||||||
|
Unreliable: false
|
||||||
|
Value: true
|
||||||
|
Enabled: true
|
||||||
|
Global Options:
|
||||||
|
Background Color: 255; 255; 255
|
||||||
|
Default Light: true
|
||||||
|
Fixed Frame: map
|
||||||
|
Frame Rate: 30
|
||||||
|
Name: root
|
||||||
|
Tools:
|
||||||
|
- Class: rviz/Interact
|
||||||
|
Hide Inactive Objects: true
|
||||||
|
- Class: rviz/MoveCamera
|
||||||
|
- Class: rviz/Select
|
||||||
|
- Class: rviz/FocusCamera
|
||||||
|
- Class: rviz/Measure
|
||||||
|
- Class: rviz/SetInitialPose
|
||||||
|
Topic: /initialpose
|
||||||
|
- Class: rviz/SetGoal
|
||||||
|
Topic: /move_base_simple/goal
|
||||||
|
- Class: rviz/PublishPoint
|
||||||
|
Single click: true
|
||||||
|
Topic: /clicked_point
|
||||||
|
Value: true
|
||||||
|
Views:
|
||||||
|
Current:
|
||||||
|
Class: rviz/Orbit
|
||||||
|
Distance: 4.34333372
|
||||||
|
Enable Stereo Rendering:
|
||||||
|
Stereo Eye Separation: 0.0599999987
|
||||||
|
Stereo Focal Distance: 1
|
||||||
|
Swap Stereo Eyes: false
|
||||||
|
Value: false
|
||||||
|
Focal Point:
|
||||||
|
X: 0.177987233
|
||||||
|
Y: -0.165689588
|
||||||
|
Z: -0.522414088
|
||||||
|
Focal Shape Fixed Size: true
|
||||||
|
Focal Shape Size: 0.0500000007
|
||||||
|
Invert Z Axis: false
|
||||||
|
Name: Current View
|
||||||
|
Near Clip Distance: 0.00999999978
|
||||||
|
Pitch: 0.280202091
|
||||||
|
Target Frame: <Fixed Frame>
|
||||||
|
Value: Orbit (rviz)
|
||||||
|
Yaw: 4.70558119
|
||||||
|
Saved: ~
|
||||||
|
Window Geometry:
|
||||||
|
Displays:
|
||||||
|
collapsed: true
|
||||||
|
Height: 1056
|
||||||
|
Hide Left Dock: true
|
||||||
|
Hide Right Dock: true
|
||||||
|
Image:
|
||||||
|
collapsed: false
|
||||||
|
QMainWindow State: 000000ff00000000fd00000004000000000000016a00000250fc0200000009fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006100fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073000000002800000250000000d700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000c00430061006d0065007200610000000262000000160000000000000000000000010000010f00000250fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000002800000250000000ad00fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073f00000184fc0100000005fb0000000a0049006d00610067006501000000000000073f0000005c00fffffffc000000000000073f0000000000fffffffa000000000200000002fb0000000a0049006d0061006700650100000000ffffffff0000000000000000fb0000000800540069006d006500000003c40000003e0000003b00fffffffb0000000800540069006d00650100000000000004500000000000000000fb000000180049006e00700075007400200074006f002000560049004f010000009d000006a20000000000000000fb00000016004400410056004900530020006600720061006d00650000000000ffffffff00000000000000000000073f0000025000000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
|
||||||
|
Selection:
|
||||||
|
collapsed: false
|
||||||
|
Time:
|
||||||
|
collapsed: false
|
||||||
|
Tool Properties:
|
||||||
|
collapsed: false
|
||||||
|
Views:
|
||||||
|
collapsed: true
|
||||||
|
Width: 1855
|
||||||
|
X: 1985
|
||||||
|
Y: 24
|
@ -0,0 +1,45 @@
#pragma once

#include <esim/common/types.hpp>
#include <esim/visualization/publisher_interface.hpp>

#include <fstream>

namespace event_camera_simulator {

class AdaptiveSamplingBenchmarkPublisher : public Publisher
{
public:

  using PixelLocation = std::pair<int,int>;
  using PixelLocations = std::vector<PixelLocation>;

  AdaptiveSamplingBenchmarkPublisher(const std::string &benchmark_folder,
                                     const std::string &pixels_to_record_filename);

  ~AdaptiveSamplingBenchmarkPublisher();

  virtual void imageCallback(const ImagePtrVector& images, Time t) override;
  virtual void eventsCallback(const EventsVector& events) override;
  virtual void opticFlowCallback(const OpticFlowPtrVector& optic_flows, Time t) override;

  virtual void imageCorruptedCallback(const ImagePtrVector& corrupted_images, Time t) override {}
  virtual void depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) override {}
  virtual void poseCallback(const Transformation& T_W_B, const TransformationVector& T_W_Cs, Time t) override {}
  virtual void twistCallback(const AngularVelocityVector& ws, const LinearVelocityVector& vs, Time t) override {}
  virtual void imuCallback(const Vector3& acc, const Vector3& gyr, Time t) override {}
  virtual void cameraInfoCallback(const ze::CameraRig::Ptr& camera_rig, Time t) override {}
  virtual void pointcloudCallback(const PointCloudVector& pointclouds, Time t) override {}

  static Publisher::Ptr createFromGflags();

private:
  std::ofstream events_file_;
  std::ofstream images_file_;
  std::ofstream pixel_intensities_file_;
  std::ofstream optic_flows_file_;
  size_t image_index_;
  PixelLocations pixels_to_record_;
};

} // namespace event_camera_simulator
@ -0,0 +1,29 @@
#pragma once

#include <esim/common/types.hpp>
#include <ze/common/macros.hpp>

namespace event_camera_simulator {

class Publisher
{
public:
  ZE_POINTER_TYPEDEFS(Publisher);

  Publisher() = default;
  virtual ~Publisher() = default;

  virtual void imageCallback(const ImagePtrVector& images, Time t) = 0;
  virtual void imageCorruptedCallback(const ImagePtrVector& corrupted_images, Time t) = 0;
  virtual void depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) = 0;
  virtual void opticFlowCallback(const OpticFlowPtrVector& optic_flows, Time t) = 0;
  virtual void eventsCallback(const EventsVector& events) = 0;
  virtual void poseCallback(const Transformation& T_W_B, const TransformationVector& T_W_Cs, Time t) = 0;
  virtual void twistCallback(const AngularVelocityVector& ws, const LinearVelocityVector& vs, Time t) = 0;
  virtual void imuCallback(const Vector3& acc, const Vector3& gyr, Time t) = 0;
  virtual void cameraInfoCallback(const ze::CameraRig::Ptr& camera_rig, Time t) = 0;
  virtual void pointcloudCallback(const PointCloudVector& pointclouds, Time t) = 0;

};

} // namespace event_camera_simulator
@ -0,0 +1,59 @@
#pragma once

#include <esim/common/types.hpp>
#include <esim/visualization/publisher_interface.hpp>

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <geometry_msgs/TransformStamped.h>
#include <tf/tf.h>
#include <tf/transform_broadcaster.h>

namespace event_camera_simulator {

class RosPublisher : public Publisher
{
public:
  RosPublisher(size_t num_cameras);
  ~RosPublisher();

  virtual void imageCallback(const ImagePtrVector& images, Time t) override;
  virtual void imageCorruptedCallback(const ImagePtrVector& corrupted_images, Time t) override;
  virtual void depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) override;
  virtual void opticFlowCallback(const OpticFlowPtrVector& optic_flows, Time t) override;
  virtual void eventsCallback(const EventsVector& events) override;
  virtual void poseCallback(const Transformation& T_W_B, const TransformationVector& T_W_Cs, Time t) override;
  virtual void twistCallback(const AngularVelocityVector& ws, const LinearVelocityVector& vs, Time t) override;
  virtual void imuCallback(const Vector3& acc, const Vector3& gyr, Time t) override;
  virtual void cameraInfoCallback(const ze::CameraRig::Ptr& camera_rig, Time t) override;
  virtual void pointcloudCallback(const PointCloudVector& pointclouds, Time t) override;

private:
  size_t num_cameras_;
  std::vector<cv::Size> sensor_sizes_;

  std::shared_ptr<ros::NodeHandle> nh_;
  std::shared_ptr<image_transport::ImageTransport> it_;

  std::vector< std::shared_ptr<ros::Publisher> > event_pub_;
  std::shared_ptr<ros::Publisher> pose_pub_;
  std::shared_ptr<ros::Publisher> imu_pub_;
  std::vector< std::shared_ptr<ros::Publisher> > pointcloud_pub_;
  std::vector< std::shared_ptr<ros::Publisher> > camera_info_pub_;
  std::vector< std::shared_ptr<image_transport::Publisher> > image_pub_;
  std::vector< std::shared_ptr<image_transport::Publisher> > image_corrupted_pub_;
  std::vector< std::shared_ptr<image_transport::Publisher> > depthmap_pub_;
  std::vector< std::shared_ptr<ros::Publisher> > optic_flow_pub_;
  std::vector< std::shared_ptr<ros::Publisher> > twist_pub_;
  std::shared_ptr<tf::TransformBroadcaster> tf_broadcaster_;

  Time last_published_camera_info_time_;
  Time last_published_image_time_;
  Time last_published_corrupted_image_time_;
  Time last_published_depthmap_time_;
  Time last_published_optic_flow_time_;
  Time last_published_pointcloud_time_;

};

} // namespace event_camera_simulator
@ -0,0 +1,54 @@
#pragma once

#include <esim/common/types.hpp>
#include <pcl_ros/point_cloud.h>
#include <pcl/point_types.h>
#include <sensor_msgs/Image.h>
#include <dvs_msgs/EventArray.h>
#include <geometry_msgs/PoseStamped.h>
#include <geometry_msgs/TwistStamped.h>
#include <sensor_msgs/Imu.h>
#include <sensor_msgs/CameraInfo.h>
#include <esim_msgs/OpticFlow.h>

namespace event_camera_simulator {

inline std::string getTopicName(int i, const std::string& suffix)
{
  std::stringstream ss;
  ss << "cam" << i << "/" << suffix;
  return ss.str();
}

inline std::string getTopicName(const std::string& prefix, int i, const std::string& suffix)
{
  std::stringstream ss;
  ss << prefix << "/" << getTopicName(i, suffix);
  return ss.str();
}

inline ros::Time toRosTime(Time t)
{
  ros::Time ros_time;
  ros_time.fromNSec(t);
  return ros_time;
}

void pointCloudToMsg(const PointCloud& pointcloud, const std::string& frame_id, Time t, pcl::PointCloud<pcl::PointXYZRGB>::Ptr& msg);

void imageToMsg(const Image& image, Time t, sensor_msgs::ImagePtr& msg);

void depthmapToMsg(const Depthmap& depthmap, Time t, sensor_msgs::ImagePtr& msg);

void opticFlowToMsg(const OpticFlow& flow, Time t, esim_msgs::OpticFlowPtr& msg);

void eventsToMsg(const Events& events, int width, int height, dvs_msgs::EventArrayPtr& msg);

sensor_msgs::Imu imuToMsg(const Vector3& acc, const Vector3& gyr, Time t);

geometry_msgs::TwistStamped twistToMsg(const AngularVelocity& w, const LinearVelocity& v, Time t);

void cameraToMsg(const ze::Camera::Ptr& camera, Time t, sensor_msgs::CameraInfoPtr& msg);

} // namespace event_camera_simulator
@ -0,0 +1,45 @@
#pragma once

#include <esim/common/types.hpp>
#include <esim/visualization/publisher_interface.hpp>
#include <rosbag/bag.h>

namespace event_camera_simulator {

class RosbagWriter : public Publisher
{
public:
  RosbagWriter(const std::string& path_to_output_bag,
               size_t num_cameras);
  ~RosbagWriter();

  virtual void imageCallback(const ImagePtrVector& images, Time t) override;
  virtual void imageCorruptedCallback(const ImagePtrVector& corrupted_images, Time t) override;
  virtual void depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) override;
  virtual void opticFlowCallback(const OpticFlowPtrVector& optic_flows, Time t) override;
  virtual void eventsCallback(const EventsVector& events) override;
  virtual void poseCallback(const Transformation& T_W_B, const TransformationVector& T_W_Cs, Time t) override;
  virtual void twistCallback(const AngularVelocityVector& ws, const LinearVelocityVector& vs, Time t) override;
  virtual void imuCallback(const Vector3& acc, const Vector3& gyr, Time t) override;
  virtual void cameraInfoCallback(const ze::CameraRig::Ptr& camera_rig, Time t) override;
  virtual void pointcloudCallback(const PointCloudVector& pointclouds, Time t) override;

  static Publisher::Ptr createBagWriterFromGflags(size_t num_cameras);

private:
  size_t num_cameras_;
  std::vector<cv::Size> sensor_sizes_;
  rosbag::Bag bag_;

  const std::string topic_name_prefix_ = "";

  Time last_published_camera_info_time_;
  Time last_published_image_time_;
  Time last_published_corrupted_image_time_;
  Time last_published_depthmap_time_;
  Time last_published_optic_flow_time_;
  Time last_published_pointcloud_time_;

};

} // namespace event_camera_simulator
@ -0,0 +1,45 @@
#pragma once

#include <esim/common/types.hpp>
#include <esim/visualization/publisher_interface.hpp>

#include <fstream>

namespace event_camera_simulator {

class SyntheticOpticFlowPublisher : public Publisher
{
public:
  SyntheticOpticFlowPublisher(const std::string &output_folder);

  ~SyntheticOpticFlowPublisher();

  virtual void imageCallback(const ImagePtrVector& images, Time t) override {
    CHECK_EQ(images.size(), 1);
    if(sensor_size_.width == 0 || sensor_size_.height == 0)
    {
      sensor_size_ = images[0]->size();
    }
  }

  virtual void eventsCallback(const EventsVector& events) override;
  virtual void opticFlowCallback(const OpticFlowPtrVector& optic_flows, Time t) override {}

  virtual void imageCorruptedCallback(const ImagePtrVector& corrupted_images, Time t) override {}
  virtual void depthmapCallback(const DepthmapPtrVector& depthmaps, Time t) override {}
  virtual void poseCallback(const Transformation& T_W_B, const TransformationVector& T_W_Cs, Time t) override {}
  virtual void twistCallback(const AngularVelocityVector& ws, const LinearVelocityVector& vs, Time t) override {}
  virtual void imuCallback(const Vector3& acc, const Vector3& gyr, Time t) override {}
  virtual void cameraInfoCallback(const ze::CameraRig::Ptr& camera_rig, Time t) override {}
  virtual void pointcloudCallback(const PointCloudVector& pointclouds, Time t) override {}

  static Publisher::Ptr createFromGflags();

private:
  std::string output_folder_;
  cv::Size sensor_size_;
  std::ofstream events_file_;
  Events events_; // buffer containing all the events since the beginning
};

} // namespace event_camera_simulator
31
event_camera_simulator/esim_visualization/package.xml
Normal file
@ -0,0 +1,31 @@
<?xml version="1.0"?>
<package format="2">
  <name>esim_visualization</name>
  <version>0.0.0</version>
  <description>Visualizers for the event camera simulator.</description>

  <maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>

  <license>GPLv3</license>

  <buildtool_depend>catkin</buildtool_depend>
  <buildtool_depend>catkin_simple</buildtool_depend>

  <depend>esim_common</depend>
  <depend>esim_msgs</depend>
  <depend>gflags_catkin</depend>
  <depend>glog_catkin</depend>
  <depend>roscpp</depend>
  <depend>dvs_msgs</depend>
  <depend>sensor_msgs</depend>
  <depend>cv_bridge</depend>
  <depend>geometry_msgs</depend>
  <depend>image_transport</depend>
  <depend>pcl_ros</depend>
  <depend>tf</depend>
  <depend>minkindr_conversions</depend>
  <depend>rosbag</depend>
  <depend>rospy</depend>
  <depend>dvs_renderer</depend>

</package>
12
event_camera_simulator/esim_visualization/setup.py
Normal file
@ -0,0 +1,12 @@
#!/usr/bin/env python

from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

d = generate_distutils_setup(
    packages=['esim_visualization'],
    package_dir={'': 'py'},
    install_requires=['rospy', 'sensor_msgs', 'esim_msgs'],
)

setup(**d)
@ -0,0 +1,135 @@
#include <esim/visualization/adaptive_sampling_benchmark_publisher.hpp>
#include <esim/common/utils.hpp>
#include <ze/common/path_utils.hpp>
#include <ze/common/file_utils.hpp>
#include <ze/common/time_conversions.hpp>

#include <opencv2/highgui/highgui.hpp>

#include <gflags/gflags.h>
#include <glog/logging.h>

DEFINE_string(adaptive_sampling_benchmark_folder, "",
              "Folder in which to output the results.");

DEFINE_string(adaptive_sampling_benchmark_pixels_to_record_file, "",
              "File containing the pixel locations to record.");

namespace event_camera_simulator {

AdaptiveSamplingBenchmarkPublisher::AdaptiveSamplingBenchmarkPublisher(const std::string& benchmark_folder,
                                                                       const std::string& pixels_to_record_filename)
  : image_index_(0)
{
  ze::openOutputFileStream(ze::joinPath(benchmark_folder, "events.txt"),
                           &events_file_);

  ze::openOutputFileStream(ze::joinPath(benchmark_folder, "images.txt"),
                           &images_file_);

  ze::openOutputFileStream(ze::joinPath(benchmark_folder, "pixel_intensities.txt"),
                           &pixel_intensities_file_);

  ze::openOutputFileStream(ze::joinPath(benchmark_folder, "optic_flows.txt"),
                           &optic_flows_file_);

  // Load and parse the file containing the list of pixel locations
  // whose intensity values to record
  std::ifstream pixels_to_record_file;
  if(pixels_to_record_filename != "")
  {
    ze::openFileStream(pixels_to_record_filename, &pixels_to_record_file);

    int x, y;
    LOG(INFO) << "Pixels that will be recorded: ";
    while(pixels_to_record_file >> x >> y)
    {
      LOG(INFO) << x << " , " << y;
      pixels_to_record_.push_back(PixelLocation(x,y));
    }
  }
}

Publisher::Ptr AdaptiveSamplingBenchmarkPublisher::createFromGflags()
{
  if(FLAGS_adaptive_sampling_benchmark_folder == "")
  {
    LOG(WARNING) << "Empty benchmark folder string: will not write benchmark files";
    return nullptr;
  }

  return std::make_shared<AdaptiveSamplingBenchmarkPublisher>(FLAGS_adaptive_sampling_benchmark_folder,
                                                              FLAGS_adaptive_sampling_benchmark_pixels_to_record_file);
}

AdaptiveSamplingBenchmarkPublisher::~AdaptiveSamplingBenchmarkPublisher()
{
  // finish writing files
  events_file_.close();
  images_file_.close();
  pixel_intensities_file_.close();
  optic_flows_file_.close();
}

void AdaptiveSamplingBenchmarkPublisher::imageCallback(const ImagePtrVector& images, Time t)
{
  CHECK_EQ(images.size(), 1);
  images_file_ << t << std::endl;

  ImagePtr img = images[0];
  cv::Mat img_8bit;
  img->convertTo(img_8bit, CV_8U, 255);

  if(image_index_ == 0)
  {
    static const std::vector<int> compression_params = {CV_IMWRITE_PNG_COMPRESSION, 0};

    std::stringstream ss;
    ss << ze::joinPath(FLAGS_adaptive_sampling_benchmark_folder, "image_");
    ss << image_index_ << ".png";

    LOG(INFO) << ss.str();
    cv::imwrite(ss.str(), img_8bit, compression_params);
  }

  for(const PixelLocation& pixel_loc : pixels_to_record_)
  {
    // write line in the form "x y I(x,y)"
    const int x = pixel_loc.first;
    const int y = pixel_loc.second;
    pixel_intensities_file_ << x << " "
                            << y << " "
                            << (*images[0])(y,x) << std::endl;
  }

  image_index_++;
}

void AdaptiveSamplingBenchmarkPublisher::opticFlowCallback(const OpticFlowPtrVector& optic_flows, Time t)
{
  CHECK_EQ(optic_flows.size(), 1);
  for(const PixelLocation& pixel_loc : pixels_to_record_)
  {
    // write line in the form "x y optic_flow(x,y)[0] optic_flow(x,y)[1]"
    const int x = pixel_loc.first;
    const int y = pixel_loc.second;
    optic_flows_file_ << x << " "
                      << y << " "
                      << (*optic_flows[0])(y,x)[0] << " "
                      << (*optic_flows[0])(y,x)[1]
                      << std::endl;
  }
}

void AdaptiveSamplingBenchmarkPublisher::eventsCallback(const EventsVector& events)
{
  CHECK_EQ(events.size(), 1);

  for(const Event& e : events[0])
  {
    events_file_ << e.t << " " << e.x << " " << e.y << " " << (e.pol? 1 : 0) << std::endl;
  }
}

} // namespace event_camera_simulator
110
event_camera_simulator/esim_visualization/src/py/optic_flow_converter.py
Executable file
@ -0,0 +1,110 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Node that listens to esim_msgs/OpticFlow messages, and publishes it
as a color-coded image, and as a vector field.
"""

import rospy
import numpy as np
import cv2
from sensor_msgs.msg import Image
from esim_msgs.msg import OpticFlow
from cv_bridge import CvBridge

class FlowConverterNode:
    _p0 = None

    def __init__(self):
        self._bridge = CvBridge()

        rospy.Subscriber("flow", OpticFlow, self._OpticFlowCallback)
        self.pub_color = rospy.Publisher("flow_color", Image, queue_size=0)
        self.pub_arrows = rospy.Publisher("flow_arrows", Image, queue_size=0)

        self.arrows_step = rospy.get_param('~arrows_step', 12)
        self.arrows_scale = rospy.get_param('~arrows_scale', 0.1)
        self.arrows_upsample_factor = rospy.get_param('~arrows_upsample_factor', 2)
        self.publish_rate = rospy.get_param('~publish_rate', 20)

        rospy.loginfo('Started flow converter node')
        rospy.loginfo('Step size between arrows: {}'.format(self.arrows_step))
        rospy.loginfo('Scale factor: {:.2f}'.format(self.arrows_scale))
        rospy.loginfo('Upsample factor: x{}'.format(self.arrows_upsample_factor))
        rospy.loginfo('Publish rate: {} Hz'.format(self.publish_rate))

        self.arrows_color = (0,0,255)  # red
        self.first_msg_received = False

    def reset(self):
        self.first_msg_received = False
        self.last_msg_stamp = None

    def _OpticFlowCallback(self, msg):
        if not self.first_msg_received:
            rospy.logdebug('Received first message at stamp: {}'.format(msg.header.stamp.to_sec()))
            self.last_msg_stamp = msg.header.stamp
            self.first_msg_received = True
            self.convertAndPublishFlow(msg)

        if msg.header.stamp.to_sec() < self.last_msg_stamp.to_sec():
            rospy.loginfo('Optic flow converter reset because new stamp is older than the latest stamp')
            self.reset()
            return

        time_since_last_published_msg = (msg.header.stamp - self.last_msg_stamp).to_sec()
        rospy.logdebug('Time since last published message: {}'.format(time_since_last_published_msg))
        if time_since_last_published_msg >= 1./float(self.publish_rate):
            self.last_msg_stamp = msg.header.stamp
            self.convertAndPublishFlow(msg)

    def convertAndPublishFlow(self, msg):
        height, width = msg.height, msg.width
        flow_x = np.array(msg.flow_x).reshape((height, width))
        flow_y = np.array(msg.flow_y).reshape((height, width))
        self.publishColorCodedFlow(flow_x, flow_y, msg.header.stamp)
        self.publishArrowFlow(flow_x, flow_y, msg.header.stamp)

    def publishColorCodedFlow(self, flow_x, flow_y, stamp):
        assert(flow_x.shape == flow_y.shape)
        height, width = flow_x.shape
        magnitude, angle = cv2.cartToPolar(flow_x, flow_y)
        magnitude = cv2.normalize(src=magnitude, dst=magnitude, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)

        hsv = np.zeros((height,width,3), dtype=np.uint8)
        self.hsv = hsv.copy()
        hsv[...,1] = 255
        hsv[...,0] = 0.5 * angle * 180 / np.pi
        hsv[...,2] = cv2.normalize(magnitude,None,0,255,cv2.NORM_MINMAX)
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        img_msg = self._bridge.cv2_to_imgmsg(bgr, 'bgr8')
        img_msg.header.stamp = stamp
        self.pub_color.publish(img_msg)

    def publishArrowFlow(self, flow_x, flow_y, stamp):
        assert(flow_x.shape == flow_y.shape)
        height, width = flow_x.shape

        step = self.arrows_step
        scale = self.arrows_scale
        ss = self.arrows_upsample_factor
        arrow_field = np.zeros((ss * height, ss * width,3), dtype=np.uint8)

        for x in np.arange(0, width, step):
            for y in np.arange(0, height, step):
                vx = flow_x[y,x]
                vy = flow_y[y,x]
                cv2.arrowedLine(arrow_field, (ss * x, ss * y),
                                (int(ss * (x + scale * vx)), int(ss * (y + scale * vy))), color=self.arrows_color, thickness=1)

        img_msg = self._bridge.cv2_to_imgmsg(arrow_field, 'bgr8')
        img_msg.header.stamp = stamp
        self.pub_arrows.publish(img_msg)


if __name__ == '__main__':
    rospy.init_node('flow_converter_node')
    node = FlowConverterNode()
    rospy.spin()
400
event_camera_simulator/esim_visualization/src/ros_publisher.cpp
Normal file
@ -0,0 +1,400 @@
#include <esim/visualization/ros_publisher.hpp>
#include <esim/common/utils.hpp>
#include <ze/common/time_conversions.hpp>
#include <esim/visualization/ros_utils.hpp>
#include <minkindr_conversions/kindr_msg.h>
#include <minkindr_conversions/kindr_tf.h>

#include <gflags/gflags.h>
#include <glog/logging.h>

DEFINE_double(ros_publisher_camera_info_rate, 10,
              "Camera info (maximum) publish rate, in Hz");

DEFINE_double(ros_publisher_frame_rate, 25,
              "(Maximum) frame rate, in Hz");

DEFINE_double(ros_publisher_depth_rate, 25,
              "(Maximum) depthmap publish rate, in Hz");

DEFINE_double(ros_publisher_pointcloud_rate, 25,
              "(Maximum) point cloud publish rate, in Hz");

DEFINE_double(ros_publisher_optic_flow_rate, 25,
              "(Maximum) optic flow map publish rate, in Hz");

namespace event_camera_simulator {

RosPublisher::RosPublisher(size_t num_cameras)
{
  CHECK_GE(num_cameras, 1);
  num_cameras_ = num_cameras;
  sensor_sizes_ = std::vector<cv::Size>(num_cameras_);

  // Initialize ROS if it was not initialized before.
  if(!ros::isInitialized())
  {
    VLOG(1) << "Initializing ROS";
    int argc = 0;
    ros::init(argc, nullptr, std::string("ros_publisher"), ros::init_options::NoSigintHandler);
  }

  // Create node and subscribe.
  nh_.reset(new ros::NodeHandle(""));
  it_.reset(new image_transport::ImageTransport(*nh_));

  // Setup ROS publishers for images, events, poses, depth maps, camera info, etc.
  for(size_t i=0; i<num_cameras_; ++i)
  {
    event_pub_.emplace_back(
          new ros::Publisher(
            nh_->advertise<dvs_msgs::EventArray> (getTopicName(i, "events"), 0)));

    image_pub_.emplace_back(
          new image_transport::Publisher(
            it_->advertise(getTopicName(i, "image_raw"), 0)));

    image_corrupted_pub_.emplace_back(
          new image_transport::Publisher(
            it_->advertise(getTopicName(i, "image_corrupted"), 0)));

    depthmap_pub_.emplace_back(
          new image_transport::Publisher(
            it_->advertise(getTopicName(i, "depthmap"), 0)));

    optic_flow_pub_.emplace_back(
          new ros::Publisher(
            nh_->advertise<esim_msgs::OpticFlow> (getTopicName(i, "optic_flow"), 0)));

    camera_info_pub_.emplace_back(
          new ros::Publisher(
            nh_->advertise<sensor_msgs::CameraInfo> (getTopicName(i, "camera_info"), 0)));

    twist_pub_.emplace_back(
          new ros::Publisher(
            nh_->advertise<geometry_msgs::TwistStamped> (getTopicName(i, "twist"), 0)));

    pointcloud_pub_.emplace_back(
          new ros::Publisher(
            nh_->advertise<pcl::PointCloud<pcl::PointXYZ>> (getTopicName(i, "pointcloud"), 0)));
  }

  pose_pub_.reset(new ros::Publisher(nh_->advertise<geometry_msgs::PoseStamped> ("pose", 0)));
  imu_pub_.reset(new ros::Publisher(nh_->advertise<sensor_msgs::Imu> ("imu", 0)));
  tf_broadcaster_.reset(new tf::TransformBroadcaster());

  last_published_camera_info_time_ = 0;
  last_published_image_time_ = 0;
  last_published_corrupted_image_time_ = 0;
  last_published_depthmap_time_ = 0;
  last_published_optic_flow_time_ = 0;
  last_published_pointcloud_time_ = 0;
}

RosPublisher::~RosPublisher()
{
  for(size_t i=0; i<num_cameras_; ++i)
  {
    event_pub_[i]->shutdown();
    image_pub_[i]->shutdown();
    image_corrupted_pub_[i]->shutdown();
    depthmap_pub_[i]->shutdown();
    optic_flow_pub_[i]->shutdown();
    camera_info_pub_[i]->shutdown();
    twist_pub_[i]->shutdown();
    pointcloud_pub_[i]->shutdown();
  }
  pose_pub_->shutdown();

  ros::shutdown();
}

void RosPublisher::pointcloudCallback(const PointCloudVector& pointclouds, Time t)
{
  CHECK_EQ(pointcloud_pub_.size(), num_cameras_);
  CHECK_EQ(pointclouds.size(), num_cameras_);

  for(size_t i=0; i<num_cameras_; ++i)
  {
    const PointCloud& pcl_camera = pointclouds[i];

    CHECK(pointcloud_pub_[i]);
    if(pointcloud_pub_[i]->getNumSubscribers() == 0)
    {
      continue;
    }

    Duration min_time_interval_between_published_pointclouds_
        = ze::secToNanosec(1.0 / FLAGS_ros_publisher_pointcloud_rate);
    if(last_published_pointcloud_time_ > 0 && t - last_published_pointcloud_time_ < min_time_interval_between_published_pointclouds_)
    {
      return;
    }

    pcl::PointCloud<pcl::PointXYZRGB>::Ptr msg (new pcl::PointCloud<pcl::PointXYZRGB>);
    std::stringstream ss; ss << "cam" << i;
    pointCloudToMsg(pointclouds[i], ss.str(), t, msg);
    pointcloud_pub_[i]->publish(msg);
  }

  last_published_pointcloud_time_ = t;
}

void RosPublisher::imageCallback(const ImagePtrVector& images, Time t)
{
  CHECK_EQ(image_pub_.size(), num_cameras_);

  for(size_t i=0; i<num_cameras_; ++i)
  {
    sensor_sizes_[i] = images[i]->size();

    CHECK(image_pub_[i]);
    if(image_pub_[i]->getNumSubscribers() == 0)
    {
      continue;
    }

    static const Duration min_time_interval_between_published_images_
        = ze::secToNanosec(1.0 / FLAGS_ros_publisher_frame_rate);
    if(last_published_image_time_ > 0 && t - last_published_image_time_ < min_time_interval_between_published_images_)
    {
      return;
    }

    if(images[i])
    {
      sensor_msgs::ImagePtr msg;
      imageToMsg(*images[i], t, msg);
      image_pub_[i]->publish(msg);
    }
  }

  last_published_image_time_ = t;
}

void RosPublisher::imageCorruptedCallback(const ImagePtrVector& corrupted_images, Time t)
{
  CHECK_EQ(image_corrupted_pub_.size(), num_cameras_);

  for(size_t i=0; i<num_cameras_; ++i)
  {
    CHECK(image_corrupted_pub_[i]);
    if(image_corrupted_pub_[i]->getNumSubscribers() == 0)
    {
      continue;
    }

    static const Duration min_time_interval_between_published_images_
        = ze::secToNanosec(1.0 / FLAGS_ros_publisher_frame_rate);
    if(last_published_corrupted_image_time_ > 0 && t - last_published_corrupted_image_time_ < min_time_interval_between_published_images_)
    {
      return;
    }

    if(corrupted_images[i])
    {
      sensor_msgs::ImagePtr msg;
      imageToMsg(*corrupted_images[i], t, msg);
      image_corrupted_pub_[i]->publish(msg);
    }
  }

  last_published_corrupted_image_time_ = t;
}

void RosPublisher::depthmapCallback(const DepthmapPtrVector& depthmaps, Time t)
{
  CHECK_EQ(depthmap_pub_.size(), num_cameras_);

  for(size_t i=0; i<num_cameras_; ++i)
  {
    CHECK(depthmap_pub_[i]);
    if(depthmap_pub_[i]->getNumSubscribers() == 0)
    {
      continue;
    }

    static const Duration min_time_interval_between_published_depthmaps_
        = ze::secToNanosec(1.0 / FLAGS_ros_publisher_depth_rate);
    if(last_published_depthmap_time_ > 0 && t - last_published_depthmap_time_ < min_time_interval_between_published_depthmaps_)
    {
      return;
    }

    if(depthmaps[i])
    {
      sensor_msgs::ImagePtr msg;
      depthmapToMsg(*depthmaps[i], t, msg);
      depthmap_pub_[i]->publish(msg);
    }
  }

  last_published_depthmap_time_ = t;
}

void RosPublisher::opticFlowCallback(const OpticFlowPtrVector& optic_flows, Time t)
{
  CHECK_EQ(optic_flow_pub_.size(), num_cameras_);

  for(size_t i=0; i<num_cameras_; ++i)
  {
    CHECK(optic_flow_pub_[i]);
    if(optic_flow_pub_[i]->getNumSubscribers() == 0)
    {
      continue;
    }

    static const Duration min_time_interval_between_published_optic_flows_
        = ze::secToNanosec(1.0 / FLAGS_ros_publisher_optic_flow_rate);
    if(last_published_optic_flow_time_ > 0 && t - last_published_optic_flow_time_ < min_time_interval_between_published_optic_flows_)
    {
      return;
    }

    if(optic_flows[i])
    {
      esim_msgs::OpticFlow::Ptr msg;
      msg.reset(new esim_msgs::OpticFlow);
      opticFlowToMsg(*optic_flows[i], t, msg);
      optic_flow_pub_[i]->publish(msg);
    }
  }

  last_published_optic_flow_time_ = t;
}

void RosPublisher::eventsCallback(const EventsVector& events)
{
  CHECK_EQ(event_pub_.size(), num_cameras_);

  for(size_t i=0; i<num_cameras_; ++i)
  {
    if(sensor_sizes_[i].width == 0 || sensor_sizes_[i].height == 0)
    {
      continue;
    }

    if(events[i].empty())
    {
      continue;
    }

    CHECK(event_pub_[i]);
    if(event_pub_[i]->getNumSubscribers() == 0)
    {
      continue;
    }

    dvs_msgs::EventArrayPtr msg;
    msg.reset(new dvs_msgs::EventArray);
    eventsToMsg(events[i], sensor_sizes_[i].width, sensor_sizes_[i].height, msg);
    event_pub_[i]->publish(msg);
  }
}

void RosPublisher::poseCallback(const Transformation& T_W_B,
                                const TransformationVector& T_W_Cs,
                                Time t)
{
  if(T_W_Cs.size() != num_cameras_)
  {
    LOG(WARNING) << "Number of poses is different than number of cameras."
                 << "Will not output poses.";
    return;
  }

  // Publish to tf
  tf::StampedTransform bt;
  bt.child_frame_id_ = "body";
  bt.frame_id_ = "map";
  bt.stamp_ = toRosTime(t);
  tf::transformKindrToTF(T_W_B, &bt);
  tf_broadcaster_->sendTransform(bt);

  for(size_t i=0; i<num_cameras_; ++i)
  {
    std::stringstream ss;
    ss << "cam" << i;
    tf::StampedTransform bt;
    bt.child_frame_id_ = ss.str();
    bt.frame_id_ = "map";
    bt.stamp_ = toRosTime(t);
    tf::transformKindrToTF(T_W_Cs[i], &bt);
    tf_broadcaster_->sendTransform(bt);
  }

  // Publish pose message
  geometry_msgs::PoseStamped pose_stamped_msg;
  tf::poseStampedKindrToMsg(T_W_B, toRosTime(t), "map", &pose_stamped_msg);
  pose_pub_->publish(pose_stamped_msg);
}

void RosPublisher::twistCallback(const AngularVelocityVector &ws, const LinearVelocityVector &vs, Time t)
{
  if(ws.size() != num_cameras_
     || vs.size() != num_cameras_)
  {
    LOG(WARNING) << "Number of twists is different than number of cameras."
                 << "Will not output twists.";
    return;
  }
  CHECK_EQ(ws.size(), num_cameras_);
  CHECK_EQ(vs.size(), num_cameras_);
  CHECK_EQ(twist_pub_.size(), num_cameras_);

  for(size_t i=0; i<num_cameras_; ++i)
  {
    CHECK(twist_pub_[i]);
    if(twist_pub_[i]->getNumSubscribers() == 0)
    {
      continue;
    }

    const geometry_msgs::TwistStamped msg = twistToMsg(ws[i], vs[i], t);
    twist_pub_[i]->publish(msg);
  }
}

void RosPublisher::imuCallback(const Vector3& acc, const Vector3& gyr, Time t)
{
  if(imu_pub_->getNumSubscribers() == 0)
  {
    return;
  }

  const sensor_msgs::Imu msg = imuToMsg(acc, gyr, t);
  imu_pub_->publish(msg);
}

void RosPublisher::cameraInfoCallback(const ze::CameraRig::Ptr& camera_rig, Time t)
{
  CHECK(camera_rig);
  CHECK_EQ(camera_rig->size(), num_cameras_);

  static const Duration min_time_interval_between_published_camera_info_
      = ze::secToNanosec(1.0 / FLAGS_ros_publisher_camera_info_rate);
  if(last_published_camera_info_time_ > 0 && t - last_published_camera_info_time_ < min_time_interval_between_published_camera_info_)
  {
    return;
  }

  for(size_t i=0; i<num_cameras_; ++i)
  {
    CHECK(camera_info_pub_[i]);
    if(camera_info_pub_[i]->getNumSubscribers() == 0)
    {
      continue;
    }

    sensor_msgs::CameraInfoPtr msg;
    msg.reset(new sensor_msgs::CameraInfo);
    cameraToMsg(camera_rig->atShared(i), t, msg);
    camera_info_pub_[i]->publish(msg);
  }

  last_published_camera_info_time_ = t;

}

} // namespace event_camera_simulator
169
event_camera_simulator/esim_visualization/src/ros_utils.cpp
Normal file
@ -0,0 +1,169 @@
#include <esim/visualization/ros_utils.hpp>
#include <esim/common/utils.hpp>
#include <pcl_conversions/pcl_conversions.h>
#include <cv_bridge/cv_bridge.h>

namespace event_camera_simulator {

void pointCloudToMsg(const PointCloud& pointcloud, const std::string& frame_id, Time t, pcl::PointCloud<pcl::PointXYZRGB>::Ptr& msg)
{
  CHECK(msg);
  msg->header.frame_id = frame_id;
  msg->height = pointcloud.size();
  msg->width = 1;
  for(auto& p_c : pointcloud)
  {
    pcl::PointXYZRGB p;
    p.x = p_c.xyz(0);
    p.y = p_c.xyz(1);
    p.z = p_c.xyz(2);
    p.r = p_c.rgb(0);
    p.g = p_c.rgb(1);
    p.b = p_c.rgb(2);
    msg->points.push_back(p);
  }

  pcl_conversions::toPCL(toRosTime(t), msg->header.stamp);
}

void imageToMsg(const Image& image, Time t, sensor_msgs::ImagePtr& msg)
{
  cv_bridge::CvImage cv_image;
  image.convertTo(cv_image.image, CV_8U, 255.0);
  cv_image.encoding = "mono8";
  cv_image.header.stamp = toRosTime(t);
  msg = cv_image.toImageMsg();
}

void depthmapToMsg(const Depthmap& depthmap, Time t, sensor_msgs::ImagePtr& msg)
{
  cv_bridge::CvImage cv_depthmap;
  depthmap.convertTo(cv_depthmap.image, CV_32FC1);
  cv_depthmap.encoding = "32FC1";
  cv_depthmap.header.stamp = toRosTime(t);
  msg = cv_depthmap.toImageMsg();
}

void opticFlowToMsg(const OpticFlow& flow, Time t, esim_msgs::OpticFlowPtr& msg)
{
  CHECK(msg);
  msg->header.stamp = toRosTime(t);

  const int height = flow.rows;
  const int width = flow.cols;
  msg->height = height;
  msg->width = width;

  msg->flow_x.resize(height * width);
  msg->flow_y.resize(height * width);
  for(int y=0; y<height; ++y)
  {
    for(int x=0; x<width; ++x)
    {
      msg->flow_x[x + y * width] = static_cast<float>(flow(y,x)[0]);
      msg->flow_y[x + y * width] = static_cast<float>(flow(y,x)[1]);
    }
  }
}

void eventsToMsg(const Events& events, int width, int height, dvs_msgs::EventArrayPtr& msg)
{
  CHECK(msg);
  std::vector<dvs_msgs::Event> events_list;
  for(const Event& e : events)
  {
    dvs_msgs::Event ev;
    ev.x = e.x;
    ev.y = e.y;
    ev.ts = toRosTime(e.t);
    ev.polarity = e.pol;

    events_list.push_back(ev);
  }

  msg->events = events_list;
  msg->height = height;
  msg->width = width;
  msg->header.stamp = events_list.back().ts;
}

sensor_msgs::Imu imuToMsg(const Vector3& acc, const Vector3& gyr, Time t)
{
  sensor_msgs::Imu imu;
  imu.header.stamp = toRosTime(t);

  imu.linear_acceleration.x = acc(0);
  imu.linear_acceleration.y = acc(1);
  imu.linear_acceleration.z = acc(2);

  imu.angular_velocity.x = gyr(0);
  imu.angular_velocity.y = gyr(1);
  imu.angular_velocity.z = gyr(2);

  return imu;
}

geometry_msgs::TwistStamped twistToMsg(const AngularVelocity& w, const LinearVelocity& v, Time t)
{
  geometry_msgs::TwistStamped twist;
  twist.header.stamp = toRosTime(t);

  twist.twist.angular.x = w(0);
  twist.twist.angular.y = w(1);
  twist.twist.angular.z = w(2);

  twist.twist.linear.x = v(0);
  twist.twist.linear.y = v(1);
  twist.twist.linear.z = v(2);

  return twist;
}

void cameraToMsg(const ze::Camera::Ptr& camera, Time t, sensor_msgs::CameraInfoPtr& msg)
{
  CHECK(msg);
  msg->width = camera->width();
  msg->height = camera->height();
  msg->header.stamp = toRosTime(t);

  CalibrationMatrix K = calibrationMatrixFromCamera(camera);
  boost::array<double, 9> K_vec;
  std::vector<double> D_vec;
  for(int i=0; i<3; ++i)
  {
    for(int j=0; j<3; ++j)
    {
      K_vec[j+i*3] = static_cast<double>(K(i,j));
    }
  }

  switch(camera->type())
  {
    case ze::CameraType::PinholeRadialTangential:
    case ze::CameraType::Pinhole:
      msg->distortion_model = "plumb_bob";
      break;
    case ze::CameraType::PinholeEquidistant:
      msg->distortion_model = "equidistant";
      break;
    case ze::CameraType::PinholeFov:
      msg->distortion_model = "fov";
      break;
    default:
      LOG(WARNING) << "Unknown camera distortion model";
      msg->distortion_model = "";
      break;
  }

  for(int j=0; j<camera->distortionParameters().rows(); ++j)
  {
    D_vec.push_back(static_cast<double>(camera->distortionParameters()(j))); // @TODO: use the distortion params from the camera
  }

  msg->K = K_vec;
  msg->D = D_vec;
  // TODO: Add R and P
}

} // namespace event_camera_simulator
308
event_camera_simulator/esim_visualization/src/rosbag_writer.cpp
Normal file
@ -0,0 +1,308 @@
#include <esim/visualization/rosbag_writer.hpp>
#include <esim/common/utils.hpp>
#include <ze/common/time_conversions.hpp>
#include <esim/visualization/ros_utils.hpp>
#include <minkindr_conversions/kindr_msg.h>
#include <minkindr_conversions/kindr_tf.h>
#include <tf/tfMessage.h>

#include <gflags/gflags.h>
#include <glog/logging.h>

DECLARE_double(ros_publisher_camera_info_rate);
DECLARE_double(ros_publisher_frame_rate);
DECLARE_double(ros_publisher_depth_rate);
DECLARE_double(ros_publisher_pointcloud_rate);
DECLARE_double(ros_publisher_optic_flow_rate);

DEFINE_string(path_to_output_bag, "",
              "Path to which save the output bag file.");

namespace event_camera_simulator {

RosbagWriter::RosbagWriter(const std::string& path_to_output_bag, size_t num_cameras)
{
  CHECK_GE(num_cameras, 1);
  num_cameras_ = num_cameras;
  sensor_sizes_ = std::vector<cv::Size>(num_cameras_);

  try
  {
    bag_.open(path_to_output_bag, rosbag::bagmode::Write);
  }
  catch(rosbag::BagIOException e)
  {
    LOG(FATAL) << "Error: could not open rosbag: " << FLAGS_path_to_output_bag << std::endl;
    return;
  }

  LOG(INFO) << "Will write to bag: " << path_to_output_bag;

  last_published_camera_info_time_ = 0;
  last_published_image_time_ = 0;
  last_published_corrupted_image_time_ = 0;
  last_published_depthmap_time_ = 0;
  last_published_optic_flow_time_ = 0;
  last_published_pointcloud_time_ = 0;
}

Publisher::Ptr RosbagWriter::createBagWriterFromGflags(size_t num_cameras)
{
  if(FLAGS_path_to_output_bag == "")
  {
    LOG(INFO) << "Empty output bag string: will not write to rosbag";
    return nullptr;
  }

  return std::make_shared<RosbagWriter>(FLAGS_path_to_output_bag, num_cameras);
}

RosbagWriter::~RosbagWriter()
{
  LOG(INFO) << "Finalizing the bag...";
  bag_.close();
  LOG(INFO) << "Finished writing to bag: " << FLAGS_path_to_output_bag;
}

void RosbagWriter::pointcloudCallback(const PointCloudVector& pointclouds, Time t)
{
  CHECK_EQ(pointclouds.size(), num_cameras_);

  for(size_t i=0; i<num_cameras_; ++i)
  {
    Duration min_time_interval_between_published_pointclouds_
        = ze::secToNanosec(1.0 / FLAGS_ros_publisher_pointcloud_rate);
    if(last_published_pointcloud_time_ > 0 && t - last_published_pointcloud_time_ < min_time_interval_between_published_pointclouds_)
    {
      return;
    }

    pcl::PointCloud<pcl::PointXYZRGB>::Ptr msg (new pcl::PointCloud<pcl::PointXYZRGB>);
    std::stringstream ss; ss << "cam" << i;
    pointCloudToMsg(pointclouds[i], ss.str(), t, msg);
    bag_.write(getTopicName(topic_name_prefix_, i, "pointcloud"),
               toRosTime(t), msg);
  }
  last_published_pointcloud_time_ = t;
}

void RosbagWriter::imageCallback(const ImagePtrVector& images, Time t)
{
  for(size_t i=0; i<num_cameras_; ++i)
  {
    sensor_sizes_[i] = images[i]->size();

    static const Duration min_time_interval_between_published_images_
        = ze::secToNanosec(1.0 / FLAGS_ros_publisher_frame_rate);
    if(last_published_image_time_ > 0 && t - last_published_image_time_ < min_time_interval_between_published_images_)
    {
      return;
    }

    if(images[i])
    {
      sensor_msgs::ImagePtr msg;
      imageToMsg(*images[i], t, msg);
      bag_.write(getTopicName(topic_name_prefix_, i, "image_raw"),
                 msg->header.stamp, msg);
    }
  }
  last_published_image_time_ = t;
}

void RosbagWriter::imageCorruptedCallback(const ImagePtrVector& images_corrupted, Time t)
{
  for(size_t i=0; i<num_cameras_; ++i)
  {
    static const Duration min_time_interval_between_published_images_
        = ze::secToNanosec(1.0 / FLAGS_ros_publisher_frame_rate);
    if(last_published_corrupted_image_time_ > 0 && t - last_published_corrupted_image_time_ < min_time_interval_between_published_images_)
    {
      return;
    }

    if(images_corrupted[i])
    {
      sensor_msgs::ImagePtr msg;
      imageToMsg(*images_corrupted[i], t, msg);
      bag_.write(getTopicName(topic_name_prefix_, i, "image_corrupted"),
                 msg->header.stamp, msg);
    }
  }
  last_published_corrupted_image_time_ = t;
}

void RosbagWriter::depthmapCallback(const DepthmapPtrVector& depthmaps, Time t)
{
  if(depthmaps.size() != num_cameras_)
  {
    return;
  }

  for(size_t i=0; i<num_cameras_; ++i)
  {
    static const Duration min_time_interval_between_published_depthmaps_
        = ze::secToNanosec(1.0 / FLAGS_ros_publisher_depth_rate);
    if(last_published_depthmap_time_ > 0 && t - last_published_depthmap_time_ < min_time_interval_between_published_depthmaps_)
    {
      return;
    }

    if(depthmaps[i])
    {
      sensor_msgs::ImagePtr msg;
      depthmapToMsg(*depthmaps[i], t, msg);
      bag_.write(getTopicName(topic_name_prefix_, i, "depthmap"),
                 msg->header.stamp, msg);
    }
  }
  last_published_depthmap_time_ = t;
}

void RosbagWriter::opticFlowCallback(const OpticFlowPtrVector& optic_flows, Time t)
{
  if(optic_flows.size() != num_cameras_)
  {
    return;
  }

  for(size_t i=0; i<num_cameras_; ++i)
  {
    static const Duration min_time_interval_between_published_optic_flows_
        = ze::secToNanosec(1.0 / FLAGS_ros_publisher_optic_flow_rate);
    if(last_published_optic_flow_time_ > 0 && t - last_published_optic_flow_time_ < min_time_interval_between_published_optic_flows_)
    {
      return;
    }

    if(optic_flows[i])
    {
      esim_msgs::OpticFlow::Ptr msg;
      msg.reset(new esim_msgs::OpticFlow);
      opticFlowToMsg(*optic_flows[i], t, msg);
      bag_.write(getTopicName(topic_name_prefix_, i, "optic_flow"),
                 msg->header.stamp, msg);
    }
  }

  last_published_optic_flow_time_ = t;
}

void RosbagWriter::eventsCallback(const EventsVector& events)
{
  for(size_t i=0; i<num_cameras_; ++i)
  {
    if(sensor_sizes_[i].width == 0 || sensor_sizes_[i].height == 0)
    {
      continue;
    }

    if(events[i].empty())
    {
      continue;
    }

    dvs_msgs::EventArrayPtr msg;
    msg.reset(new dvs_msgs::EventArray);
    eventsToMsg(events[i], sensor_sizes_[i].width, sensor_sizes_[i].height, msg);

    bag_.write(getTopicName(topic_name_prefix_, i, "events"),
               msg->header.stamp, msg);
  }
}

void RosbagWriter::poseCallback(const Transformation& T_W_B,
                                const TransformationVector& T_W_Cs,
                                Time t)
{
  if(T_W_Cs.size() != num_cameras_)
  {
    LOG(WARNING) << "Number of poses is different than number of cameras."
                 << "Will not output poses.";
    return;
  }
  geometry_msgs::PoseStamped pose_stamped_msg;
  geometry_msgs::TransformStamped transform_stamped_msg;
  transform_stamped_msg.header.frame_id = "map";
  transform_stamped_msg.header.stamp = toRosTime(t);
  tf::tfMessage tf_msg;

  for(size_t i=0; i<num_cameras_; ++i)
  {
    // Write pose to bag
    tf::poseStampedKindrToMsg(T_W_Cs[i], toRosTime(t), "map", &pose_stamped_msg);
    bag_.write(getTopicName(topic_name_prefix_, i, "pose"),
               toRosTime(t), pose_stamped_msg);

    // Write tf transform to bag
    std::stringstream ss; ss << "cam" << i;
    transform_stamped_msg.child_frame_id = ss.str();
    tf::transformKindrToMsg(T_W_Cs[i], &transform_stamped_msg.transform);
    tf_msg.transforms.push_back(transform_stamped_msg);
  }

  transform_stamped_msg.child_frame_id = "body";
  tf::transformKindrToMsg(T_W_B, &transform_stamped_msg.transform);
  tf_msg.transforms.push_back(transform_stamped_msg);

  bag_.write("/tf", toRosTime(t), tf_msg);
}

void RosbagWriter::twistCallback(const AngularVelocityVector &ws, const LinearVelocityVector &vs, Time t)
{
  if(ws.size() != num_cameras_
     || vs.size() != num_cameras_)
  {
    LOG(WARNING) << "Number of twists is different than number of cameras."
                 << "Will not output twists.";
    return;
  }
  CHECK_EQ(ws.size(), num_cameras_);
  CHECK_EQ(vs.size(), num_cameras_);

  for(size_t i=0; i<num_cameras_; ++i)
  {
    const geometry_msgs::TwistStamped msg = twistToMsg(ws[i], vs[i], t);
    bag_.write(getTopicName(topic_name_prefix_, i, "twist"),
               msg.header.stamp, msg);
  }
}

void RosbagWriter::imuCallback(const Vector3& acc, const Vector3& gyr, Time t)
{
  VLOG_EVERY_N(1, 500) << "t = " << ze::nanosecToSecTrunc(t) << " s";

  const sensor_msgs::Imu msg = imuToMsg(acc, gyr, t);
  const std::string imu_topic = "/imu";
  bag_.write(imu_topic,
             msg.header.stamp, msg);
}

void RosbagWriter::cameraInfoCallback(const ze::CameraRig::Ptr& camera_rig, Time t)
{
  CHECK(camera_rig);
  CHECK_EQ(camera_rig->size(), num_cameras_);

  static const Duration min_time_interval_between_published_camera_info_
      = ze::secToNanosec(1.0 / FLAGS_ros_publisher_camera_info_rate);
  if(last_published_camera_info_time_ > 0 && t - last_published_camera_info_time_ < min_time_interval_between_published_camera_info_)
  {
    return;
  }

  for(size_t i=0; i<num_cameras_; ++i)
  {
    sensor_msgs::CameraInfoPtr msg;
    msg.reset(new sensor_msgs::CameraInfo);
    cameraToMsg(camera_rig->atShared(i), t, msg);
    bag_.write(getTopicName(topic_name_prefix_, i, "camera_info"),
               msg->header.stamp, msg);
  }

  last_published_camera_info_time_ = t;

}

} // namespace event_camera_simulator
@ -0,0 +1,107 @@
|
|||||||
|
#include <esim/visualization/synthetic_optic_flow_publisher.hpp>
|
||||||
|
#include <esim/common/utils.hpp>
|
||||||
|
#include <ze/common/path_utils.hpp>
|
||||||
|
#include <ze/common/file_utils.hpp>
|
||||||
|
#include <ze/common/time_conversions.hpp>
|
||||||
|
|
||||||
|
#include <opencv2/highgui/highgui.hpp>
|
||||||
|
|
||||||
|
#include <gflags/gflags.h>
|
||||||
|
#include <glog/logging.h>
|
||||||
|
|
||||||
|
DEFINE_string(synthetic_optic_flow_output_folder, "",
|
||||||
|
"Folder in which to output the events.");
|
||||||
|
|
||||||
|
namespace event_camera_simulator {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This publisher was designed with the purpose of generating simulation data
|
||||||
|
* with ground truth labels, for the task of optic flow estimation.
|
||||||
|
*
|
||||||
|
* It assumes that it will receive a relatively small sequence of events (corresponding, for example,
|
||||||
|
* to all the events in between two frames), and will write all the events to disk in its destructor,
|
||||||
|
* in three forms:
|
||||||
|
* - an "events.txt" file that contains all the events in "t x y pol" format (one event per line)
|
||||||
|
* - an "event_count.png" image that whose first two channels contain the counts of the positive (resp. negative) event counts at each pixel
|
||||||
|
* - two "timestamps images" in which each pixel contains the timestamp at the last event that fell on the pixel.
|
||||||
|
* (since the timestamp is a floating point value, it is split in 3 8-bit values so that the timestamp images
|
||||||
|
* can be saved in a single 3-channel image).
|
||||||
|
*/
|
||||||
|
SyntheticOpticFlowPublisher::SyntheticOpticFlowPublisher(const std::string& output_folder)
|
||||||
|
: output_folder_(output_folder)
|
||||||
|
{
|
||||||
|
ze::openOutputFileStream(ze::joinPath(output_folder, "events.txt"),
|
||||||
|
&events_file_);
|
||||||
|
}
|
||||||
|
|
||||||
|
Publisher::Ptr SyntheticOpticFlowPublisher::createFromGflags()
|
||||||
|
{
|
||||||
|
if(FLAGS_synthetic_optic_flow_output_folder == "")
|
||||||
|
{
|
||||||
|
LOG(WARNING) << "Empty output folder string: will not write synthetic optic flow files";
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
return std::make_shared<SyntheticOpticFlowPublisher>(FLAGS_synthetic_optic_flow_output_folder);
|
||||||
|
}
|
||||||
|
|
||||||
|
SyntheticOpticFlowPublisher::~SyntheticOpticFlowPublisher()
|
||||||
|
{
|
||||||
|
// Create an event count image using all the events collected
|
||||||
|
cv::Mat event_count_image = cv::Mat::zeros(sensor_size_, CV_8UC3);
|
||||||
|
|
||||||
|
// Create two event timestamps images using all the events collected
|
||||||
|
cv::Mat timestamps_pos = cv::Mat::zeros(sensor_size_, CV_8UC3);
|
||||||
|
cv::Mat timestamps_neg = cv::Mat::zeros(sensor_size_, CV_8UC3);
|
||||||
|
|
||||||
|
int remapped_timestamp_fraction;
|
||||||
|
double timestamp_fraction;
|
||||||
|
for(Event e : events_)
|
||||||
|
{
|
||||||
|
event_count_image.at<cv::Vec3b>(e.y,e.x)[int(e.pol)]++;
|
||||||
|
|
||||||
|
cv::Mat& curr_timestamp_image = e.pol ? timestamps_pos : timestamps_neg;
|
||||||
|
|
||||||
|
// remap value
|
||||||
|
timestamp_fraction = double(e.t - events_[0].t) / (events_[events_.size()-1].t - events_[0].t);
|
||||||
|
remapped_timestamp_fraction = timestamp_fraction * std::pow(2,24); // remap 0-1 to 0 - 2^24
|
||||||
|
|
||||||
|
// distribute the 24 bit number (remapped_timestamp_fraction) to 3 channel 8 bit image
|
||||||
|
for (int i=0; i<3; i++)
|
||||||
|
{
|
||||||
|
curr_timestamp_image.at<cv::Vec3b>(e.y,e.x)[i] = (int) remapped_timestamp_fraction & 0xFF; // bit mask of 0000 0000 0000 0000 1111 1111
|
||||||
|
remapped_timestamp_fraction = remapped_timestamp_fraction >> 8; // shifts bits to right by 8
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write event count image + the two timestamps images to disk
|
||||||
|
std::string path_event_count_image = ze::joinPath(output_folder_, "event_count.png");
|
||||||
|
std::string path_timestamps_pos = ze::joinPath(output_folder_, "event_time_stamps_pos.png");
|
||||||
|
std::string path_timestamps_neg = ze::joinPath(output_folder_, "event_time_stamps_neg.png");
|
||||||
|
|
||||||
|
std::vector<int> compression_params;
|
||||||
|
compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
|
||||||
|
compression_params.push_back(0);
|
||||||
|
|
||||||
|
cv::imwrite(path_event_count_image, event_count_image, compression_params);
|
||||||
|
cv::imwrite(path_timestamps_pos, timestamps_pos, compression_params);
|
||||||
|
cv::imwrite(path_timestamps_neg, timestamps_neg, compression_params);
|
||||||
|
|
||||||
|
// Finish writing event file
|
||||||
|
events_file_.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
void SyntheticOpticFlowPublisher::eventsCallback(const EventsVector& events)
|
||||||
|
{
|
||||||
|
CHECK_EQ(events.size(), 1);
|
||||||
|
|
||||||
|
// Simply aggregate the events into the events_ buffer.
|
||||||
|
// At the destruction of this object, everything will be saved to disk.
|
||||||
|
for(const Event& e : events[0])
|
||||||
|
{
|
||||||
|
events_file_ << e.t << " " << e.x << " " << e.y << " " << (e.pol? 1 : 0) << std::endl;
|
||||||
|
events_.push_back(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace event_camera_simulator
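As a consumer-side illustration of the timestamp encoding described above (not part of this commit): the 24-bit remapped timestamp is stored LSB-first across the three 8-bit channels, so it can be recovered per pixel as in the following sketch. The file name and pixel coordinates are assumptions made for the example.

// Hypothetical decoding sketch (not part of this commit): recover the
// normalized timestamp fraction in [0, 1] from one of the timestamp images
// written by ~SyntheticOpticFlowPublisher().
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cstdint>
#include <iostream>

int main()
{
  cv::Mat img = cv::imread("event_time_stamps_pos.png");  // path is an assumption
  if(img.empty())
  {
    return 1;
  }

  const int x = 100, y = 60;  // pixel of interest (assumption)
  const cv::Vec3b v = img.at<cv::Vec3b>(y, x);

  // The channels hold the 24-bit value LSB-first, mirroring the encoding loop above.
  const std::uint32_t raw = static_cast<std::uint32_t>(v[0])
                          | (static_cast<std::uint32_t>(v[1]) << 8)
                          | (static_cast<std::uint32_t>(v[2]) << 16);
  const double timestamp_fraction = static_cast<double>(raw) / (1 << 24);

  std::cout << "timestamp fraction at (" << x << ", " << y << "): "
            << timestamp_fraction << std::endl;
  return 0;
}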
@ -0,0 +1,22 @@
cmake_minimum_required(VERSION 2.8.3)
project(imp_multi_objects_2d)

find_package(catkin_simple REQUIRED)
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
catkin_simple()

set(HEADERS
  include/esim/imp_multi_objects_2d/imp_multi_objects_2d_renderer.hpp
  include/esim/imp_multi_objects_2d/object.hpp
)

set(SOURCES
  src/imp_multi_objects_2d_renderer.cpp
  src/object.cpp
)

cs_add_library(${PROJECT_NAME} ${SOURCES} ${HEADERS})

cs_install()
cs_export()
@ -0,0 +1,35 @@
#pragma once

#include <esim/rendering/simple_renderer_base.hpp>
#include <esim/imp_multi_objects_2d/object.hpp>

namespace event_camera_simulator {

//! A rendering engine for 2D (planar) scenes composed of multiple independently moving objects.
class MultiObject2DRenderer : public SimpleRenderer
{
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  MultiObject2DRenderer();

  ~MultiObject2DRenderer();

  virtual bool render(const Time t,
                      const ImagePtr& out_image,
                      const OpticFlowPtr& optic_flow_map) const;

  virtual int getWidth() const { return width_; }

  virtual int getHeight() const { return height_; }

protected:

  void outputGroundTruthData();

  std::vector<std::shared_ptr<Object>> objects_;
  int width_, height_;
  ze::real_t tmax_;
};

} // namespace event_camera_simulator
@ -0,0 +1,121 @@
#pragma once

#include <esim/common/types.hpp>

namespace event_camera_simulator {

bool loadTexture(const std::string& path_to_img, cv::Mat *img,
                 double median_blur, double gaussian_blur);

using Affine = cv::Matx<FloatType, 3,3>;
using AffineWithJacobian = std::pair<Affine, Affine>;

class MotionParameters
{
public:
  MotionParameters(FloatType tmax,
                   FloatType theta0_deg, FloatType theta1_deg,
                   FloatType x0, FloatType x1,
                   FloatType y0, FloatType y1,
                   FloatType sx0, FloatType sx1,
                   FloatType sy0, FloatType sy1)
    : tmax_(tmax),
      x0_(x0),
      x1_(x1),
      y0_(y0),
      y1_(y1),
      theta0_(theta0_deg * CV_PI / 180.),
      theta1_(theta1_deg * CV_PI / 180.),
      sx0_(sx0),
      sx1_(sx1),
      sy0_(sy0),
      sy1_(sy1)
  {
  }

  AffineWithJacobian getAffineTransformationWithJacobian(ze::real_t t)
  {
    // constants
    const ze::real_t dtheta = theta1_ - theta0_;
    const ze::real_t dx = x1_ - x0_;
    const ze::real_t dy = y1_ - y0_;
    const ze::real_t dsx = sx1_ - sx0_;
    const ze::real_t dsy = sy1_ - sy0_;

    // computation of parameter(t)
    const ze::real_t theta = theta0_ + t/tmax_ * dtheta;
    const ze::real_t x = x0_ + t/tmax_ * dx;
    const ze::real_t y = y0_ + t/tmax_ * dy;
    const ze::real_t sx = sx0_ + t/tmax_ * dsx;
    const ze::real_t sy = sy0_ + t/tmax_ * dsy;
    const ze::real_t stheta = std::sin(theta);
    const ze::real_t ctheta = std::cos(theta);

    Affine A;
    A << sx * ctheta, -sy * stheta, x,
         sx * stheta,  sy * ctheta, y,
         0, 0, 1;

    // computation of dparameter_dt(t)
    const ze::real_t dtheta_dt = 1./tmax_ * dtheta;
    const ze::real_t dx_dt = 1./tmax_ * dx;
    const ze::real_t dy_dt = 1./tmax_ * dy;
    const ze::real_t dsx_dt = 1./tmax_ * dsx;
    const ze::real_t dsy_dt = 1./tmax_ * dsy;

    cv::Matx<FloatType, 3, 3> dAdt;
    dAdt << dsx_dt * ctheta - dtheta_dt * stheta * sx, -dsy_dt * stheta - dtheta_dt * ctheta * sy, dx_dt,
            dsx_dt * stheta + dtheta_dt * ctheta * sx,  dsy_dt * ctheta - dtheta_dt * stheta * sy, dy_dt,
            0.0, 0.0, 0.0;

    return AffineWithJacobian(A, dAdt);
  }

  FloatType tmax_;
  FloatType x0_, x1_;
  FloatType y0_, y1_;
  FloatType theta0_, theta1_;
  FloatType sx0_, sx1_;
  FloatType sy0_, sy1_;
};

class Object
{
public:
  Object(const std::string path_to_texture, const cv::Size& dst_size, const MotionParameters& motion_params,
         double median_blur, double gaussian_blur);

  void draw(Time t, bool is_first_layer = false);

  cv::Mat canvas_;
  OpticFlow flow_;

  MotionParameters getMotionParameters() const { return p_; }

  Affine getK0() const { return K0_; }
  Affine getK1() const { return K1_; }

private:
  cv::Mat texture_;
  cv::Size dst_size_;
  MotionParameters p_;

  Affine K0_, K1_;
};

void getIntensityAndAlpha(const cv::Mat& image,
                          int x, int y,
                          ImageFloatType* intensity,
                          ImageFloatType* alpha);

inline ImageFloatType bgrToGrayscale(uchar b,
                                     uchar g,
                                     uchar r)
{
  // https://www.johndcook.com/blog/2009/08/24/algorithms-convert-color-grayscale/
  return 0.07 * static_cast<ImageFloatType>(b)
       + 0.72 * static_cast<ImageFloatType>(g)
       + 0.21 * static_cast<ImageFloatType>(r);
}

} // namespace event_camera_simulator
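For orientation, a small hypothetical usage sketch of the MotionParameters class defined above (not part of this commit): all parameter values are placeholders, and it assumes only that esim/common/types.hpp provides the FloatType and OpenCV typedefs used by object.hpp.

// Hypothetical usage sketch (not part of this commit): evaluate the affine
// warp A(t) and its time derivative dA/dt at the midpoint of the motion.
#include <esim/imp_multi_objects_2d/object.hpp>
#include <opencv2/core/core.hpp>
#include <iostream>

int main()
{
  using namespace event_camera_simulator;

  // tmax, theta0/theta1 [deg], x0/x1, y0/y1, sx0/sx1, sy0/sy1 (placeholder values)
  MotionParameters params(2.0,
                          0.0, 90.0,
                          0.0, 0.5,
                          0.0, -0.25,
                          1.0, 1.5,
                          1.0, 1.5);

  const AffineWithJacobian A_and_dAdt = params.getAffineTransformationWithJacobian(1.0);

  std::cout << "A(1.0) = \n" << cv::Mat(A_and_dAdt.first) << std::endl;
  std::cout << "dA/dt(1.0) = \n" << cv::Mat(A_and_dAdt.second) << std::endl;
  return 0;
}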
21
event_camera_simulator/imp/imp_multi_objects_2d/package.xml
Normal file
@ -0,0 +1,21 @@
<?xml version="1.0"?>
<package format="2">
  <name>imp_multi_objects_2d</name>
  <version>0.0.0</version>
  <description>2D rendering engine that simulates multiple objects moving independently in a scene.</description>

  <maintainer email="rebecq@ifi.uzh.ch">Henri Rebecq</maintainer>

  <license>GPLv3</license>

  <buildtool_depend>catkin</buildtool_depend>
  <buildtool_depend>catkin_simple</buildtool_depend>

  <depend>esim_common</depend>
  <depend>esim_rendering</depend>
  <depend>ze_common</depend>
  <depend>ze_cameras</depend>
  <depend>gflags_catkin</depend>
  <depend>glog_catkin</depend>

</package>
@ -0,0 +1,3 @@
346 260 1.0
/home/user/esim_ws/src/rpg_esim/event_camera_simulator/imp/imp_planar_renderer/textures/office.jpg 15 1.0 -5.0 15.0 0.0 0.1 0.0 -0.07 1.2 1.6 1.2 1.6
/home/user/esim_ws/src/rpg_esim/event_camera_simulator/imp/imp_multi_objects_2d/textures/rugby_ball.png 11 1.0 -360.0 360.0 -1.5 1.5 -0.7 0.2 0.4 0.2 0.4 0.2
Some files were not shown because too many files have changed in this diff