Compare commits

..

5 Commits

SHA1        Message  Date
3fcbbd667b  Added the generation of camera poses when the timestamps are generated  2023-08-29 12:13:06 +02:00
3ba975a830  Added the possibility to get the HDR RGB images from a data provider; for now it only works with 'from_folder' and 'opengl_renderer'  2023-08-03 11:02:47 +02:00
200975a3f0  Commented out the image callback from the hdr publisher because it wasn't getting the motion-blurred version of the images  2023-08-03 11:01:06 +02:00
a4d013c9d7  Updated the locations of the frames in the config file  2023-08-03 10:59:50 +02:00
991de69b8f  Added clang-format to gitignore  2023-08-03 10:50:02 +02:00
16 changed files with 173 additions and 32 deletions

View File

@@ -2,3 +2,4 @@
 esim_ros/scripts/exp_*
 *.swp
 *.autosave
+.clang-format

View File

@@ -11,12 +11,16 @@ namespace event_camera_simulator {
         ZE_POINTER_TYPEDEFS(ImageBuffer);
         struct ImageData {
-            ImageData(Image img, Time stamp, Duration exposure_time)
+            ImageData(
+                Image img, ImageRGB img_rgb, Time stamp, Duration exposure_time
+            )
                 : image(img),
+                  image_rgb(img_rgb),
                   stamp(stamp),
                   exposure_time(exposure_time) {}
             Image image;
+            ImageRGB image_rgb;
             Time stamp;
             Duration exposure_time; // timestamp since last image (0 if this is
                                     // the first image)
@@ -27,7 +31,7 @@ namespace event_camera_simulator {
         // Rolling image buffer of mazimum size 'buffer_size_ns'.
         ImageBuffer(Duration buffer_size_ns): buffer_size_ns_(buffer_size_ns) {}
-        void addImage(Time t, const Image& img);
+        void addImage(Time t, const Image& img, const ImageRGB& img_rgb);
         std::deque<ImageData> getRawBuffer() const {
             return data_;
@@ -63,7 +67,10 @@ namespace event_camera_simulator {
         }
         bool imageCallback(
-            const Image& img, Time time, const ImagePtr& camera_image
+            const Image& img,
+            const ImageRGB& img_rgb,
+            Time time,
+            const ImagePtr& camera_image
         );
     private:
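In short, every buffered frame now carries an RGB copy alongside the grayscale image used for event generation, and addImage()/imageCallback() gained an extra parameter for it. For orientation only, a minimal Python sketch of the bookkeeping this header describes (illustrative names, not the project's code):

from collections import deque
from dataclasses import dataclass

@dataclass
class ImageData:
    image: object        # grayscale frame (cv::Mat in the real code)
    image_rgb: object    # RGB frame added by this change
    stamp: int           # timestamp in nanoseconds
    exposure_time: int   # nanoseconds since the previous frame (0 for the first)

class ImageBuffer:
    """Rolling buffer: keeps only frames newer than t - buffer_size_ns."""
    def __init__(self, buffer_size_ns: int):
        self.buffer_size_ns = buffer_size_ns
        self.data = deque()

    def add_image(self, t: int, img, img_rgb):
        # timestamps must be monotonically increasing, as the C++ CHECK enforces
        assert not self.data or t > self.data[-1].stamp
        exposure = 0 if not self.data else t - self.data[-1].stamp
        self.data.append(ImageData(img, img_rgb, t, exposure))
        # drop everything older than t - buffer_size_ns (std::lower_bound in C++)
        while self.data and self.data[0].stamp < t - self.buffer_size_ns:
            self.data.popleft()

buf = ImageBuffer(buffer_size_ns=20_000_000)          # keep roughly 20 ms of frames
buf.add_image(t=1_000_000, img=None, img_rgb=None)    # None stands in for cv::Mat here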

View File

@@ -4,6 +4,7 @@
 #include <esim/esim/camera_simulator.hpp>
 #include <fstream>
 #include <iomanip>
+#include <iostream>
 #include <ostream>
 #include <ze/common/file_utils.hpp>
@@ -12,7 +13,8 @@ static std::ofstream exposures_file_;
 namespace event_camera_simulator {
-    void ImageBuffer::addImage(Time t, const Image& img) {
+    void
+    ImageBuffer::addImage(Time t, const Image& img, const ImageRGB& img_rgb) {
         if (!data_.empty()) {
             // Check that the image timestamps are monotonically increasing
             CHECK_GT(t, data_.back().stamp);
@@ -21,7 +23,9 @@ namespace event_camera_simulator {
         Duration exposure_time = data_.empty() ? 0 : t - data_.back().stamp;
         VLOG(2) << "Adding image to buffer with stamp: " << t
                 << " and exposure time " << exposure_time;
-        data_.push_back(ImageData(img.clone(), t, exposure_time));
+        data_.push_back(
+            ImageData(img.clone(), img_rgb.clone(), t, exposure_time)
+        );
         // Remove all the images with timestamp older than t - buffer_size_ns_
         auto first_valid_element = std::lower_bound(
@@ -40,12 +44,16 @@ namespace event_camera_simulator {
     }
     bool CameraSimulator::imageCallback(
-        const Image& img, Time time, const ImagePtr& camera_image
+        const Image& img,
+        const ImageRGB& img_rgb,
+        Time time,
+        const ImagePtr& camera_image
     ) {
         CHECK(camera_image);
         CHECK_EQ(camera_image->size(), img.size());
+        CHECK_EQ(img_rgb.size(), img.size());
-        buffer_->addImage(time, img);
+        buffer_->addImage(time, img, img_rgb);
         static const Time initial_time = time;
         if (time - initial_time < exposure_time_) {
@@ -55,10 +63,15 @@ namespace event_camera_simulator {
             return false;
         }
+        ImageRGB rgb(img.rows, img.cols, CV_32FC3);
+        rgb.setTo(0.);
         // average all the images in the buffer to simulate motion blur
         camera_image->setTo(0);
         ze::real_t denom = 0.;
         for (const ImageBuffer::ImageData& img : buffer_->getRawBuffer()) {
+            rgb +=
+                ze::nanosecToMillisecTrunc(img.exposure_time) * img.image_rgb;
             *camera_image +=
                 ze::nanosecToMillisecTrunc(img.exposure_time) * img.image;
             denom += ze::nanosecToMillisecTrunc(img.exposure_time);
@@ -70,7 +83,7 @@ namespace event_camera_simulator {
         ss << hdr_output_folder << "/frames/frame_" << std::setfill('0')
            << std::setw(5) << frame_number++ << ".exr";
         std::string frame_path = ss.str();
-        cv::imwrite(frame_path, *camera_image);
+        cv::imwrite(frame_path, rgb);
         // ze::openOutputFileStream(
         //     ze::joinPath(output_folder, "exposures.csv"),
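The callback now accumulates the buffered RGB frames with the same exposure-time weighting already used for the grayscale image, so the .exr written per frame is a motion-blurred color render rather than the single-channel buffer. A small numpy sketch of that weighted average, assuming frames are float32 arrays paired with their exposure times in nanoseconds (a hypothetical helper, not the ESIM API, and ignoring the truncation done by ze::nanosecToMillisecTrunc):

import numpy as np

def blend_exposure_weighted(frames):
    """frames: list of (image, exposure_time_ns) with float32 images of equal shape."""
    acc = np.zeros_like(frames[0][0], dtype=np.float32)
    denom = 0.0
    for image, exposure_ns in frames:
        weight_ms = exposure_ns / 1e6           # exposure time in milliseconds
        acc += weight_ms * image
        denom += weight_ms
    return acc / denom if denom > 0 else acc

# e.g. two 4x4 RGB frames, the second held twice as long as the first
frames = [(np.full((4, 4, 3), 0.2, np.float32), 8_000_000),
          (np.full((4, 4, 3), 0.8, np.float32), 16_000_000)]
blurred = blend_exposure_weighted(frames)       # every pixel ends up at 0.6

Frames that were visible for longer contribute proportionally more, which is what makes the averaged output look motion blurred.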

View File

@@ -46,6 +46,7 @@ namespace event_camera_simulator {
            camera_simulator_success =
                camera_simulators_[i].imageCallback(
                    *sim_data.images[i],
+                   *sim_data.images_rgb[i],
                    time,
                    corrupted_camera_images_[i]
                );

View File

@@ -42,12 +42,15 @@ namespace event_camera_simulator {
     using Duration = ze::uint64_t;
     using Image = cv::Mat_<ImageFloatType>;
     using ImagePtr = std::shared_ptr<Image>;
+    using ImageRGB = cv::Mat;
+    using ImageRGBPtr = std::shared_ptr<ImageRGB>;
     using Depthmap = cv::Mat_<ImageFloatType>;
     using OpticFlow = cv::Mat_<cv::Vec<ImageFloatType, 2>>;
     using OpticFlowPtr = std::shared_ptr<OpticFlow>;
     using DepthmapPtr = std::shared_ptr<Depthmap>;
     using ImagePtrVector = std::vector<ImagePtr>;
+    using ImageRGBPtrVector = std::vector<ImageRGBPtr>;
     using DepthmapPtrVector = std::vector<DepthmapPtr>;
     using OpticFlowPtrVector = std::vector<OpticFlowPtr>;
@@ -98,6 +101,7 @@ namespace event_camera_simulator {
         //! Camera images.
         ImagePtrVector images;
+        ImageRGBPtrVector images_rgb;
         //! Depth maps.
         DepthmapPtrVector depthmaps;

View File

@@ -1,4 +1,11 @@
+#include "esim/imp_multi_objects_2d/object.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/imgproc/types_c.h"
+#include <cstdint>
 #include <esim/data_provider/data_provider_from_folder.hpp>
+#include <iostream>
 #include <opencv2/imgcodecs/imgcodecs.hpp>
 #include <ze/cameras/camera_impl.hpp>
 #include <ze/common/file_utils.hpp>
@@ -40,6 +47,11 @@ namespace event_camera_simulator {
        sim_data_.images.emplace_back(ImagePtr(new Image(
            cv::Size(camera_rig_->at(0).width(), camera_rig_->at(0).height())
        )));
+       sim_data_.images_rgb.emplace_back(ImageRGBPtr(new ImageRGB(
+           camera_rig_->at(0).width(),
+           camera_rig_->at(0).height(),
+           CV_32FC3
+       )));
        sim_data_.camera_rig = camera_rig_;
        sim_data_.images_updated = true;
@@ -74,12 +86,17 @@ namespace event_camera_simulator {
        const std::string& path_to_image =
            ze::joinPath(path_to_data_folder_, items[1]);
-       cv::Mat image = cv::imread(path_to_image, 0);
+       cv::Mat image = cv::imread(path_to_image);
        CHECK(image.data)
            << "Could not load image from file: " << path_to_image;
        VLOG(3) << "Read image from file: " << path_to_image;
-       image.convertTo(
+       image.copyTo(*sim_data_.images_rgb[0]);
+       cv::Mat image_gray = image;
+       cv::cvtColor(image, image_gray, CV_BGR2GRAY);
+       image_gray.convertTo(
            *sim_data_.images[0],
            cv::DataType<ImageFloatType>::type,
            1. / 255.
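The from_folder provider now reads the frame in color, keeps that copy for the RGB output, and derives the normalized grayscale image from it. Roughly the same steps in OpenCV's Python binding (a sketch with a hypothetical load_frame helper; the real code operates on cv::Mat and ImageFloatType):

import cv2
import numpy as np

def load_frame(path):
    image = cv2.imread(path)                        # 3-channel BGR, as in the updated provider
    assert image is not None, 'Could not load image from file: ' + path
    image_rgb = image.astype(np.float32)            # color copy kept for the RGB/HDR output
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # intensity channel for event simulation
    gray = gray.astype(np.float32) / 255.0          # same 1/255 scaling as the diff above
    return image_rgb, gray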

View File

@@ -103,11 +103,15 @@ namespace event_camera_simulator {
            );
            sim_data_.images.emplace_back(ImagePtr(new Image(size)));
+           sim_data_.images_rgb.emplace_back(
+               ImageRGBPtr(new ImageRGB(size, CV_32FC3))
+           );
            sim_data_.depthmaps.emplace_back(DepthmapPtr(new Depthmap(size)));
            sim_data_.optic_flows.emplace_back(OpticFlowPtr(new OpticFlow(size))
            );
            sim_data_.images[i]->setTo(0);
+           sim_data_.images_rgb[i]->setTo(0);
            sim_data_.depthmaps[i]->setTo(0);
            sim_data_.optic_flows[i]->setTo(0);
        }
@@ -241,6 +245,7 @@ namespace event_camera_simulator {
                sim_data_.groundtruth.linear_velocity_obj_,
                sim_data_.groundtruth.angular_velocity_obj_,
                sim_data_.images[i],
+               sim_data_.images_rgb[i],
                sim_data_.depthmaps[i],
                sim_data_.optic_flows[i]
            );
@@ -249,6 +254,7 @@ namespace event_camera_simulator {
                sim_data_.groundtruth.T_W_B * camera_rig_->T_B_C(i),
                sim_data_.groundtruth.T_W_OBJ_,
                sim_data_.images[i],
+               sim_data_.images_rgb[i],
                sim_data_.depthmaps[i]
            );
        }

View File

@@ -19,6 +19,7 @@ namespace event_camera_simulator {
            const Transformation& T_W_C,
            const std::vector<Transformation>& T_W_OBJ,
            const ImagePtr& out_image,
+           const ImageRGBPtr& out_image_rgb,
            const DepthmapPtr& out_depthmap
        ) const = 0;
@@ -36,6 +37,7 @@ namespace event_camera_simulator {
            const std::vector<LinearVelocity>& linear_velocity_obj,
            const std::vector<AngularVelocity>& angular_velocity_obj,
            const ImagePtr& out_image,
+           const ImageRGBPtr& out_image_rgb,
            const DepthmapPtr& out_depthmap,
            const OpticFlowPtr& optic_flow_map
        ) const {}

View File

@@ -1,7 +1,7 @@
 --data_source=2
---path_to_output_bag=/tmp/out.bag
---path_to_data_folder=/tmp/tests/frames
+# --path_to_output_bag=/tmp/out.bag
+--path_to_data_folder=/home/arno/Videos/sponze/frames
 --hdr_output_folder=/home/arno/sim_ws/out
 --ros_publisher_frame_rate=60

View File

@@ -1,25 +1,45 @@
 import argparse
 from os import listdir
 from os.path import join
+import numpy as np
+from scipy import interpolate
+import csv
-if __name__ == "__main__":
+if __name__ == '__main__':
     parser = argparse.ArgumentParser(
-        description='Generate "images.csv" for ESIM DataProviderFromFolder')
+        description='Generate "images.csv" for ESIM DataProviderFromFolder'
+    )
-    parser.add_argument('-i', '--input_folder', default=None, type=str,
-                        help="folder containing the images")
-    parser.add_argument('-r', '--framerate', default=1000, type=float,
-                        help="video framerate, in Hz")
+    parser.add_argument(
+        '-i',
+        '--input_folder',
+        default=None,
+        type=str,
+        help='folder containing the images',
+    )
+    parser.add_argument(
+        '-r',
+        '--framerate',
+        default=1000,
+        type=float,
+        help='video framerate, in Hz',
+    )
     args = parser.parse_args()
     images = sorted(
-        [f for f in listdir(args.input_folder) if f.endswith('.png')])
+        [
+            f
+            for f in listdir(args.input_folder)
+            if f.endswith('.exr') or f.endswith('.png')
+        ]
+    )
-    print('Will write file: {} with framerate: {} Hz'.format(
-        join(args.input_folder, 'images.csv'), args.framerate))
+    print(
+        f"Will write timestamps in file: {join(args.input_folder, 'images.csv')} with framerate: {args.framerate} Hz"
+    )
     stamp_nanoseconds = 1
     dt_nanoseconds = int((1.0 / args.framerate) * 1e9)
     with open(join(args.input_folder, 'images.csv'), 'w') as f:
@@ -28,3 +48,38 @@ if __name__ == "__main__":
             stamp_nanoseconds += dt_nanoseconds
     print('Done!')
+    print(
+        f"Will write camera poses in file: {join(args.input_folder, 'images.csv')}"
+    )
+    fields = ['frame number', 'x', 'y', 'z', 'roll', 'pitch', 'yaw']
+    with open(join(args.input_folder, 'poses.csv'), 'w') as poses:
+        writer = csv.DictWriter(poses, fieldnames=fields)
+        writer.writeheader()
+        with open(join(args.input_folder, 'keyframes.csv'), 'r') as csv_file:
+            reader = csv.DictReader(csv_file)
+            for n, row in enumerate(reader):
+                for field in row:
+                    row[field] = float(row[field])
+                if n == 0:
+                    prev = row
+                    continue
+                start, end = prev['frame number'], row['frame number']
+                time = np.arange(start, end)
+                data = []
+                for t in time:
+                    data.append({f: 0 for f in fields})
+                    data[-1]['frame number'] = int(t)
+                for var in fields[1:]:
+                    f = interpolate.CubicHermiteSpline(
+                        [start, end], [prev[var], row[var]], [0, 0]
+                    )
+                    # verify(f, var, start, end)
+                    for i, value in enumerate(f(time)):
+                        data[i][var] = value
+                prev = row
+                writer.writerows(data)
+    print('Done!')
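The new block interpolates each pose variable between consecutive keyframes with a cubic Hermite segment whose end derivatives are zero, so the camera eases in and out of every keyframe instead of moving linearly. A self-contained example of that call for a single variable (the values are made up):

import numpy as np
from scipy import interpolate

start, end = 0, 10            # frame numbers of two consecutive keyframes
x0, x1 = 0.0, 2.0             # value of one pose variable (e.g. 'x') at those keyframes
spline = interpolate.CubicHermiteSpline([start, end], [x0, x1], [0.0, 0.0])
frames = np.arange(start, end)
print(spline(frames))         # smooth S-shaped ramp from 0.0 towards 2.0

With zero derivatives at both endpoints, the velocity of every variable is zero at the keyframes, which avoids velocity jumps in the generated camera trajectory.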

View File

@@ -100,14 +100,14 @@ namespace event_camera_simulator {
    }
    void HdrPublisher::imageCallback(const ImagePtrVector& images, Time t) {
-       CHECK_EQ(images.size(), 1);
-       static uint frame_number = 0;
-       std::stringstream ss;
-       ss << output_folder_ << "/frames/frame_" << std::setfill('0')
-          << std::setw(5) << frame_number++ << ".exr";
-       std::string frame_path = ss.str();
-
-       cv::imwrite(frame_path, *images[0]);
+       // CHECK_EQ(images.size(), 1);
+       // static uint frame_number = 0;
+       // std::stringstream ss;
+       // ss << output_folder_ << "/frames/frame_" << std::setfill('0')
+       //    << std::setw(5) << frame_number++ << ".exr";
+       // std::string frame_path = ss.str();
+       //
+       // cv::imwrite(frame_path, *images[0]);
    }
    void HdrPublisher::eventsCallback(const EventsVector& events) {

View File

@@ -24,8 +24,9 @@ namespace event_camera_simulator {
            const Transformation& T_W_C,
            const std::vector<Transformation>& T_W_OBJ,
            const ImagePtr& out_image,
+           const ImageRGBPtr& out_image_rgb,
            const DepthmapPtr& out_depthmap
-       ) const;
+       ) const override;
        //! Returns true if the rendering engine can compute optic flow, false
        //! otherwise
@@ -43,6 +44,7 @@ namespace event_camera_simulator {
            const std::vector<LinearVelocity>& linear_velocity_obj,
            const std::vector<AngularVelocity>& angular_velocity_obj,
            const ImagePtr& out_image,
+           const ImageRGBPtr& out_image_rgb,
            const DepthmapPtr& out_depthmap,
            const OpticFlowPtr& optic_flow_map
        ) const override;

View File

@@ -17,6 +17,7 @@
 #include <esim/imp_opengl_renderer/opengl_renderer.hpp>
 #include <glad/glad.h>
+#include <iomanip>
 #include <learnopengl/shader.h>
 #include <learnopengl/model.h>
 #include <GLFW/glfw3.h>
@@ -314,18 +315,23 @@ namespace event_camera_simulator {
        const Transformation& T_W_C,
        const std::vector<Transformation>& T_W_OBJ,
        const ImagePtr& out_image,
+       const ImageRGBPtr& out_image_rgb,
        const DepthmapPtr& out_depthmap
    ) const {
        CHECK(is_initialized_) << "Called render() but the renderer was not "
                                  "initialized yet. Have you "
                                  "first called setCamera()?";
        CHECK(out_image);
+       CHECK(out_image_rgb);
        CHECK(out_depthmap);
        CHECK_EQ(out_image->cols, width_);
+       CHECK_EQ(out_image_rgb->cols, width_);
        CHECK_EQ(out_image->rows, height_);
+       CHECK_EQ(out_image_rgb->rows, height_);
        CHECK_EQ(out_depthmap->cols, width_);
        CHECK_EQ(out_depthmap->rows, height_);
        CHECK_EQ(out_image->type(), CV_32F);
+       CHECK_EQ(out_image_rgb->type(), CV_32FC3);
        CHECK_EQ(out_depthmap->type(), CV_32F);
        // draw to our framebuffer instead of screen
@@ -413,6 +419,29 @@ namespace event_camera_simulator {
        // read out what we just rendered
        cv::Mat img_color(height_, width_, CV_8UC3);
+       {
+           float pixels[height_ * width_ * 3];
+           glReadPixels(
+               0,
+               0,
+               img_color.cols,
+               img_color.rows,
+               GL_RGB,
+               GL_FLOAT,
+               pixels
+           );
+           cv::Mat rgb(height_, width_, CV_32FC3, pixels);
+           rgb.copyTo(*out_image_rgb);
+       }
+       // static uint frame_number = 0;
+       // std::stringstream ss;
+       // ss << "/tmp/tests"
+       //    << "/frames/frame_" << std::setfill('0') << std::setw(5)
+       //    << frame_number++ << ".exr";
+       // cv::imwrite(ss.str(), rgb);
        glPixelStorei(GL_PACK_ALIGNMENT, (img_color.step & 3) ? 1 : 4);
        glPixelStorei(
            GL_PACK_ROW_LENGTH,
@@ -480,10 +509,11 @@ namespace event_camera_simulator {
        const std::vector<LinearVelocity>& linear_velocity_obj,
        const std::vector<AngularVelocity>& angular_velocity_obj,
        const ImagePtr& out_image,
+       const ImageRGBPtr& out_image_rgb,
        const DepthmapPtr& out_depthmap,
        const OpticFlowPtr& optic_flow_map
    ) const {
-       render(T_W_C, T_W_OBJ, out_image, out_depthmap);
+       render(T_W_C, T_W_OBJ, out_image, out_image_rgb, out_depthmap);
        // draw to our optic flow framebuffer instead of screen
        glBindFramebuffer(GL_FRAMEBUFFER, fbo_of);

View File

@@ -26,6 +26,7 @@ namespace event_camera_simulator {
            const Transformation& T_W_C,
            const std::vector<Transformation>& T_W_OBJ,
            const ImagePtr& out_image,
+           const ImageRGBPtr& out_image_rgb,
            const DepthmapPtr& out_depthmap
        ) const override {
            render(T_W_C, out_image, out_depthmap);

View File

@@ -31,6 +31,7 @@ namespace event_camera_simulator {
            const Transformation& T_W_C,
            const std::vector<Transformation>& T_W_OBJ,
            const ImagePtr& out_image,
+           const ImageRGBPtr& out_image_rbg,
            const DepthmapPtr& out_depthmap
        ) const override {
            render(T_W_C, out_image, out_depthmap);

View File

@@ -21,8 +21,9 @@ namespace event_camera_simulator {
            const Transformation& T_W_C,
            const std::vector<Transformation>& T_W_OBJ,
            const ImagePtr& out_image,
+           const ImageRGBPtr& out_image_rgb,
            const DepthmapPtr& out_depthmap
-       ) const {
+       ) const override {
            render(T_W_C, out_image, out_depthmap);
        }