Mirror of https://github.com/karma-riuk/hdr_esim.git (synced 2024-11-23 14:47:50 +01:00)
Added the possibility to get the HDR RGB images from a data provider.
For now it only works with 'from_folder' and 'opengl_renderer'.
This commit is contained in:
parent 200975a3f0
commit 3ba975a830
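At a glance, the change threads an HDR RGB frame through the pipeline next to the existing grayscale image: a new ImageRGB alias, an extra member in ImageBuffer::ImageData, and an additional argument on addImage(), imageCallback(), and the renderers' render() overloads. Below is a minimal sketch of the extended ImageData, with plain standard/OpenCV types standing in for the ze:: aliases; apart from those assumptions it mirrors the hunks that follow.

    // Sketch only: simplified from the diff below; Time/Duration/ImageFloatType
    // are ze:: typedefs in the real code and are replaced here with assumptions.
    #include <opencv2/core.hpp>
    #include <cstdint>

    using Image    = cv::Mat_<float>; // grayscale frame (CV_32F)
    using ImageRGB = cv::Mat;         // color frame, expected to hold CV_32FC3 data
    using Time     = int64_t;
    using Duration = uint64_t;

    struct ImageData {
        ImageData(Image img, ImageRGB img_rgb, Time stamp, Duration exposure_time)
            : image(img),
              image_rgb(img_rgb),
              stamp(stamp),
              exposure_time(exposure_time) {}

        Image image;        // used for event generation, as before
        ImageRGB image_rgb; // HDR RGB frame carried alongside it
        Time stamp;
        Duration exposure_time; // time since the previous image (0 for the first)
    };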
@@ -11,12 +11,16 @@ namespace event_camera_simulator {
         ZE_POINTER_TYPEDEFS(ImageBuffer);
 
         struct ImageData {
-            ImageData(Image img, Time stamp, Duration exposure_time)
+            ImageData(
+                Image img, ImageRGB img_rgb, Time stamp, Duration exposure_time
+            )
                 : image(img),
+                  image_rgb(img_rgb),
                   stamp(stamp),
                   exposure_time(exposure_time) {}
 
             Image image;
+            ImageRGB image_rgb;
             Time stamp;
             Duration exposure_time; // timestamp since last image (0 if this is
                                     // the first image)
@@ -27,7 +31,7 @@ namespace event_camera_simulator {
         // Rolling image buffer of mazimum size 'buffer_size_ns'.
         ImageBuffer(Duration buffer_size_ns): buffer_size_ns_(buffer_size_ns) {}
 
-        void addImage(Time t, const Image& img);
+        void addImage(Time t, const Image& img, const ImageRGB& img_rgb);
 
         std::deque<ImageData> getRawBuffer() const {
             return data_;
@@ -63,7 +67,10 @@ namespace event_camera_simulator {
         }
 
         bool imageCallback(
-            const Image& img, Time time, const ImagePtr& camera_image
+            const Image& img,
+            const ImageRGB& img_rgb,
+            Time time,
+            const ImagePtr& camera_image
         );
 
     private:
@@ -4,6 +4,7 @@
 #include <esim/esim/camera_simulator.hpp>
 
 #include <fstream>
 #include <iomanip>
 #include <iostream>
+#include <ostream>
 #include <ze/common/file_utils.hpp>
@@ -12,7 +13,8 @@ static std::ofstream exposures_file_;
 
 namespace event_camera_simulator {
 
-    void ImageBuffer::addImage(Time t, const Image& img) {
+    void
+    ImageBuffer::addImage(Time t, const Image& img, const ImageRGB& img_rgb) {
         if (!data_.empty()) {
             // Check that the image timestamps are monotonically increasing
             CHECK_GT(t, data_.back().stamp);
@@ -21,7 +23,9 @@ namespace event_camera_simulator {
         Duration exposure_time = data_.empty() ? 0 : t - data_.back().stamp;
         VLOG(2) << "Adding image to buffer with stamp: " << t
                 << " and exposure time " << exposure_time;
-        data_.push_back(ImageData(img.clone(), t, exposure_time));
+        data_.push_back(
+            ImageData(img.clone(), img_rgb.clone(), t, exposure_time)
+        );
 
         // Remove all the images with timestamp older than t - buffer_size_ns_
         auto first_valid_element = std::lower_bound(
@@ -40,12 +44,16 @@ namespace event_camera_simulator {
     }
 
     bool CameraSimulator::imageCallback(
-        const Image& img, Time time, const ImagePtr& camera_image
+        const Image& img,
+        const ImageRGB& img_rgb,
+        Time time,
+        const ImagePtr& camera_image
     ) {
         CHECK(camera_image);
         CHECK_EQ(camera_image->size(), img.size());
+        CHECK_EQ(img_rgb.size(), img.size());
 
-        buffer_->addImage(time, img);
+        buffer_->addImage(time, img, img_rgb);
 
         static const Time initial_time = time;
         if (time - initial_time < exposure_time_) {
@@ -55,10 +63,15 @@ namespace event_camera_simulator {
             return false;
         }
 
+        ImageRGB rgb(img.rows, img.cols, CV_32FC3);
+        rgb.setTo(0.);
+
         // average all the images in the buffer to simulate motion blur
         camera_image->setTo(0);
         ze::real_t denom = 0.;
         for (const ImageBuffer::ImageData& img : buffer_->getRawBuffer()) {
+            rgb +=
+                ze::nanosecToMillisecTrunc(img.exposure_time) * img.image_rgb;
             *camera_image +=
                 ze::nanosecToMillisecTrunc(img.exposure_time) * img.image;
             denom += ze::nanosecToMillisecTrunc(img.exposure_time);
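In the loop above each buffered frame is weighted by its exposure time, for both the grayscale and the new RGB accumulator, and the sum is later divided by the accumulated weight (denom) to simulate motion blur. A self-contained sketch of that weighting scheme, under the assumption of float images and millisecond weights:

    // Hedged sketch of the exposure-weighted average used to simulate motion
    // blur; the real code accumulates into rgb / *camera_image and divides by
    // 'denom' just after the hunk above.
    #include <opencv2/core.hpp>
    #include <vector>

    cv::Mat blurAverage(const std::vector<cv::Mat>& frames,
                        const std::vector<double>& exposure_ms) {
        CV_Assert(!frames.empty() && frames.size() == exposure_ms.size());
        cv::Mat acc = cv::Mat::zeros(frames[0].size(), frames[0].type());
        double denom = 0.0;
        for (size_t i = 0; i < frames.size(); ++i) {
            acc += exposure_ms[i] * frames[i]; // weight frame by its exposure time
            denom += exposure_ms[i];
        }
        if (denom > 0.0)
            acc /= denom; // normalize so the result stays in the input range
        return acc;
    }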
@@ -70,7 +83,7 @@ namespace event_camera_simulator {
             ss << hdr_output_folder << "/frames/frame_" << std::setfill('0')
                << std::setw(5) << frame_number++ << ".exr";
             std::string frame_path = ss.str();
-            cv::imwrite(frame_path, *camera_image);
+            cv::imwrite(frame_path, rgb);
 
             // ze::openOutputFileStream(
             //     ze::joinPath(output_folder, "exposures.csv"),
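The blurred float RGB frame is now the one written to the OpenEXR file. A small sketch of the same call, assuming an OpenCV build with OpenEXR support; the helper name and path are placeholders, not part of the commit:

    #include <opencv2/core.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <string>

    // Sketch: cv::imwrite picks the EXR encoder from the ".exr" extension and
    // keeps the 32-bit float range of a CV_32FC3 image.
    bool writeHdrFrame(const cv::Mat& rgb_32fc3, const std::string& path_exr) {
        CV_Assert(rgb_32fc3.type() == CV_32FC3);
        return cv::imwrite(path_exr, rgb_32fc3);
    }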
@@ -46,6 +46,7 @@ namespace event_camera_simulator {
                 camera_simulator_success =
                     camera_simulators_[i].imageCallback(
                         *sim_data.images[i],
+                        *sim_data.images_rgb[i],
                         time,
                         corrupted_camera_images_[i]
                     );
@@ -42,12 +42,15 @@ namespace event_camera_simulator {
     using Duration = ze::uint64_t;
     using Image = cv::Mat_<ImageFloatType>;
     using ImagePtr = std::shared_ptr<Image>;
+    using ImageRGB = cv::Mat;
+    using ImageRGBPtr = std::shared_ptr<ImageRGB>;
     using Depthmap = cv::Mat_<ImageFloatType>;
     using OpticFlow = cv::Mat_<cv::Vec<ImageFloatType, 2>>;
     using OpticFlowPtr = std::shared_ptr<OpticFlow>;
     using DepthmapPtr = std::shared_ptr<Depthmap>;
 
     using ImagePtrVector = std::vector<ImagePtr>;
+    using ImageRGBPtrVector = std::vector<ImageRGBPtr>;
     using DepthmapPtrVector = std::vector<DepthmapPtr>;
     using OpticFlowPtrVector = std::vector<OpticFlowPtr>;
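Note the asymmetry above: Image stays a typed cv::Mat_<ImageFloatType>, while ImageRGB is a plain cv::Mat whose CV_32FC3 layout is only established where it is allocated (see the allocations further down). A tiny sketch of what that means at the access site; the helper and sizes are hypothetical:

    // Sketch: with an untyped cv::Mat the element type is a runtime property,
    // so accesses must name the element type explicitly.
    #include <opencv2/core.hpp>

    void touchPixel(cv::Mat& rgb) {
        CV_Assert(rgb.type() == CV_32FC3);                   // matches the allocations below
        rgb.at<cv::Vec3f>(0, 0) = cv::Vec3f(0.f, 0.f, 1.f);  // BGR float pixel
    }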
@@ -98,6 +101,7 @@ namespace event_camera_simulator {
 
         //! Camera images.
         ImagePtrVector images;
+        ImageRGBPtrVector images_rgb;
 
         //! Depth maps.
         DepthmapPtrVector depthmaps;
@@ -1,4 +1,11 @@
+#include "esim/imp_multi_objects_2d/object.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/imgproc/types_c.h"
+
+#include <cstdint>
 #include <esim/data_provider/data_provider_from_folder.hpp>
+#include <iostream>
 #include <opencv2/imgcodecs/imgcodecs.hpp>
 #include <ze/cameras/camera_impl.hpp>
 #include <ze/common/file_utils.hpp>
@@ -40,6 +47,11 @@ namespace event_camera_simulator {
         sim_data_.images.emplace_back(ImagePtr(new Image(
             cv::Size(camera_rig_->at(0).width(), camera_rig_->at(0).height())
         )));
+        sim_data_.images_rgb.emplace_back(ImageRGBPtr(new ImageRGB(
+            camera_rig_->at(0).width(),
+            camera_rig_->at(0).height(),
+            CV_32FC3
+        )));
 
         sim_data_.camera_rig = camera_rig_;
         sim_data_.images_updated = true;
@@ -74,12 +86,17 @@ namespace event_camera_simulator {
 
             const std::string& path_to_image =
                 ze::joinPath(path_to_data_folder_, items[1]);
-            cv::Mat image = cv::imread(path_to_image, 0);
+            cv::Mat image = cv::imread(path_to_image);
             CHECK(image.data)
                 << "Could not load image from file: " << path_to_image;
 
             VLOG(3) << "Read image from file: " << path_to_image;
-            image.convertTo(
+            image.copyTo(*sim_data_.images_rgb[0]);
+
+            cv::Mat image_gray = image;
+            cv::cvtColor(image, image_gray, CV_BGR2GRAY);
+
+            image_gray.convertTo(
                 *sim_data_.images[0],
                 cv::DataType<ImageFloatType>::type,
                 1. / 255.
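The provider now loads the frame in color (cv::imread's default flag), copies it into the RGB slot, and derives the grayscale image from it with cv::cvtColor before the existing convertTo normalization. A standalone sketch of that flow, with a placeholder helper name and without the sim_data_ bookkeeping:

    // Sketch of the new load path: keep the color image for the RGB output,
    // derive the grayscale frame from it, normalize to [0, 1].
    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/imgproc.hpp>
    #include <string>

    bool loadFrame(const std::string& path, cv::Mat& rgb_out, cv::Mat& gray_out) {
        cv::Mat image = cv::imread(path); // 8-bit BGR by default
        if (image.empty())
            return false;
        image.copyTo(rgb_out);            // color copy, as in the hunk above
        cv::Mat image_gray;
        cv::cvtColor(image, image_gray, cv::COLOR_BGR2GRAY);
        image_gray.convertTo(gray_out, CV_32F, 1.0 / 255.0);
        return true;
    }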
@@ -103,11 +103,15 @@ namespace event_camera_simulator {
             );
 
             sim_data_.images.emplace_back(ImagePtr(new Image(size)));
+            sim_data_.images_rgb.emplace_back(
+                ImageRGBPtr(new ImageRGB(size, CV_32FC3))
+            );
             sim_data_.depthmaps.emplace_back(DepthmapPtr(new Depthmap(size)));
             sim_data_.optic_flows.emplace_back(OpticFlowPtr(new OpticFlow(size))
             );
 
             sim_data_.images[i]->setTo(0);
+            sim_data_.images_rgb[i]->setTo(0);
             sim_data_.depthmaps[i]->setTo(0);
             sim_data_.optic_flows[i]->setTo(0);
         }
@@ -241,6 +245,7 @@ namespace event_camera_simulator {
                     sim_data_.groundtruth.linear_velocity_obj_,
                     sim_data_.groundtruth.angular_velocity_obj_,
                     sim_data_.images[i],
+                    sim_data_.images_rgb[i],
                     sim_data_.depthmaps[i],
                     sim_data_.optic_flows[i]
                 );
@@ -249,6 +254,7 @@ namespace event_camera_simulator {
                     sim_data_.groundtruth.T_W_B * camera_rig_->T_B_C(i),
                     sim_data_.groundtruth.T_W_OBJ_,
                     sim_data_.images[i],
+                    sim_data_.images_rgb[i],
                     sim_data_.depthmaps[i]
                 );
             }
@@ -19,6 +19,7 @@ namespace event_camera_simulator {
             const Transformation& T_W_C,
             const std::vector<Transformation>& T_W_OBJ,
             const ImagePtr& out_image,
+            const ImageRGBPtr& out_image_rgb,
             const DepthmapPtr& out_depthmap
         ) const = 0;
 
@@ -36,6 +37,7 @@ namespace event_camera_simulator {
             const std::vector<LinearVelocity>& linear_velocity_obj,
             const std::vector<AngularVelocity>& angular_velocity_obj,
             const ImagePtr& out_image,
+            const ImageRGBPtr& out_image_rgb,
             const DepthmapPtr& out_depthmap,
             const OpticFlowPtr& optic_flow_map
         ) const {}
@@ -24,8 +24,9 @@ namespace event_camera_simulator {
             const Transformation& T_W_C,
             const std::vector<Transformation>& T_W_OBJ,
             const ImagePtr& out_image,
+            const ImageRGBPtr& out_image_rgb,
             const DepthmapPtr& out_depthmap
-        ) const;
+        ) const override;
 
         //! Returns true if the rendering engine can compute optic flow, false
         //! otherwise
@@ -43,6 +44,7 @@ namespace event_camera_simulator {
             const std::vector<LinearVelocity>& linear_velocity_obj,
             const std::vector<AngularVelocity>& angular_velocity_obj,
             const ImagePtr& out_image,
+            const ImageRGBPtr& out_image_rgb,
             const DepthmapPtr& out_depthmap,
             const OpticFlowPtr& optic_flow_map
         ) const override;
@@ -17,6 +17,7 @@
 #include <esim/imp_opengl_renderer/opengl_renderer.hpp>
 
 #include <glad/glad.h>
+#include <iomanip>
 #include <learnopengl/shader.h>
 #include <learnopengl/model.h>
 #include <GLFW/glfw3.h>
@@ -314,18 +315,23 @@ namespace event_camera_simulator {
         const Transformation& T_W_C,
         const std::vector<Transformation>& T_W_OBJ,
         const ImagePtr& out_image,
+        const ImageRGBPtr& out_image_rgb,
         const DepthmapPtr& out_depthmap
     ) const {
         CHECK(is_initialized_) << "Called render() but the renderer was not "
                                   "initialized yet. Have you "
                                   "first called setCamera()?";
         CHECK(out_image);
+        CHECK(out_image_rgb);
         CHECK(out_depthmap);
         CHECK_EQ(out_image->cols, width_);
+        CHECK_EQ(out_image_rgb->cols, width_);
         CHECK_EQ(out_image->rows, height_);
+        CHECK_EQ(out_image_rgb->rows, height_);
         CHECK_EQ(out_depthmap->cols, width_);
         CHECK_EQ(out_depthmap->rows, height_);
         CHECK_EQ(out_image->type(), CV_32F);
+        CHECK_EQ(out_image_rgb->type(), CV_32FC3);
         CHECK_EQ(out_depthmap->type(), CV_32F);
 
         // draw to our framebuffer instead of screen
@@ -413,6 +419,29 @@ namespace event_camera_simulator {
 
         // read out what we just rendered
         cv::Mat img_color(height_, width_, CV_8UC3);
+        {
+            float pixels[height_ * width_ * 3];
+            glReadPixels(
+                0,
+                0,
+                img_color.cols,
+                img_color.rows,
+                GL_RGB,
+                GL_FLOAT,
+                pixels
+            );
+
+            cv::Mat rgb(height_, width_, CV_32FC3, pixels);
+            rgb.copyTo(*out_image_rgb);
+        }
+        // static uint frame_number = 0;
+
+        // std::stringstream ss;
+        // ss << "/tmp/tests"
+        //    << "/frames/frame_" << std::setfill('0') << std::setw(5)
+        //    << frame_number++ << ".exr";
+        // cv::imwrite(ss.str(), rgb);
+
         glPixelStorei(GL_PACK_ALIGNMENT, (img_color.step & 3) ? 1 : 4);
         glPixelStorei(
             GL_PACK_ROW_LENGTH,
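The render path now reads the floating-point color buffer back with glReadPixels(..., GL_RGB, GL_FLOAT, ...) and wraps the raw buffer in a cv::Mat header before deep-copying it into out_image_rgb. A hedged standalone sketch of that read-back, using a std::vector instead of the variable-length array in the hunk and assuming the target framebuffer is already bound; the helper name is hypothetical:

    // Sketch: read the bound FBO's float color attachment into an OpenCV Mat.
    // Assumes width x height matches the attachment and a GL context is current.
    #include <vector>
    #include <opencv2/core.hpp>
    #include <glad/glad.h>

    void readColorAttachment(int width, int height, cv::Mat& out_rgb) {
        std::vector<float> pixels(static_cast<size_t>(width) * height * 3);
        glReadPixels(0, 0, width, height, GL_RGB, GL_FLOAT, pixels.data());
        // Wrap the raw buffer without copying, then deep-copy so out_rgb owns
        // its data after 'pixels' goes out of scope.
        cv::Mat rgb(height, width, CV_32FC3, pixels.data());
        rgb.copyTo(out_rgb);
    }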
@@ -480,10 +509,11 @@ namespace event_camera_simulator {
         const std::vector<LinearVelocity>& linear_velocity_obj,
         const std::vector<AngularVelocity>& angular_velocity_obj,
         const ImagePtr& out_image,
+        const ImageRGBPtr& out_image_rgb,
         const DepthmapPtr& out_depthmap,
         const OpticFlowPtr& optic_flow_map
     ) const {
-        render(T_W_C, T_W_OBJ, out_image, out_depthmap);
+        render(T_W_C, T_W_OBJ, out_image, out_image_rgb, out_depthmap);
 
         // draw to our optic flow framebuffer instead of screen
         glBindFramebuffer(GL_FRAMEBUFFER, fbo_of);
@@ -26,6 +26,7 @@ namespace event_camera_simulator {
             const Transformation& T_W_C,
             const std::vector<Transformation>& T_W_OBJ,
             const ImagePtr& out_image,
+            const ImageRGBPtr& out_image_rgb,
             const DepthmapPtr& out_depthmap
         ) const override {
             render(T_W_C, out_image, out_depthmap);
@@ -31,6 +31,7 @@ namespace event_camera_simulator {
             const Transformation& T_W_C,
             const std::vector<Transformation>& T_W_OBJ,
             const ImagePtr& out_image,
+            const ImageRGBPtr& out_image_rbg,
             const DepthmapPtr& out_depthmap
         ) const override {
             render(T_W_C, out_image, out_depthmap);
@@ -21,8 +21,9 @@ namespace event_camera_simulator {
             const Transformation& T_W_C,
             const std::vector<Transformation>& T_W_OBJ,
             const ImagePtr& out_image,
+            const ImageRGBPtr& out_image_rgb,
             const DepthmapPtr& out_depthmap
-        ) const {
+        ) const override {
             render(T_W_C, out_image, out_depthmap);
         }