diff --git a/CMakeLists.txt b/CMakeLists.txt index 150a018f8a..fb53eeacac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -268,6 +268,9 @@ OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" (NOT ANDROID) OCV_OPTION(WITH_GSTREAMER "Include Gstreamer support" ON VISIBLE_IF NOT ANDROID AND NOT IOS AND NOT XROS AND NOT WINRT VERIFY HAVE_GSTREAMER AND GSTREAMER_VERSION VERSION_GREATER "0.99") +OCV_OPTION(WITH_LIBCAMERA "Include Libcamera support" ON + VISIBLE_IF UNIX AND NOT APPLE AND NOT ANDROID + VERIFY HAVE_LIBCAMERA) OCV_OPTION(WITH_GTK "Include GTK support" ON VISIBLE_IF UNIX AND NOT APPLE AND NOT ANDROID VERIFY HAVE_GTK) @@ -1720,6 +1723,10 @@ if(ANDROID) status(" NDK Camera:" HAVE_ANDROID_NATIVE_CAMERA THEN "YES" ELSE NO) endif() +if(WITH_LIBCAMERA OR HAVE_LIBCAMERA) + status(" libcamera:" HAVE_LIBCAMERA THEN YES ELSE NO) +endif() + # Order is similar to CV_PARALLEL_FRAMEWORK in core/src/parallel.cpp ocv_build_features_string(parallel_status EXCLUSIVE IF HAVE_TBB THEN "TBB (ver ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR} interface ${TBB_INTERFACE_VERSION})" diff --git a/modules/videoio/CMakeLists.txt b/modules/videoio/CMakeLists.txt index c8a03e72ae..b60b07c373 100644 --- a/modules/videoio/CMakeLists.txt +++ b/modules/videoio/CMakeLists.txt @@ -108,6 +108,11 @@ if(TARGET ocv.3rdparty.dshow) list(APPEND tgts ocv.3rdparty.dshow) endif() +if(TARGET ocv.3rdparty.libcamera) + list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_libcamera.cpp) + list(APPEND tgts ocv.3rdparty.libcamera) +endif() + if(TARGET ocv.3rdparty.msmf) if("msmf" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all") ocv_create_builtin_videoio_plugin("opencv_videoio_msmf" ocv.3rdparty.msmf "cap_msmf.cpp") diff --git a/modules/videoio/cmake/detect_libcamera.cmake b/modules/videoio/cmake/detect_libcamera.cmake new file mode 100644 index
0000000000..a8e1ae2530 --- /dev/null +++ b/modules/videoio/cmake/detect_libcamera.cmake @@ -0,0 +1,19 @@ +# --- Libcamera --- + +if(NOT HAVE_LIBCAMERA AND PKG_CONFIG_FOUND) + ocv_check_modules(LIBCAMERA libcamera) + if(LIBCAMERA_FOUND) + set(HAVE_LIBCAMERA TRUE) + endif() +endif() + +if(HAVE_LIBCAMERA) + if((CMAKE_CXX_STANDARD EQUAL 98) OR (CMAKE_CXX_STANDARD LESS 17)) + message(STATUS "CMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} is too old to support libcamera. Use C++17 or later. Turning HAVE_LIBCAMERA off") + set(HAVE_LIBCAMERA FALSE) + endif() +endif() + +if(HAVE_LIBCAMERA) + ocv_add_external_target(libcamera "${LIBCAMERA_INCLUDE_DIRS}" "${LIBCAMERA_LINK_LIBRARIES}" "HAVE_LIBCAMERA") +endif() diff --git a/modules/videoio/cmake/init.cmake b/modules/videoio/cmake/init.cmake index 2237b97a09..5bb9e06959 100644 --- a/modules/videoio/cmake/init.cmake +++ b/modules/videoio/cmake/init.cmake @@ -11,6 +11,7 @@ endmacro() add_backend("ffmpeg" WITH_FFMPEG) add_backend("gstreamer" WITH_GSTREAMER) add_backend("v4l" WITH_V4L) +add_backend("libcamera" WITH_LIBCAMERA) add_backend("aravis" WITH_ARAVIS) add_backend("dc1394" WITH_1394) diff --git a/modules/videoio/include/opencv2/videoio.hpp b/modules/videoio/include/opencv2/videoio.hpp index bdda38e5e2..3da230d931 100644 --- a/modules/videoio/include/opencv2/videoio.hpp +++ b/modules/videoio/include/opencv2/videoio.hpp @@ -128,7 +128,9 @@ enum VideoCaptureAPIs { CAP_INTEL_MFX = 2300, //!< Intel MediaSDK CAP_XINE = 2400, //!< XINE engine (Linux) CAP_UEYE = 2500, //!< uEye Camera API - CAP_OBSENSOR = 2600, //!< For Orbbec 3D-Sensor device/module (Astra+, Femto, Astra2, Gemini2, Gemini2L, Gemini2XL, Gemini330, Femto Mega) attention: Astra2 cameras currently only support Windows and Linux kernel versions no higher than 4.15, and higher versions of Linux kernel may have exceptions. 
+ CAP_OBSENSOR = 2600, //!< For Orbbec 3D-Sensor device/module (Astra+, Femto, Astra2, Gemini2, Gemini2L, Gemini2XL, Gemini330, Femto Mega) attention: Astra2 cameras currently only support Windows and Linux kernel versions no higher than 4.15, and higher versions of Linux kernel may have exceptions. + CAP_LIBCAMERA = 2700, //!< Libcamera API + + }; diff --git a/modules/videoio/src/cap_interface.hpp b/modules/videoio/src/cap_interface.hpp index a1924f5682..e17a32770b 100644 --- a/modules/videoio/src/cap_interface.hpp +++ b/modules/videoio/src/cap_interface.hpp @@ -402,6 +402,10 @@ Ptr createAndroidVideoWriter(const std::string& filename, int four Ptr create_obsensor_capture(int index); +Ptr createLibcameraCapture_cam(int index); +Ptr createLibcameraCapture_file(const std::string &filename); + + + bool VideoCapture_V4L_waitAny( const std::vector& streams, CV_OUT std::vector& ready, diff --git a/modules/videoio/src/cap_libcamera.cpp b/modules/videoio/src/cap_libcamera.cpp new file mode 100644 index 0000000000..38da35e6ab --- /dev/null +++ b/modules/videoio/src/cap_libcamera.cpp @@ -0,0 +1,1186 @@ +#include "precomp.hpp" +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cap_libcamera.hpp" +#include + +/** + * @brief implementation of the LibcameraApp class and LibcameraCapture + * The LibcameraApp implementation is from LCCV + * Source: https://github.com/kbarni/LCCV + + + +*/ + +namespace cv +{ + + LibcameraApp::LibcameraApp(std::unique_ptr opts) + : options_(std::move(opts)), controls_(controls::controls) + + { + if (!options_) + options_ = std::make_unique(); + controls_.clear(); + } + + LibcameraApp::~LibcameraApp() + { + + StopCamera(); + Teardown(); + CloseCamera(); + std::cerr << "End of ~LibcameraApp() call" << std::endl; + } + + 
std::string const &LibcameraApp::CameraId() const + { + return camera_->id(); + } + + void LibcameraApp::OpenCamera() + { + + if (options_->verbose) + std::cerr << "Opening camera..." << std::endl; + + if (getCameraManager()->cameras().size() == 0) + throw std::runtime_error("no cameras available"); + if (options_->camera >= getCameraManager()->cameras().size()) + throw std::runtime_error("selected camera is not available"); + + std::string const &cam_id = getCameraManager()->cameras()[options_->camera]->id(); + camera_ = getCameraManager()->get(cam_id); + if (!camera_) + throw std::runtime_error("failed to find camera " + cam_id); + + if (!camera_acquired_ && camera_->acquire()) + throw std::runtime_error("failed to acquire camera " + cam_id); + camera_acquired_ = true; + + if (options_->verbose) + std::cerr << "Acquired camera " << cam_id << std::endl; + } + + void LibcameraApp::CloseCamera() + { + if (camera_acquired_) + camera_->release(); + camera_acquired_ = false; + + camera_.reset(); + + if (options_->verbose && !options_->help) + std::cerr << "Camera closed" << std::endl; + } + + // void LibcameraApp::CloseCamera() { + // std::lock_guard lock(camera_stop_mutex_); + // if (camera_acquired_) { + // try { + // camera_->release(); + // camera_acquired_ = false; + // camera_.reset(); + // } catch (const std::exception& e) { + // std::cerr << "Error releasing camera: " << e.what() << std::endl; + // } + // } + // } + + void LibcameraApp::ConfigureViewfinder() + { + if (options_->verbose) + std::cerr << "Configuring viewfinder..." 
<< std::endl; + + StreamRoles stream_roles = {StreamRole::Viewfinder}; + configuration_ = camera_->generateConfiguration(stream_roles); + if (!configuration_) + throw std::runtime_error("failed to generate viewfinder configuration"); + + // Now we get to override any of the default settings from the options_-> + configuration_->at(0).pixelFormat = libcamera::formats::RGB888; + configuration_->at(0).size.width = options_->video_width; + configuration_->at(0).size.height = options_->video_height; + configuration_->at(0).bufferCount = 4; + + // configuration_->transform = options_->transform; + + configureDenoise(options_->denoise == "auto" ? "cdn_off" : options_->denoise); + setupCapture(); + + streams_["viewfinder"] = configuration_->at(0).stream(); + + if (options_->verbose) + std::cerr << "Viewfinder setup complete" << std::endl; + } + + void LibcameraApp::Teardown() + { + if (options_->verbose && !options_->help) + std::cerr << "Tearing down requests, buffers and configuration" << std::endl; + + for (auto &iter : mapped_buffers_) + { + // assert(iter.first->planes().size() == iter.second.size()); + // for (unsigned i = 0; i < iter.first->planes().size(); i++) + for (auto &span : iter.second) + munmap(span.data(), span.size()); + } + mapped_buffers_.clear(); + + delete allocator_; + allocator_ = nullptr; + + configuration_.reset(); + + frame_buffers_.clear(); + + streams_.clear(); + } + + void LibcameraApp::StartCamera() + { + // This makes all the Request objects that we shall need. + makeRequests(); + + // Build a list of initial controls that we must set in the camera before starting it. + // We don't overwrite anything the application may have set before calling us. 
+ if (!controls_.get(controls::ScalerCrop) && options_->roi_width != 0 && options_->roi_height != 0) + { + Rectangle sensor_area = *camera_->properties().get(properties::ScalerCropMaximum); + int x = options_->roi_x * sensor_area.width; + int y = options_->roi_y * sensor_area.height; + int w = options_->roi_width * sensor_area.width; + int h = options_->roi_height * sensor_area.height; + Rectangle crop(x, y, w, h); + crop.translateBy(sensor_area.topLeft()); + if (options_->verbose) + std::cerr << "Using crop " << crop.toString() << std::endl; + controls_.set(controls::ScalerCrop, crop); + } + + // Framerate is a bit weird. If it was set programmatically, we go with that, but + // otherwise it applies only to preview/video modes. For stills capture we set it + // as long as possible so that we get whatever the exposure profile wants. + if (!controls_.get(controls::FrameDurationLimits)) + { + if (StillStream()) + controls_.set(controls::FrameDurationLimits, libcamera::Span({INT64_C(100), INT64_C(1000000000)})); + else if (options_->framerate > 0) + { + int64_t frame_time = 1000000 / options_->framerate; // in us + controls_.set(controls::FrameDurationLimits, libcamera::Span({frame_time, frame_time})); + } + } + + if (!controls_.get(controls::ExposureTime) && options_->shutter) + controls_.set(controls::ExposureTime, options_->shutter); + if (!controls_.get(controls::AnalogueGain) && options_->gain) + controls_.set(controls::AnalogueGain, options_->gain); + if (!controls_.get(controls::AeMeteringMode)) + controls_.set(controls::AeMeteringMode, options_->getMeteringMode()); + if (!controls_.get(controls::AeExposureMode)) + controls_.set(controls::AeExposureMode, options_->getExposureMode()); + if (!controls_.get(controls::ExposureValue)) + controls_.set(controls::ExposureValue, options_->ev); + if (!controls_.get(controls::AwbMode)) + controls_.set(controls::AwbMode, options_->getWhiteBalance()); + if (!controls_.get(controls::ColourGains) && options_->awb_gain_r && 
options_->awb_gain_b) + controls_.set(controls::ColourGains, libcamera::Span({options_->awb_gain_r, options_->awb_gain_b})); + if (!controls_.get(controls::Brightness)) + controls_.set(controls::Brightness, options_->brightness); + if (!controls_.get(controls::Contrast)) + controls_.set(controls::Contrast, options_->contrast); + if (!controls_.get(controls::Saturation)) + controls_.set(controls::Saturation, options_->saturation); + if (!controls_.get(controls::Sharpness)) + controls_.set(controls::Sharpness, options_->sharpness); + + if (camera_->start(&controls_)) + throw std::runtime_error("failed to start camera"); + controls_.clear(); + camera_started_ = true; + last_timestamp_ = 0; + + camera_->requestCompleted.connect(this, &LibcameraApp::requestComplete); + + for (std::unique_ptr &request : requests_) + { + if (camera_->queueRequest(request.get()) < 0) + throw std::runtime_error("Failed to queue request"); + } + + if (options_->verbose) + std::cerr << "Camera started!" << std::endl; + } + + void LibcameraApp::StopCamera() + { + { + // We don't want QueueRequest to run asynchronously while we stop the camera. + std::lock_guard lock(camera_stop_mutex_); + if (camera_started_) + { + std::cerr << "Camera tries to stop!!" << std::endl; + if (camera_->stop()) + throw std::runtime_error("failed to stop camera"); + + camera_started_ = false; + } + // camera_->requestCompleted.disconnect(this, &LibcameraApp::requestComplete); + // if (!camera_->requestCompleted.disconnect(this, &LibcameraApp::requestComplete)) { + // throw std::runtime_error("failed to disconnect camera callbacks"); + // } + } + + if (camera_) + camera_->requestCompleted.disconnect(this, &LibcameraApp::requestComplete); + + // An application might be holding a CompletedRequest, so queueRequest will get + // called to delete it later, but we need to know not to try and re-queue it. 
+ completed_requests_.clear(); + + msg_queue_.Clear(); + + while (!free_requests_.empty()) + free_requests_.pop(); + + requests_.clear(); + + controls_.clear(); // no need for mutex here + + if (options_->verbose && !options_->help) + std::cerr << "Camera stopped!" << std::endl; + } + + void LibcameraApp::ApplyRoiSettings() + { + if (!controls_.get(controls::ScalerCrop) && options_->roi_width != 0 && options_->roi_height != 0) + { + Rectangle sensor_area = *camera_->properties().get(properties::ScalerCropMaximum); + int x = options_->roi_x * sensor_area.width; + int y = options_->roi_y * sensor_area.height; + int w = options_->roi_width * sensor_area.width; + int h = options_->roi_height * sensor_area.height; + Rectangle crop(x, y, w, h); + crop.translateBy(sensor_area.topLeft()); + if (options_->verbose) + std::cerr << "Using crop " << crop.toString() << std::endl; + controls_.set(controls::ScalerCrop, crop); + } + } + + LibcameraApp::Msg LibcameraApp::Wait() + { + return msg_queue_.Wait(); + } + + void LibcameraApp::queueRequest(CompletedRequest *completed_request) + { + BufferMap buffers(std::move(completed_request->buffers)); + + Request *request = completed_request->request; + assert(request); + + // This function may run asynchronously so needs protection from the + // camera stopping at the same time. + std::lock_guard stop_lock(camera_stop_mutex_); + if (!camera_started_) + return; + + // An application could be holding a CompletedRequest while it stops and re-starts + // the camera, after which we don't want to queue another request now. 
+ { + std::lock_guard lock(completed_requests_mutex_); + auto it = completed_requests_.find(completed_request); + delete completed_request; + if (it == completed_requests_.end()) + return; + completed_requests_.erase(it); + } + + for (auto const &p : buffers) + { + if (request->addBuffer(p.first, p.second) < 0) + throw std::runtime_error("failed to add buffer to request in QueueRequest"); + } + + { + std::lock_guard lock(control_mutex_); + request->controls() = std::move(controls_); + } + + if (camera_->queueRequest(request) < 0) + throw std::runtime_error("failed to queue request"); + } + + void LibcameraApp::PostMessage(MsgType &t, MsgPayload &p) + { + msg_queue_.Post(Msg(t, std::move(p))); + } + + libcamera::Stream *LibcameraApp::GetStream(std::string const &name, unsigned int *w, unsigned int *h, + unsigned int *stride) const + { + auto it = streams_.find(name); + if (it == streams_.end()) + return nullptr; + StreamDimensions(it->second, w, h, stride); + return it->second; + } + + libcamera::Stream *LibcameraApp::ViewfinderStream(unsigned int *w, unsigned int *h, unsigned int *stride) const + { + return GetStream("viewfinder", w, h, stride); + } + + libcamera::Stream *LibcameraApp::StillStream(unsigned int *w, unsigned int *h, unsigned int *stride) const + { + return GetStream("still", w, h, stride); + } + + libcamera::Stream *LibcameraApp::RawStream(unsigned int *w, unsigned int *h, unsigned int *stride) const + { + return GetStream("raw", w, h, stride); + } + + libcamera::Stream *LibcameraApp::VideoStream(unsigned int *w, unsigned int *h, unsigned int *stride) const + { + return GetStream("video", w, h, stride); + } + + libcamera::Stream *LibcameraApp::LoresStream(unsigned int *w, unsigned int *h, unsigned int *stride) const + { + return GetStream("lores", w, h, stride); + } + + libcamera::Stream *LibcameraApp::GetMainStream() const + { + for (auto &p : streams_) + { + if (p.first == "viewfinder" || p.first == "still" || p.first == "video") + return p.second; 
+ } + + return nullptr; + } + + std::vector> LibcameraApp::Mmap(FrameBuffer *buffer) const + { + auto item = mapped_buffers_.find(buffer); + if (item == mapped_buffers_.end()) + return {}; + return item->second; + } + + void LibcameraApp::SetControls(ControlList &controls) + { + std::lock_guard lock(control_mutex_); + controls_ = std::move(controls); + } + + void LibcameraApp::StreamDimensions(Stream const *stream, unsigned int *w, unsigned int *h, unsigned int *stride) const + { + StreamConfiguration const &cfg = stream->configuration(); + if (w) + *w = cfg.size.width; + if (h) + *h = cfg.size.height; + if (stride) + *stride = cfg.stride; + } + + void LibcameraApp::setupCapture() + { + // First finish setting up the configuration. + + CameraConfiguration::Status validation = configuration_->validate(); + if (validation == CameraConfiguration::Invalid) + throw std::runtime_error("failed to valid stream configurations"); + else if (validation == CameraConfiguration::Adjusted) + std::cerr << "Stream configuration adjusted" << std::endl; + + if (camera_->configure(configuration_.get()) < 0) + throw std::runtime_error("failed to configure streams"); + + if (options_->verbose) + std::cerr << "Camera streams configured" << std::endl; + + // Next allocate all the buffers we need, mmap them and store them on a free list. + + allocator_ = new FrameBufferAllocator(camera_); + for (StreamConfiguration &config : *configuration_) + { + Stream *stream = config.stream(); + + if (allocator_->allocate(stream) < 0) + throw std::runtime_error("failed to allocate capture buffers"); + + for (const std::unique_ptr &buffer : allocator_->buffers(stream)) + { + // "Single plane" buffers appear as multi-plane here, but we can spot them because then + // planes all share the same fd. We accumulate them so as to mmap the buffer only once. 
+ size_t buffer_size = 0; + for (unsigned i = 0; i < buffer->planes().size(); i++) + { + const FrameBuffer::Plane &plane = buffer->planes()[i]; + buffer_size += plane.length; + if (i == buffer->planes().size() - 1 || plane.fd.get() != buffer->planes()[i + 1].fd.get()) + { + void *memory = mmap(NULL, buffer_size, PROT_READ | PROT_WRITE, MAP_SHARED, plane.fd.get(), 0); + mapped_buffers_[buffer.get()].push_back( + libcamera::Span(static_cast(memory), buffer_size)); + buffer_size = 0; + } + } + frame_buffers_[stream].push(buffer.get()); + } + } + if (options_->verbose) + std::cerr << "Buffers allocated and mapped" << std::endl; + + // The requests will be made when StartCamera() is called. + } + + void LibcameraApp::makeRequests() + { + auto free_buffers(frame_buffers_); + while (true) + { + for (StreamConfiguration &config : *configuration_) + { + Stream *stream = config.stream(); + if (stream == configuration_->at(0).stream()) + { + if (free_buffers[stream].empty()) + { + if (options_->verbose) + std::cerr << "Requests created" << std::endl; + return; + } + std::unique_ptr request = camera_->createRequest(); + if (!request) + throw std::runtime_error("failed to make request"); + requests_.push_back(std::move(request)); + } + else if (free_buffers[stream].empty()) + throw std::runtime_error("concurrent streams need matching numbers of buffers"); + + FrameBuffer *buffer = free_buffers[stream].front(); + free_buffers[stream].pop(); + if (requests_.back()->addBuffer(stream, buffer) < 0) + throw std::runtime_error("failed to add buffer to request"); + } + } + } + + void LibcameraApp::requestComplete(Request *request) + { + if (request->status() == Request::RequestCancelled) + return; + + CompletedRequest *r = new CompletedRequest(sequence_++, request); + CompletedRequestPtr payload(r, [this](CompletedRequest *cr) + { this->queueRequest(cr); }); + { + std::lock_guard lock(completed_requests_mutex_); + completed_requests_.insert(r); + } + + // We calculate the instantaneous 
framerate in case anyone wants it. + uint64_t timestamp = payload->buffers.begin()->second->metadata().timestamp; + if (last_timestamp_ == 0 || last_timestamp_ == timestamp) + payload->framerate = 0; + else + payload->framerate = 1e9 / (timestamp - last_timestamp_); + last_timestamp_ = timestamp; + + msg_queue_.Post(Msg(MsgType::RequestComplete, std::move(payload))); + } + + void LibcameraApp::configureDenoise(const std::string &denoise_mode) + { + using namespace libcamera::controls::draft; + + static const std::map denoise_table = { + {"off", NoiseReductionModeOff}, + {"cdn_off", NoiseReductionModeMinimal}, + {"cdn_fast", NoiseReductionModeFast}, + {"cdn_hq", NoiseReductionModeHighQuality}}; + NoiseReductionModeEnum denoise; + + auto const mode = denoise_table.find(denoise_mode); + if (mode == denoise_table.end()) + throw std::runtime_error("Invalid denoise mode " + denoise_mode); + denoise = mode->second; + + controls_.set(NoiseReductionMode, denoise); + } + + /* ******************************************************************* */ + class LibcameraCapture CV_FINAL : public IVideoCapture + { + private: + public: + LibcameraCapture(); + virtual ~LibcameraCapture() CV_OVERRIDE; + + Options *options; + + bool startVideo(); + bool getVideoFrame(cv::Mat &frame, unsigned int timeout); + void stopVideo(); + + bool open(int _index); + bool open(const std::string &filename); + + virtual bool grabFrame() CV_OVERRIDE; + virtual bool retrieveFrame(int /*unused*/, OutputArray dst) CV_OVERRIDE; + virtual double getProperty(int propId) const CV_OVERRIDE; + virtual bool setProperty(int propId, double value) CV_OVERRIDE; + // virtual bool isOpened() const CV_OVERRIDE { return (bool)pipeline; } + virtual int getCaptureDomain() CV_OVERRIDE { return cv::CAP_LIBCAMERA; } // Need to modify videoio.hpp/enum VideoCaptureAPIs + // bool configureHW(const cv::VideoCaptureParameters&); + // bool configureStreamsProperty(const cv::VideoCaptureParameters&); + bool isOpened() const 
CV_OVERRIDE + { + return true; + } // camerastarted + + protected: + LibcameraApp *app; + static void *videoThreadFunc(void *p); + pthread_t videothread; + unsigned int still_flags; + unsigned int vw, vh, vstr; + std::atomic running, frameready; + uint8_t *framebuffer; + std::mutex mtx; + std::condition_variable cv; + + // bool isFramePending; + // bool needsReconfigure; + std::atomic isFramePending, needsReconfigure; + bool camerastarted; + }; + + LibcameraCapture::LibcameraCapture() + { + app = new LibcameraApp(std::make_unique()); + options = static_cast(app->GetOptions()); + still_flags = LibcameraApp::FLAG_STILL_NONE; + options->photo_width = 4056; + options->photo_height = 3040; + options->video_width = 640; + options->video_height = 480; + options->framerate = 30; + options->denoise = "auto"; + options->timeout = 1000; + options->setMetering(Metering_Modes::METERING_MATRIX); + options->setExposureMode(Exposure_Modes::EXPOSURE_NORMAL); + options->setWhiteBalance(WhiteBalance_Modes::WB_AUTO); + options->contrast = 1.0f; + options->saturation = 1.0f; + still_flags |= LibcameraApp::FLAG_STILL_RGB; + running.store(false, std::memory_order_release); + ; + frameready.store(false, std::memory_order_release); + ; + framebuffer = nullptr; + // isFramePending=false; + isFramePending.store(false, std::memory_order_release); + needsReconfigure.store(false, std::memory_order_release); + camerastarted = false; + } + + LibcameraCapture::~LibcameraCapture() + { + stopVideo(); + // delete app; + std::cerr << "End of ~LibcameraCapture() call" << std::endl; + } + + // using namespace LibcameraApp; + + void *LibcameraCapture::videoThreadFunc(void *p) // not resolved + { + LibcameraCapture *t = (LibcameraCapture *)p; + t->running.store(true, std::memory_order_release); + // allocate framebuffer + // unsigned int vw,vh,vstr; + libcamera::Stream *stream = t->app->ViewfinderStream(&t->vw, &t->vh, &t->vstr); + int buffersize = t->vh * t->vstr; + if (t->framebuffer) + delete[] 
t->framebuffer; + t->framebuffer = new uint8_t[buffersize]; + std::vector> mem; + std::cerr << "Time to start video thread loop" << std::endl; + // main loop + while (t->running.load(std::memory_order_acquire)) + { + // std::cerr << "Wating for msg..." << std::endl; + LibcameraApp::Msg msg = t->app->Wait(); + // std::cerr<<"msg get"<running.store(false, std::memory_order_release); + } + else if (msg.type != LibcameraApp::MsgType::RequestComplete) + throw std::runtime_error("unrecognised message!"); + + CompletedRequestPtr payload = std::get(msg.payload); + mem = t->app->Mmap(payload->buffers[stream]); + t->mtx.lock(); + memcpy(t->framebuffer, mem[0].data(), buffersize); + t->mtx.unlock(); + t->frameready.store(true, std::memory_order_release); + } + std::cerr << "Thread finished" << std::endl; + if (t->framebuffer) + { + delete[] t->framebuffer; + t->framebuffer = nullptr; + } + return NULL; + } + + bool LibcameraCapture::startVideo() // not resolved + { + // if(camerastarted) stopPhoto(); + if (running.load(std::memory_order_relaxed)) + { + std::cerr << "Video thread already running"; + return false; + } + frameready.store(false, std::memory_order_release); + LibcameraCapture::app->OpenCamera(); + LibcameraCapture::app->ConfigureViewfinder(); + LibcameraCapture::app->StartCamera(); + + int ret = pthread_create(&videothread, NULL, &videoThreadFunc, this); + if (ret != 0) + { + std::cerr << "Error starting video thread" << std::endl; + return false; + } + return true; + } + + void LibcameraCapture::stopVideo() // not resolved + { + if (!running) + return; + + running.store(false, std::memory_order_release); + ; + + // join thread + void *status; + int ret = pthread_join(videothread, &status); + if (ret < 0) + std::cerr << "Error joining thread" << std::endl; + + LibcameraCapture::app->StopCamera(); + LibcameraCapture::app->Teardown(); + LibcameraCapture::app->CloseCamera(); + frameready.store(false, std::memory_order_release); + ; + } + + bool 
LibcameraCapture::getVideoFrame(cv::Mat &frame, unsigned int timeout) + { + auto start_time = std::chrono::high_resolution_clock::now(); + bool timeout_reached = false; + timespec req; + req.tv_sec = 0; + req.tv_nsec = 1000000; // 1ms + while ((!frameready.load(std::memory_order_acquire)) && (!timeout_reached)) + { + nanosleep(&req, NULL); + timeout_reached = (std::chrono::high_resolution_clock::now() - start_time > std::chrono::milliseconds(timeout)); + } + if (frameready.load(std::memory_order_acquire)) + { + frame.create(vh, vw, CV_8UC3); + uint ls = vw * 3; + mtx.lock(); + uint8_t *ptr = framebuffer; + for (unsigned int i = 0; i < vh; i++, ptr += vstr) + memcpy(frame.ptr(i), ptr, ls); + mtx.unlock(); + frameready.store(false, std::memory_order_release); + ; + return true; + } + else + { + std::cerr << "ERROR: frameready status: " << frameready.load(std::memory_order_acquire) << std::endl; + return false; + } + } + + /** + * @brief Attempt to start the camera and ensure a frame is pending for capture. + * + * This function checks whether a frame is already pending. If a frame is pending, + * it returns immediately with `true`. If no frame is pending, the function attempts + * to configure the camera, start the camera stream, and create a video thread + * to handle video capturing. The `isFramePending` flag is updated accordingly. + * + * @return `true` if a frame is pending for capture. + * `false` if an error occurs while starting the video thread or configuring the camera. 
+ */ + bool LibcameraCapture::grabFrame() + { + if (camerastarted) + { + // if (needsReconfigure) + // { + // // restart the camera + // stopVideo(); + // startVideo(); + // // needsReconfigure = false; + // needsReconfigure.store(false, std::memory_order_release); + // } + return true; + } + else + { + Mat frame; + if (!getVideoFrame(frame, 5000)) + { + std::cerr << "Timeout error" << std::endl; + } + else + { + // std::cerr << "Frame grabbed successfully" << std::endl; + camerastarted = true; + } + // std::cerr << "Error grabbing video frame!" << std::endl; + } + return camerastarted; + } + + /** + * @brief Retrieve a single frame from the video stream and copy it to the destination. + * + * This function waits for a frame to be ready in the framebuffer, extracts the frame data, + * and copies it to the provided OpenCV `OutputArray`. It uses a timeout mechanism to avoid + * indefinite blocking if no frame becomes available. + * + * @param int Unused parameter. + * @param dst An OpenCV `OutputArray` where the retrieved frame will be stored. + * The frame is stored in RGB format (8-bit, 3 channels, CV_8UC3). + * + * @return `true` if a frame is successfully retrieved and copied to `dst`. + * `false` if no frame is ready (e.g., due to timeout or video not running). 
+ */ + bool LibcameraCapture::retrieveFrame(int, OutputArray dst) + { + // if (needsReconfigure) + // { + // // restart the camera + // stopVideo(); + // startVideo(); + // // needsReconfigure = false; + // needsReconfigure.store(false, std::memory_order_release); + // } + + if (!running.load(std::memory_order_acquire)) + return false; + auto start_time = std::chrono::high_resolution_clock::now(); + bool timeout_reached = false; + timespec req; + req.tv_sec = 0; + req.tv_nsec = 1000000; // 1ms + + uint64_t timeout_lim = options->timeout; + while ((!frameready.load(std::memory_order_acquire)) && (!timeout_reached)) + { + nanosleep(&req, NULL); + timeout_reached = (std::chrono::high_resolution_clock::now() - start_time > std::chrono::milliseconds(timeout_lim)); + // timeout=1000. Need to be modified in this class. + } + if (frameready.load(std::memory_order_acquire)) + { + Mat frame(vh, vw, CV_8UC3); + uint ls = vw * 3; + mtx.lock(); + uint8_t *ptr = framebuffer; + for (unsigned int i = 0; i < vh; i++, ptr += vstr) + { + memcpy(frame.ptr(i), ptr, ls); + } + mtx.unlock(); + frameready.store(false, std::memory_order_release); + frame.copyTo(dst); + return true; + } + else + { + // std::cerr << "frame NOT ready!" 
<< std::endl; + return false; + } + } + + double LibcameraCapture::getProperty(int propId) const + { + switch (propId) + { + case cv::CAP_PROP_BRIGHTNESS: + return options->brightness; + + case cv::CAP_PROP_CONTRAST: + return options->contrast; + + case cv::CAP_PROP_SATURATION: + return options->saturation; + + case cv::CAP_PROP_SHARPNESS: + return options->sharpness; + + case cv::CAP_PROP_AUTO_EXPOSURE: + return options->getExposureMode() == Exposure_Modes::EXPOSURE_NORMAL; + + case cv::CAP_PROP_EXPOSURE: + return options->shutter; + + case cv::CAP_PROP_AUTO_WB: + return options->getWhiteBalance() == WhiteBalance_Modes::WB_AUTO; + + case cv::CAP_PROP_WB_TEMPERATURE: + // Since we don't have a direct WB temperature, return an approximation based on the current setting + switch (options->getWhiteBalance()) + { + case WhiteBalance_Modes::WB_TUNGSTEN: + return 3000.0; // Approximate value for tungsten + case WhiteBalance_Modes::WB_INDOOR: + return 4500.0; // Approximate value for indoor + case WhiteBalance_Modes::WB_DAYLIGHT: + return 5500.0; // Approximate value for daylight + case WhiteBalance_Modes::WB_CLOUDY: + return 7000.0; // Approximate value for cloudy + default: + return 5000.0; // Default approximation if none of the above + } + + case cv::CAP_PROP_XI_AEAG_ROI_OFFSET_X: + return options->roi_x; + + case cv::CAP_PROP_XI_AEAG_ROI_OFFSET_Y: + return options->roi_y; + + case cv::CAP_PROP_XI_AEAG_ROI_WIDTH: + return options->roi_width; + + case cv::CAP_PROP_XI_AEAG_ROI_HEIGHT: + return options->roi_height; + + case cv::CAP_PROP_FOURCC: + { + // Return the FOURCC code of the current video format. + // This is a placeholder. You should replace it with the actual FOURCC code. 
+ // return cv::VideoWriter::fourcc('M', 'J', 'P', 'G'); + // return options->getFourCC(); + std::cerr << "Warning: Not implemented yet" << std::endl; + return 0; + } + + case cv::CAP_PROP_FRAME_WIDTH: + if (options->video_width != 0) + { + return options->video_width; + } + else + { + return options->photo_width; + } + + case cv::CAP_PROP_FRAME_HEIGHT: + if (options->video_height != 0) + { + return options->video_height; + } + else + { + return options->photo_height; + } + + case cv::CAP_PROP_FPS: + return options->framerate; + + case cv::CAP_PROP_AUTOFOCUS: + case cv::CAP_PROP_BUFFERSIZE: + case cv::CAP_PROP_PAN: + case cv::CAP_PROP_TILT: + case cv::CAP_PROP_ROLL: + case cv::CAP_PROP_IRIS: + // Not implemented, return a default value or an error code + std::cerr << "Warning: Property " << propId << " is not supported." << std::endl; + return 0; // Or some other value indicating an error or not supported + + default: + std::cerr << "Warning: Unsupported property: " << propId << std::endl; + return 0; + } + } + + bool LibcameraCapture::setProperty(int propId, double value) + { + switch (propId) + { + case cv::CAP_PROP_BRIGHTNESS: + options->brightness = value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_CONTRAST: + options->contrast = value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_SATURATION: + options->saturation = value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_SHARPNESS: + options->sharpness = value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_AUTO_EXPOSURE: + if (value) + { + options->setExposureMode(Exposure_Modes::EXPOSURE_NORMAL); + } + else + { + options->setExposureMode(Exposure_Modes::EXPOSURE_SHORT); + } + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_EXPOSURE: + options->shutter = value; // Assumes value is in milliseconds, 
libcamera uses seconds + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_AUTO_WB: + options->setWhiteBalance(value ? WhiteBalance_Modes::WB_AUTO : WhiteBalance_Modes::WB_INDOOR); + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_WB_TEMPERATURE: + // Libcamera does not have a direct WB temperature setting, + // you might need to convert this to r/b gains for manual control. + // For now, let's assume a simplified approach. + if (value < 4000) + { + options->setWhiteBalance(WhiteBalance_Modes::WB_TUNGSTEN); + } + else if (value < 5000) + { + options->setWhiteBalance(WhiteBalance_Modes::WB_INDOOR); + } + else if (value < 6500) + { + options->setWhiteBalance(WhiteBalance_Modes::WB_DAYLIGHT); + } + else + { + options->setWhiteBalance(WhiteBalance_Modes::WB_CLOUDY); + } + needsReconfigure.store(true, std::memory_order_release); + break; + + // case cv::CAP_PROP_ZOOM: // This is a custom property for ROI + // options->roi_x = options->roi_y = (1.0 - value) / 2.0; // Assuming value is normalized zoom level (0.0 - 1.0) + // options->roi_width = options->roi_height = value; + // break; + + case cv::CAP_PROP_XI_AEAG_ROI_OFFSET_X: + options->roi_x = value; + app->ApplyRoiSettings(); + break; + + case cv::CAP_PROP_XI_AEAG_ROI_OFFSET_Y: + options->roi_y = value; + app->ApplyRoiSettings(); + break; + + case cv::CAP_PROP_XI_AEAG_ROI_WIDTH: + options->roi_width = value; + app->ApplyRoiSettings(); + break; + + case cv::CAP_PROP_XI_AEAG_ROI_HEIGHT: + options->roi_height = value; + app->ApplyRoiSettings(); + break; + + case cv::CAP_PROP_FOURCC: + { + // Not implemented yet + + // char fourcc[4]; + // fourcc[0] = (char)((int)value & 0XFF); + // fourcc[1] = (char)(((int)value >> 8) & 0XFF); + // fourcc[2] = (char)(((int)value >> 16) & 0XFF); + // fourcc[3] = (char)(((int)value >> 24) & 0XFF); + // if(fourcc[0]=='M'&&fourcc[1]=='J'&&fourcc[2]=='P'&&fourcc[3]=='G'){ + + // } + // else 
if(fourcc[0]=='Y'&&fourcc[1]=='U'&&fourcc[2]=='Y'&&fourcc[3]=='V'){ + + // } + // else if(fourcc[0]=='R'&&fourcc[1]=='G'&&fourcc[2]=='B'&&fourcc[3]=='3'){ + // still_flags = LibcameraApp::FLAG_STILL_RGB; + // } + // else{ + // std::cerr << "Warning: FourCC code " << fourcc << " not supported." << std::endl; + // return false; + // } + // // needsReconfigure.store(true, std::memory_order_release); + break; + } + + case cv::CAP_PROP_FRAME_WIDTH: + options->video_width = options->photo_width = (int)value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_FRAME_HEIGHT: + options->video_height = options->photo_height = (int)value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_FPS: + options->framerate = (float)value; + needsReconfigure.store(true, std::memory_order_release); + break; + case cv::CAP_PROP_AUTOFOCUS: // Not implemented + case cv::CAP_PROP_BUFFERSIZE: // Not implemented + case cv::CAP_PROP_PAN: // Not implemented + case cv::CAP_PROP_TILT: // Not implemented + case cv::CAP_PROP_ROLL: // Not implemen ted + case cv::CAP_PROP_IRIS: // Not implemented + // These properties might need to trigger a re-configuration of the camera. + // You can handle them here if you want to support changing resolution or framerate on-the-fly. + // For now, we'll return false to indicate that these properties are not supported for dynamic changes. + std::cerr << "Warning: Property " << propId << " is not supported for dynamic changes." 
<< std::endl; + return false; + + default: + std::cerr << "Warning: Unsupported property: " << propId << std::endl; + return false; + } + + // if (needsReconfigure) + // { + // if (isFramePending) + // { + // stopVideo(); + // startVideo(); + // } + // } + return true; + } + + bool LibcameraCapture::open(int _index) + { + cv::String name; + /* Select camera, or rather, V4L video source */ + if (_index < 0) // Asking for the first device available + { + for (int autoindex = 0; autoindex < 8; ++autoindex) // 8=MAX_CAMERAS + { + name = cv::format("/dev/video%d", autoindex); + /* Test using an open to see if this new device name really does exists. */ + int h = ::open(name.c_str(), O_RDONLY); + if (h != -1) + { + ::close(h); + _index = autoindex; + break; + } + } + if (_index < 0) + { + CV_LOG_WARNING(NULL, "VIDEOIO(Libcamera): can't find camera device"); + name.clear(); + return false; + } + } + else + { + name = cv::format("/dev/video%d", _index); + } + + bool res = open(name); + if (!res) + { + CV_LOG_WARNING(NULL, "VIDEOIO(Libcamera:" << name << "): can't open camera by index"); + } + return res; + } + + bool LibcameraCapture::open(const std::string &_deviceName) + { + CV_LOG_DEBUG(NULL, "VIDEOIO(Libcamera:" << _deviceName << "): opening..."); + // Some parameters initialization here, maybe more needed. 
+ options->video_width = 1280; + options->video_height = 720; + options->framerate = 30; + options->verbose = true; + return startVideo(); + } + + Ptr createLibcameraCapture_file(const std::string &filename) + { + auto ret = makePtr(); + if (ret->open(filename)) + return ret; + return NULL; + } + + Ptr createLibcameraCapture_cam(int index) + { + Ptr cap = makePtr(); + if (cap && cap->open(index)) + return cap; + return Ptr(); + } + +} // namespace \ No newline at end of file diff --git a/modules/videoio/src/cap_libcamera.hpp b/modules/videoio/src/cap_libcamera.hpp new file mode 100644 index 0000000000..75d37bab28 --- /dev/null +++ b/modules/videoio/src/cap_libcamera.hpp @@ -0,0 +1,396 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +// #include +#include +// #include + +namespace cv +{ + + enum Exposure_Modes + { + EXPOSURE_NORMAL = libcamera::controls::ExposureNormal, + EXPOSURE_SHORT = libcamera::controls::ExposureShort, + EXPOSURE_CUSTOM = libcamera::controls::ExposureCustom + }; + + enum Metering_Modes + { + METERING_CENTRE = libcamera::controls::MeteringCentreWeighted, + METERING_SPOT = libcamera::controls::MeteringSpot, + METERING_MATRIX = libcamera::controls::MeteringMatrix, + METERING_CUSTOM = libcamera::controls::MeteringCustom + }; + + enum WhiteBalance_Modes + { + WB_AUTO = libcamera::controls::AwbAuto, + WB_NORMAL = libcamera::controls::AwbAuto, + WB_INCANDESCENT = libcamera::controls::AwbIncandescent, + WB_TUNGSTEN = libcamera::controls::AwbTungsten, + WB_FLUORESCENT = libcamera::controls::AwbFluorescent, + WB_INDOOR = libcamera::controls::AwbIndoor, + WB_DAYLIGHT = libcamera::controls::AwbDaylight, + WB_CLOUDY = libcamera::controls::AwbCloudy, + WB_CUSTOM = libcamera::controls::AwbAuto + }; + + class Options + { + public: + Options() + { + timeout = 1000; + metering_index = Metering_Modes::METERING_CENTRE; + exposure_index = Exposure_Modes::EXPOSURE_NORMAL; + awb_index = WhiteBalance_Modes::WB_AUTO; + 
saturation = 1.0f; + contrast = 1.0f; + sharpness = 1.0f; + brightness = 0.0f; + shutter = 0.0f; + gain = 0.0f; + ev = 0.0f; + roi_x = roi_y = roi_width = roi_height = 0; + awb_gain_r = awb_gain_b = 0; + denoise = "auto"; + verbose = false; + transform = libcamera::Transform::Identity; + camera = 0; + } + + ~Options() {} + + void setMetering(Metering_Modes meteringmode) { metering_index = meteringmode; } + void setWhiteBalance(WhiteBalance_Modes wb) { awb_index = wb; } + void setExposureMode(Exposure_Modes exp) { exposure_index = exp; } + + int getExposureMode() { return exposure_index; } + int getMeteringMode() { return metering_index; } + int getWhiteBalance() { return awb_index; } + + bool help; + bool version; + bool list_cameras; + bool verbose; + uint64_t timeout; // in ms + unsigned int photo_width, photo_height; + unsigned int video_width, video_height; + bool rawfull; + libcamera::Transform transform; + float roi_x, roi_y, roi_width, roi_height; + float shutter; + float gain; + float ev; + float awb_gain_r; + float awb_gain_b; + float brightness; + float contrast; + float saturation; + float sharpness; + float framerate; + std::string denoise; + std::string info_text; + unsigned int camera; + + protected: + int metering_index; + int exposure_index; + int awb_index; + + private: + }; + struct CompletedRequest; + using CompletedRequestPtr = std::shared_ptr; + + namespace controls = libcamera::controls; + namespace properties = libcamera::properties; + + class LibcameraApp + { + public: + using Stream = libcamera::Stream; + using FrameBuffer = libcamera::FrameBuffer; + using ControlList = libcamera::ControlList; + using Request = libcamera::Request; + using CameraManager = libcamera::CameraManager; + using Camera = libcamera::Camera; + using CameraConfiguration = libcamera::CameraConfiguration; + using FrameBufferAllocator = libcamera::FrameBufferAllocator; + using StreamRole = libcamera::StreamRole; + using StreamRoles = std::vector; + using PixelFormat = 
libcamera::PixelFormat; + using StreamConfiguration = libcamera::StreamConfiguration; + using BufferMap = Request::BufferMap; + using Size = libcamera::Size; + using Rectangle = libcamera::Rectangle; + enum class MsgType + { + RequestComplete, + Quit + }; + typedef std::variant MsgPayload; + struct Msg + { + Msg(MsgType const &t) : type(t) {} + template + Msg(MsgType const &t, T p) : type(t), payload(std::forward(p)) + { + } + MsgType type; + MsgPayload payload; + }; + + // Some flags that can be used to give hints to the camera configuration. + static constexpr unsigned int FLAG_STILL_NONE = 0; + static constexpr unsigned int FLAG_STILL_BGR = 1; // supply BGR images, not YUV + static constexpr unsigned int FLAG_STILL_RGB = 2; // supply RGB images, not YUV + static constexpr unsigned int FLAG_STILL_RAW = 4; // request raw image stream + static constexpr unsigned int FLAG_STILL_DOUBLE_BUFFER = 8; // double-buffer stream + static constexpr unsigned int FLAG_STILL_TRIPLE_BUFFER = 16; // triple-buffer stream + static constexpr unsigned int FLAG_STILL_BUFFER_MASK = 24; // mask for buffer flags + + static constexpr unsigned int FLAG_VIDEO_NONE = 0; + static constexpr unsigned int FLAG_VIDEO_RAW = 1; // request raw image stream + static constexpr unsigned int FLAG_VIDEO_JPEG_COLOURSPACE = 2; // force JPEG colour space + + LibcameraApp(std::unique_ptr const opts = nullptr); + virtual ~LibcameraApp(); + + Options *GetOptions() const { return options_.get(); } + + std::string const &CameraId() const; + void OpenCamera(); + void CloseCamera(); + + void ConfigureStill(unsigned int flags = FLAG_STILL_NONE); + void ConfigureViewfinder(); + + void Teardown(); + void StartCamera(); + void StopCamera(); + + void ApplyRoiSettings(); + + Msg Wait(); + void PostMessage(MsgType &t, MsgPayload &p); + + Stream *GetStream(std::string const &name, unsigned int *w = nullptr, unsigned int *h = nullptr, + unsigned int *stride = nullptr) const; + Stream *ViewfinderStream(unsigned int *w = 
nullptr, unsigned int *h = nullptr, + unsigned int *stride = nullptr) const; + Stream *StillStream(unsigned int *w = nullptr, unsigned int *h = nullptr, unsigned int *stride = nullptr) const; + Stream *RawStream(unsigned int *w = nullptr, unsigned int *h = nullptr, unsigned int *stride = nullptr) const; + Stream *VideoStream(unsigned int *w = nullptr, unsigned int *h = nullptr, unsigned int *stride = nullptr) const; + Stream *LoresStream(unsigned int *w = nullptr, unsigned int *h = nullptr, unsigned int *stride = nullptr) const; + Stream *GetMainStream() const; + + std::vector> Mmap(FrameBuffer *buffer) const; + + void SetControls(ControlList &controls); + void StreamDimensions(Stream const *stream, unsigned int *w, unsigned int *h, unsigned int *stride) const; + + protected: + std::unique_ptr options_; + + private: + static std::shared_ptr getCameraManager() + { + static std::shared_ptr camera_manager_; + if (!camera_manager_) + { + std::cerr << "creating manager" << std::endl; + camera_manager_ = std::make_shared(); + int ret = camera_manager_->start(); + if (ret) + throw std::runtime_error("camera manager failed to start," + "code " + + std::to_string(-ret)); + } + + return camera_manager_; + } + + template + class MessageQueue + { + public: + template + void Post(U &&msg) + { + std::unique_lock lock(mutex_); + queue_.push(std::forward(msg)); + cond_.notify_one(); + } + T Wait() + { + std::unique_lock lock(mutex_); + cond_.wait(lock, [this] + { return !queue_.empty(); }); + T msg = std::move(queue_.front()); + queue_.pop(); + return msg; + } + void Clear() + { + std::unique_lock lock(mutex_); + queue_ = {}; + } + + private: + std::queue queue_; + std::mutex mutex_; + std::condition_variable cond_; + }; + + void setupCapture(); + void makeRequests(); + void queueRequest(CompletedRequest *completed_request); + void requestComplete(Request *request); + void configureDenoise(const std::string &denoise_mode); + + // std::unique_ptr camera_manager_; + std::shared_ptr 
camera_; + bool camera_acquired_ = false; + std::unique_ptr configuration_; + std::map>> mapped_buffers_; + std::map streams_; + FrameBufferAllocator *allocator_ = nullptr; + std::map> frame_buffers_; + std::queue free_requests_; + std::vector> requests_; + std::mutex completed_requests_mutex_; + std::set completed_requests_; + bool camera_started_ = false; + std::mutex camera_stop_mutex_; + MessageQueue msg_queue_; + // For setting camera controls. + std::mutex control_mutex_; + ControlList controls_; + // Other: + uint64_t last_timestamp_; + uint64_t sequence_ = 0; + }; + + class Metadata + { + public: + Metadata() = default; + + Metadata(Metadata const &other) + { + std::scoped_lock other_lock(other.mutex_); + data_ = other.data_; + } + + Metadata(Metadata &&other) + { + std::scoped_lock other_lock(other.mutex_); + data_ = std::move(other.data_); + other.data_.clear(); + } + + template + void Set(std::string const &tag, T &&value) + { + std::scoped_lock lock(mutex_); + data_.insert_or_assign(tag, std::forward(value)); + } + + template + int Get(std::string const &tag, T &value) const + { + std::scoped_lock lock(mutex_); + auto it = data_.find(tag); + if (it == data_.end()) + return -1; + value = std::any_cast(it->second); + return 0; + } + + void Clear() + { + std::scoped_lock lock(mutex_); + data_.clear(); + } + + Metadata &operator=(Metadata const &other) + { + std::scoped_lock lock(mutex_, other.mutex_); + data_ = other.data_; + return *this; + } + + Metadata &operator=(Metadata &&other) + { + std::scoped_lock lock(mutex_, other.mutex_); + data_ = std::move(other.data_); + other.data_.clear(); + return *this; + } + + void Merge(Metadata &other) + { + std::scoped_lock lock(mutex_, other.mutex_); + data_.merge(other.data_); + } + + template + T *GetLocked(std::string const &tag) + { + // This allows in-place access to the Metadata contents, + // for which you should be holding the lock. 
+ auto it = data_.find(tag); + if (it == data_.end()) + return nullptr; + return std::any_cast(&it->second); + } + + template + void SetLocked(std::string const &tag, T &&value) + { + // Use this only if you're holding the lock yourself. + data_.insert_or_assign(tag, std::forward(value)); + } + + // Note: use of (lowercase) lock and unlock means you can create scoped + // locks with the standard lock classes. + // e.g. std::lock_guard lock(metadata) + void lock() { mutex_.lock(); } + void unlock() { mutex_.unlock(); } + + private: + mutable std::mutex mutex_; + std::map data_; + }; + + struct CompletedRequest + { + using BufferMap = libcamera::Request::BufferMap; + using ControlList = libcamera::ControlList; + using Request = libcamera::Request; + + CompletedRequest(unsigned int seq, Request *r) + : sequence(seq), buffers(r->buffers()), metadata(r->metadata()), request(r) + { + r->reuse(); + } + unsigned int sequence; + BufferMap buffers; + ControlList metadata; + Request *request; + float framerate; + Metadata post_process_metadata; + }; + + class LibcameraCapture; + +}; \ No newline at end of file diff --git a/modules/videoio/src/videoio_registry.cpp b/modules/videoio/src/videoio_registry.cpp index a84258ad90..2797a22f33 100644 --- a/modules/videoio/src/videoio_registry.cpp +++ b/modules/videoio/src/videoio_registry.cpp @@ -105,6 +105,10 @@ static const struct VideoBackendInfo builtin_backends[] = DECLARE_STATIC_BACKEND(CAP_DSHOW, "DSHOW", MODE_CAPTURE_BY_INDEX, 0, create_DShow_capture, 0) #endif +#ifdef HAVE_LIBCAMERA + DECLARE_STATIC_BACKEND(CAP_LIBCAMERA, "LIBCAMERA", MODE_CAPTURE_ALL, createLibcameraCapture_file, createLibcameraCapture_cam, 0) +#endif + // Linux, some Unix #if defined HAVE_CAMV4L2 DECLARE_STATIC_BACKEND(CAP_V4L2, "V4L2", MODE_CAPTURE_ALL, create_V4L_capture_file, create_V4L_capture_cam, 0)