Hi, I'm fairly new to this framework, but I'm working with it and need to integrate it into an already existing project. The project used OpenCV but is moving towards GStreamer and other frameworks, so OpenCV is still used in some parts, although it will be removed over time.
My issue is that I have an RTSP stream and I need to obtain the last frame as some sort of image or raw data for later processing and analysis in the project.
Currently I have built the following pipeline, which ends in an appsink so I can retrieve the frame data from there:
UPDATE_CHECKPOINT;
GstElement* VideoFrameProcessing::loadVideoCaptureGST(std::string source_video, Timer& timer_videocapture)
{
// ------------------------------------------------------------------------
// [Static variables for synchronization]
// - They keep the same functionality as the original, but with names
//   that match GStreamer (_gst).
// ------------------------------------------------------------------------
static std::recursive_mutex queue_gst_mutex;
static std::condition_variable_any queue_gst;
UPDATE_CHECKPOINT;
GstElement* _pipeline = nullptr;
bool finish_connecting = false;
while (!finish_connecting)
{
if (!video->info_video.is_a_ptz
|| video->info_video.forced_task
|| video->preset_ptz->initial_configuration)
{
queue_to_connect_cameras.push_back(video);
std::unique_lock<std::recursive_mutex> lk(queue_gst_mutex);
queue_gst.wait(lk, [&] {
LOCK_SCOPED_MUTEX(queue_to_connect_cameras.mutex_vector);
if (queue_to_connect_cameras.size() > 0 && queue_to_connect_cameras[0] == video)
return true;
return false;
});
}
logNeuralAndPrint("Conectandose a camara %s\n", source_video.c_str());
if (video->process_video)
{
timer_videocapture.startTimer();
UPDATE_CHECKPOINT;
// If it is BROADCASTED_REMOTE_VIDEO, load it with the dedicated method
if (video->info_video.type_video == BROADCASTED_REMOTE_VIDEO)
{
// This fails because loadVideoCaptureFromRemoteNeural has not been adapted
// to GStreamer yet; it is still written for OpenCV
//loadVideoCaptureFromRemoteNeural(_pipeline, source_video);
}
else
{
// Build a GStreamer pipeline
GError* error = nullptr;
//std::string pipeline_str = "rtspsrc location=" + source_video + " ! rtph264depay ! decodebin ! videoconvert ! autovideosink"; //Working line video/x-raw, format=RGB ! appsink name=sink"
//std::string pipeline_str = "rtspsrc location=" + source_video + " ! rtph264depay ! queue ! decodebin ! tee name=t ! t. ! queue ! videoconvert ! autovideosink name=streamsink ! t. ! queue ! videoconvert ! appsink";
//std::string pipeline_str = "rtspsrc location=" + source_video + " ! rtph264depay ! queue ! decodebin ! tee name=t t. ! queue ! videoconvert ! autovideosink name=streamsink t. ! videoconvert ! appsink";
std::string pipeline_str = "rtspsrc location=" + source_video + " ! rtph264depay ! queue ! decodebin ! videoconvert ! video/x-raw,format=RGBA ! appsink name=stream";
GstElement* temp_pipeline = gst_parse_launch(pipeline_str.c_str(), &error);
if (temp_pipeline && !error)
{
GstStateChangeReturn ret = gst_element_set_state(temp_pipeline, GST_STATE_PLAYING);
if (ret != GST_STATE_CHANGE_FAILURE)
{
_pipeline = temp_pipeline;
}
else
{
// Playback failed => release the resources
gst_element_set_state(temp_pipeline, GST_STATE_NULL);
gst_object_unref(temp_pipeline);
temp_pipeline = nullptr;
}
}
else
{
// If parsing fails => free it
if (temp_pipeline)
{
gst_object_unref(temp_pipeline);
}
}
// For a PTZ in its initial configuration, the pipeline would be stored
if (video->info_video.is_a_ptz
&& video->preset_ptz->video->preset_ptz->initial_configuration)
{
// We could store the pipeline in the PTZ configuration
Video::current_ptz_configuration[video->info_video.id_ptz_parent].pipeline_gst = _pipeline;
}
}
UPDATE_CHECKPOINT;
timer_videocapture.stopTimer();
finish_connecting = true;
}
else if (!video->process_video)
{
// If the video gets disabled while we are waiting
finish_connecting = true;
}
else
{
// Case: PTZ with a previous configuration => reuse the existing pipeline
_pipeline = Video::current_ptz_configuration[video->info_video.id_ptz_parent].pipeline_gst;
videocapture_directly_connected = false;
finish_connecting = true;
}
}
// Remove 'video' from the queue (we already tried)
queue_to_connect_cameras.deleteComponentFromVector(video);
// Notify queue changes through "queue_gst"
{
std::lock_guard<std::recursive_mutex> lk(queue_gst_mutex);
queue_gst.notify_all();
}
// Debug logs to see whether the capture was opened or not
if (_pipeline != nullptr) {
logNeuralAndPrint("Videocapture opened %s\n", video->info_video.source_video.c_str());
}
else if (!video->process_video) {
logNeuralAndPrint("Process video = false --> Videocapture NOT done %s\n", video->info_video.source_video.c_str());
}
else {
// If it could not be opened, wait 500 ms before retrying in the loop
// (to save CPU, we wait here instead of polling endlessly)
logNeuralAndPrint("Videocapture NOT opened %s (timeout at %d s) \n", video->info_video.source_video.c_str(), static_cast<int>(timer_videocapture.getLastValueMs() / 1000));
Sleep(500);
}
UPDATE_CHECKPOINT;
return _pipeline;
}
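Since what I actually want is always the most recent frame, I was also considering configuring the appsink right after gst_parse_launch so it drops old buffers instead of queuing them. This is just a sketch based on the appsink property documentation (temp_pipeline and the sink name "stream" come from the function above); I haven't fully verified it in my setup:
// Fetch the appsink from the parsed pipeline and keep only the newest buffer
GstElement* sink = gst_bin_get_by_name(GST_BIN(temp_pipeline), "stream");
if (sink)
{
// drop=TRUE + max-buffers=1 => old frames are discarded, pull_sample returns the latest
// sync=FALSE => the sink does not wait on the pipeline clock, we just sample frames
g_object_set(G_OBJECT(sink),
"max-buffers", 1,
"drop", TRUE,
"sync", FALSE,
NULL);
gst_object_unref(sink);
}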
And I’m trying to obtain the frame in another part of the code with the following lines:
cv::Mat last_frame_retrieved;
std::vector<uint8_t*> frames_gst;
current_gst.sink = gst_bin_get_by_name(GST_BIN(current_gst.pipeline), "stream");
current_gst.appsink = GST_APP_SINK(current_gst.sink);
//GST_IS_APP_SINK(appsink);
current_gst.sample = gst_app_sink_pull_sample(current_gst.appsink); //change timeout time
current_gst.buffer = gst_sample_get_buffer(current_gst.sample);
GstMapInfo map;
memset(&map, 0, sizeof(map));
GstVideoFrame frame;
GstVideoInfo info;
//gst_video_frame_map(&frame, map.data, buffer, );
int gst_width, gst_height;
gboolean mapped = gst_buffer_map(current_gst.buffer, &map, GST_MAP_READ);
frames_gst.push_back(map.data);
//last_frame_retrieved = map.data;
GstCaps* caps = gst_sample_get_caps(current_gst.sample);
GstStructure* capsStruct = gst_caps_get_structure(caps, 0);
gst_structure_get_int(capsStruct, "width", &gst_width);
gst_structure_get_int(capsStruct, "height", &gst_height);
//stbi_write_jpg("jpg_test.jpg", gst_width, gst_height, 3, (char*)map.data, 70);
//cv::imwrite("C:\XYZTEST.jpg", last_frame_retrieved);
//cv::imshow("TEST", map.data);
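One thing I'm not sure about is the "change timeout time" note I left next to gst_app_sink_pull_sample: as far as I can tell that call simply blocks until a sample arrives, and the variant with a timeout would be gst_app_sink_try_pull_sample (small sketch, assuming GStreamer >= 1.10):
// Wait at most 2 seconds for a frame instead of blocking indefinitely
GstSample* sample = gst_app_sink_try_pull_sample(current_gst.appsink, 2 * GST_SECOND);
if (sample == nullptr)
{
// Timed out, EOS, or the pipeline is not producing frames yet
}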
The problem is that when my code reaches the GstMapInfo, map.data comes back null and there's nothing inside. I don't know if the frame data is supposed to be in the map or in the buffer, or if I should change some parts of the pipeline.
So I don't know how to solve this. If anyone knows how, or could point me in the right direction for obtaining the last frame from the stream, I would really appreciate it.
Thank you!!
PS: I know there's no explicit or direct conversion from GStreamer data to an OpenCV class. For the moment, using the cv::Mat last_frame_retrieved as a container for the frame would work too, but the idea is to eventually stop using OpenCV altogether.
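For reference, this is roughly the sequence I understand should work from the appsink docs, with the cv::Mat only as a temporary container (the RGBA/CV_8UC4 assumption comes from the caps filter in my pipeline, pullLastFrame is just an illustrative name, and I'm ignoring possible row padding/stride):
#include <gst/gst.h>
#include <gst/app/gstappsink.h>
#include <opencv2/core.hpp>
// Pull one sample from the appsink named "stream" and copy it into a cv::Mat.
// Returns an empty Mat if the sink is missing or no sample could be pulled.
cv::Mat pullLastFrame(GstElement* pipeline)
{
cv::Mat result;
GstElement* sink = gst_bin_get_by_name(GST_BIN(pipeline), "stream");
if (!sink)
return result;
GstSample* sample = gst_app_sink_pull_sample(GST_APP_SINK(sink)); // blocks until a frame arrives
if (sample)
{
GstBuffer* buffer = gst_sample_get_buffer(sample);
GstCaps* caps = gst_sample_get_caps(sample);
GstStructure* s = gst_caps_get_structure(caps, 0);
int width = 0, height = 0;
gst_structure_get_int(s, "width", &width);
gst_structure_get_int(s, "height", &height);
GstMapInfo map;
if (gst_buffer_map(buffer, &map, GST_MAP_READ))
{
// RGBA from the caps filter => 4 bytes per pixel, assuming no row padding
cv::Mat frame(height, width, CV_8UC4, (void*)map.data);
result = frame.clone(); // deep copy, map.data is only valid until unmap
gst_buffer_unmap(buffer, &map);
}
gst_sample_unref(sample); // the sample owns the buffer, unref when done
}
gst_object_unref(sink);
return result;
}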