From 28feae1ec00a9f5aae920e6a5e39b78a7ce9ef71 Mon Sep 17 00:00:00 2001 From: Michael Gruner Date: Wed, 13 Oct 2021 12:12:49 -0600 Subject: [PATCH 01/10] Add new feature to build 1.14 backport of the sink --- Cargo.toml | 3 + build.rs | 37 +- src/base/aggregator.rs | 106 + src/base/aggregator_pad.rs | 23 + src/base/auto/aggregator.rs | 215 ++ src/base/auto/aggregator_pad.rs | 189 ++ src/base/auto/mod.rs | 13 + src/base/ffi.rs | 235 ++ src/base/gstaggregator.c | 3491 +++++++++++++++++++++++++++ src/base/gstaggregator.h | 396 +++ src/base/mod.rs | 26 + src/base/subclass/aggregator.rs | 971 ++++++++ src/base/subclass/aggregator_pad.rs | 138 ++ src/base/subclass/mod.rs | 17 + src/base/utils.rs | 26 + 15 files changed, 5885 insertions(+), 1 deletion(-) create mode 100644 src/base/aggregator.rs create mode 100644 src/base/aggregator_pad.rs create mode 100644 src/base/auto/aggregator.rs create mode 100644 src/base/auto/aggregator_pad.rs create mode 100644 src/base/auto/mod.rs create mode 100644 src/base/ffi.rs create mode 100644 src/base/gstaggregator.c create mode 100644 src/base/gstaggregator.h create mode 100644 src/base/mod.rs create mode 100644 src/base/subclass/aggregator.rs create mode 100644 src/base/subclass/aggregator_pad.rs create mode 100644 src/base/subclass/mod.rs create mode 100644 src/base/utils.rs diff --git a/Cargo.toml b/Cargo.toml index bb64db6..328a15e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,12 +19,15 @@ byteorder = "1.0" [build-dependencies] gst-plugin-version-helper = "0.7" +cc = "1.0" +pkg-config = "0.3" [features] default = ["interlaced-fields", "reference-timestamps", "sink"] interlaced-fields = ["gst/v1_16", "gst-video/v1_16"] reference-timestamps = ["gst/v1_14"] sink = ["gst/v1_18", "gst-base/v1_18"] +sink-v1_14 = ["gst/v1_14", "gst-base/v1_14"] advanced-sdk = [] [lib] diff --git a/build.rs b/build.rs index cda12e5..66e6b24 100644 --- a/build.rs +++ b/build.rs @@ -1,3 +1,38 @@ fn main() { - gst_plugin_version_helper::info() + gst_plugin_version_helper::info(); + + if !cfg!(feature = "sink-v1_14") { + return; + } + + let gstreamer = pkg_config::probe_library("gstreamer-1.0").unwrap(); + let includes = [gstreamer.include_paths]; + + let files = ["src/base/gstaggregator.c"]; + + let mut build = cc::Build::new(); + build.include("src/base"); + + for f in files.iter() { + build.file(f); + } + + for p in includes.iter().flatten() { + build.include(p); + } + + build.define( + "PACKAGE_BUGREPORT", + "\"https://gitlab.freedesktop.org/gstreamer/gstreamer/issues/new\"", + ); + build.extra_warnings(false); + build.define("GstAggregator", "GstAggregatorFallback"); + build.define("GstAggregatorClass", "GstAggregatorFallbackClass"); + build.define("GstAggregatorPrivate", "GstAggregatorFallbackPrivate"); + build.define("GstAggregatorPad", "GstAggregatorFallbackPad"); + build.define("GstAggregatorPadClass", "GstAggregatorFallbackPadClass"); + build.define("GstAggregatorPadPrivate", "GstAggregatorFallbackPadPrivate"); + build.define("GST_BASE_API", "G_GNUC_INTERNAL"); + + build.compile("libgstaggregator-c.a"); } diff --git a/src/base/aggregator.rs b/src/base/aggregator.rs new file mode 100644 index 0000000..e17d0f6 --- /dev/null +++ b/src/base/aggregator.rs @@ -0,0 +1,106 @@ +// Take a look at the license at the top of the repository in the LICENSE file. 
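Editor's note: the build script above only runs when the new `sink-v1_14` feature is active. It compiles the vendored gstaggregator.c against the system gstreamer-1.0 headers and redefines the GstAggregator type names (GstAggregatorFallback, etc.) so the backported symbols cannot collide with the aggregator that ships in libgstbase on newer installations. A minimal, hypothetical sketch of how plugin code might then select between the stock gst-base bindings and this backport follows; the `crate::base` module path and the exact feature wiring are illustrative assumptions, not taken from this patch.

    // Hypothetical feature-gated re-export. With `sink` (gst-base >= 1.18) the
    // stock bindings are used; with only `sink-v1_14` the vendored backport in
    // `crate::base` (added by this patch) is used instead.
    #[cfg(feature = "sink")]
    pub use gst_base::{Aggregator, AggregatorPad};
    #[cfg(all(feature = "sink-v1_14", not(feature = "sink")))]
    pub use crate::base::{Aggregator, AggregatorPad};
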
+ +use super::ffi; +use super::Aggregator; + +use glib::signal::{connect_raw, SignalHandlerId}; +use glib::translate::*; +use glib::IsA; +use glib::Value; +use gst::glib; +use gst::prelude::*; + +use std::boxed::Box as Box_; +use std::mem; +use std::ptr; + +pub trait AggregatorExtManual: 'static { + fn allocator(&self) -> (Option, gst::AllocationParams); + + fn finish_buffer(&self, buffer: gst::Buffer) -> Result; + fn min_upstream_latency(&self) -> gst::ClockTime; + + fn set_min_upstream_latency(&self, min_upstream_latency: gst::ClockTime); + + #[doc(alias = "min-upstream-latency")] + fn connect_min_upstream_latency_notify( + &self, + f: F, + ) -> SignalHandlerId; +} + +impl> AggregatorExtManual for O { + fn allocator(&self) -> (Option, gst::AllocationParams) { + unsafe { + let mut allocator = ptr::null_mut(); + let mut params = mem::zeroed(); + ffi::gst_aggregator_get_allocator( + self.as_ref().to_glib_none().0, + &mut allocator, + &mut params, + ); + (from_glib_full(allocator), params.into()) + } + } + + fn finish_buffer(&self, buffer: gst::Buffer) -> Result { + unsafe { + try_from_glib(ffi::gst_aggregator_finish_buffer( + self.as_ref().to_glib_none().0, + buffer.into_ptr(), + )) + } + } + + fn min_upstream_latency(&self) -> gst::ClockTime { + unsafe { + let mut value = Value::from_type(::static_type()); + glib::gobject_ffi::g_object_get_property( + self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, + b"min-upstream-latency\0".as_ptr() as *const _, + value.to_glib_none_mut().0, + ); + value + .get() + .expect("AggregatorExtManual::min_upstream_latency") + } + } + + fn set_min_upstream_latency(&self, min_upstream_latency: gst::ClockTime) { + unsafe { + glib::gobject_ffi::g_object_set_property( + self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, + b"min-upstream-latency\0".as_ptr() as *const _, + Value::from(&min_upstream_latency).to_glib_none().0, + ); + } + } + + fn connect_min_upstream_latency_notify( + &self, + f: F, + ) -> SignalHandlerId { + unsafe { + let f: Box_ = Box_::new(f); + connect_raw( + self.as_ptr() as *mut _, + b"notify::min-upstream-latency\0".as_ptr() as *const _, + Some(mem::transmute::<_, unsafe extern "C" fn()>( + notify_min_upstream_latency_trampoline:: as *const (), + )), + Box_::into_raw(f), + ) + } + } +} + +unsafe extern "C" fn notify_min_upstream_latency_trampoline( + this: *mut ffi::GstAggregator, + _param_spec: glib::ffi::gpointer, + f: glib::ffi::gpointer, +) where + P: IsA, +{ + let f: &F = &*(f as *const F); + f(&Aggregator::from_glib_borrow(this).unsafe_cast_ref()) +} diff --git a/src/base/aggregator_pad.rs b/src/base/aggregator_pad.rs new file mode 100644 index 0000000..794ba1c --- /dev/null +++ b/src/base/aggregator_pad.rs @@ -0,0 +1,23 @@ +// Take a look at the license at the top of the repository in the LICENSE file. 
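Editor's note: the manual pad binding below exposes only `segment()`, which copies the pad's segment while holding the pad's object lock; the rest of the pad API (`peek_buffer()`, `pop_buffer()`, `has_buffer()`, `is_eos()`) comes from the generated `AggregatorPadExt` trait later in this patch. A short hypothetical usage sketch, assuming both extension traits are in scope; the helper name and the readiness criterion are illustrative only.

    // Hypothetical helper (not part of this patch): decide whether a sink pad
    // can contribute to the current aggregation cycle.
    fn pad_ready(pad: &AggregatorPad) -> bool {
        if pad.is_eos() {
            // No point waiting for more data on a pad that already finished.
            return false;
        }
        // segment() takes the pad's object lock and returns a copy of the segment.
        let segment = pad.segment();
        pad.has_buffer() && segment.format() == gst::Format::Time
    }
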
+ +use super::ffi; +use super::AggregatorPad; + +use glib::object::IsA; +use glib::translate::*; +use gst::glib; + +pub trait AggregatorPadExtManual: 'static { + #[doc(alias = "get_segment")] + fn segment(&self) -> gst::Segment; +} + +impl> AggregatorPadExtManual for O { + fn segment(&self) -> gst::Segment { + unsafe { + let ptr: &ffi::GstAggregatorPad = &*(self.as_ptr() as *const _); + let _guard = super::utils::MutexGuard::lock(&ptr.parent.object.lock); + from_glib_none(&ptr.segment as *const gst::ffi::GstSegment) + } + } +} diff --git a/src/base/auto/aggregator.rs b/src/base/auto/aggregator.rs new file mode 100644 index 0000000..05227fe --- /dev/null +++ b/src/base/auto/aggregator.rs @@ -0,0 +1,215 @@ +// This file was generated by gir (https://github.com/gtk-rs/gir) +// from gir-files (https://github.com/gtk-rs/gir-files) +// from gst-gir-files (https://gitlab.freedesktop.org/gstreamer/gir-files-rs.git) +// DO NOT EDIT + +use super::super::ffi; + +use glib::signal::connect_raw; +use glib::signal::SignalHandlerId; +use glib::translate::*; + +use gst::glib; +use gst::prelude::*; + +use std::boxed::Box as Box_; +use std::mem::transmute; + +glib::wrapper! { + pub struct Aggregator(Object) @extends gst::Element, gst::Object; + + match fn { + type_ => || ffi::gst_aggregator_get_type(), + } +} + +unsafe impl Send for Aggregator {} +unsafe impl Sync for Aggregator {} + +pub const NONE_AGGREGATOR: Option<&Aggregator> = None; + +pub trait AggregatorExt: 'static { + //#[doc(alias = "gst_aggregator_get_allocator")] + //#[doc(alias = "get_allocator")] + //fn allocator(&self, allocator: /*Ignored*/Option, params: /*Ignored*/gst::AllocationParams); + + #[doc(alias = "gst_aggregator_get_buffer_pool")] + #[doc(alias = "get_buffer_pool")] + fn buffer_pool(&self) -> Option; + + #[doc(alias = "gst_aggregator_get_latency")] + #[doc(alias = "get_latency")] + fn latency(&self) -> Option; + + #[doc(alias = "gst_aggregator_negotiate")] + fn negotiate(&self) -> bool; + + #[doc(alias = "gst_aggregator_set_latency")] + fn set_latency( + &self, + min_latency: gst::ClockTime, + max_latency: impl Into>, + ); + + #[doc(alias = "gst_aggregator_set_src_caps")] + fn set_src_caps(&self, caps: &gst::Caps); + + #[doc(alias = "gst_aggregator_simple_get_next_time")] + fn simple_get_next_time(&self) -> Option; + + #[doc(alias = "start-time")] + fn start_time(&self) -> u64; + + #[doc(alias = "start-time")] + fn set_start_time(&self, start_time: u64); + + #[doc(alias = "latency")] + fn connect_latency_notify(&self, f: F) + -> SignalHandlerId; + + #[doc(alias = "start-time")] + fn connect_start_time_notify( + &self, + f: F, + ) -> SignalHandlerId; +} + +impl> AggregatorExt for O { + //fn allocator(&self, allocator: /*Ignored*/Option, params: /*Ignored*/gst::AllocationParams) { + // unsafe { TODO: call ffi:gst_aggregator_get_allocator() } + //} + + fn buffer_pool(&self) -> Option { + unsafe { + from_glib_full(ffi::gst_aggregator_get_buffer_pool( + self.as_ref().to_glib_none().0, + )) + } + } + + fn latency(&self) -> Option { + unsafe { + from_glib(ffi::gst_aggregator_get_latency( + self.as_ref().to_glib_none().0, + )) + } + } + + fn negotiate(&self) -> bool { + unsafe { + from_glib(ffi::gst_aggregator_negotiate( + self.as_ref().to_glib_none().0, + )) + } + } + + fn set_latency( + &self, + min_latency: gst::ClockTime, + max_latency: impl Into>, + ) { + unsafe { + ffi::gst_aggregator_set_latency( + self.as_ref().to_glib_none().0, + min_latency.into_glib(), + max_latency.into().into_glib(), + ); + } + } + + fn set_src_caps(&self, 
caps: &gst::Caps) { + unsafe { + ffi::gst_aggregator_set_src_caps(self.as_ref().to_glib_none().0, caps.to_glib_none().0); + } + } + + fn simple_get_next_time(&self) -> Option { + unsafe { + from_glib(ffi::gst_aggregator_simple_get_next_time( + self.as_ref().to_glib_none().0, + )) + } + } + + fn start_time(&self) -> u64 { + unsafe { + let mut value = glib::Value::from_type(::static_type()); + glib::gobject_ffi::g_object_get_property( + self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, + b"start-time\0".as_ptr() as *const _, + value.to_glib_none_mut().0, + ); + value + .get() + .expect("Return Value for property `start-time` getter") + } + } + + fn set_start_time(&self, start_time: u64) { + unsafe { + glib::gobject_ffi::g_object_set_property( + self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, + b"start-time\0".as_ptr() as *const _, + start_time.to_value().to_glib_none().0, + ); + } + } + + #[doc(alias = "latency")] + fn connect_latency_notify( + &self, + f: F, + ) -> SignalHandlerId { + unsafe extern "C" fn notify_latency_trampoline< + P: IsA, + F: Fn(&P) + Send + Sync + 'static, + >( + this: *mut ffi::GstAggregator, + _param_spec: glib::ffi::gpointer, + f: glib::ffi::gpointer, + ) { + let f: &F = &*(f as *const F); + f(&Aggregator::from_glib_borrow(this).unsafe_cast_ref()) + } + unsafe { + let f: Box_ = Box_::new(f); + connect_raw( + self.as_ptr() as *mut _, + b"notify::latency\0".as_ptr() as *const _, + Some(transmute::<_, unsafe extern "C" fn()>( + notify_latency_trampoline:: as *const (), + )), + Box_::into_raw(f), + ) + } + } + + #[doc(alias = "start-time")] + fn connect_start_time_notify( + &self, + f: F, + ) -> SignalHandlerId { + unsafe extern "C" fn notify_start_time_trampoline< + P: IsA, + F: Fn(&P) + Send + Sync + 'static, + >( + this: *mut ffi::GstAggregator, + _param_spec: glib::ffi::gpointer, + f: glib::ffi::gpointer, + ) { + let f: &F = &*(f as *const F); + f(&Aggregator::from_glib_borrow(this).unsafe_cast_ref()) + } + unsafe { + let f: Box_ = Box_::new(f); + connect_raw( + self.as_ptr() as *mut _, + b"notify::start-time\0".as_ptr() as *const _, + Some(transmute::<_, unsafe extern "C" fn()>( + notify_start_time_trampoline:: as *const (), + )), + Box_::into_raw(f), + ) + } + } +} diff --git a/src/base/auto/aggregator_pad.rs b/src/base/auto/aggregator_pad.rs new file mode 100644 index 0000000..a4c0a56 --- /dev/null +++ b/src/base/auto/aggregator_pad.rs @@ -0,0 +1,189 @@ +// This file was generated by gir (https://github.com/gtk-rs/gir) +// from gir-files (https://github.com/gtk-rs/gir-files) +// DO NOT EDIT + +use super::super::ffi; + +use glib::signal::connect_raw; +use glib::signal::SignalHandlerId; +use glib::translate::*; + +use gst::glib; +use gst::prelude::*; + +use std::boxed::Box as Box_; +use std::mem::transmute; + +glib::wrapper! 
{ + pub struct AggregatorPad(Object) @extends gst::Pad, gst::Object; + + match fn { + type_ => || ffi::gst_aggregator_pad_get_type(), + } +} + +unsafe impl Send for AggregatorPad {} +unsafe impl Sync for AggregatorPad {} + +pub const NONE_AGGREGATOR_PAD: Option<&AggregatorPad> = None; + +pub trait AggregatorPadExt: 'static { + #[doc(alias = "gst_aggregator_pad_drop_buffer")] + fn drop_buffer(&self) -> bool; + + #[doc(alias = "gst_aggregator_pad_has_buffer")] + fn has_buffer(&self) -> bool; + + #[doc(alias = "gst_aggregator_pad_is_eos")] + fn is_eos(&self) -> bool; + + #[doc(alias = "gst_aggregator_pad_peek_buffer")] + fn peek_buffer(&self) -> Option; + + #[doc(alias = "gst_aggregator_pad_pop_buffer")] + fn pop_buffer(&self) -> Option; + + #[doc(alias = "emit-signals")] + fn emits_signals(&self) -> bool; + + #[doc(alias = "emit-signals")] + fn set_emit_signals(&self, emit_signals: bool); + + fn connect_buffer_consumed( + &self, + f: F, + ) -> SignalHandlerId; + + #[doc(alias = "emit-signals")] + fn connect_emit_signals_notify( + &self, + f: F, + ) -> SignalHandlerId; +} + +impl> AggregatorPadExt for O { + fn drop_buffer(&self) -> bool { + unsafe { + from_glib(ffi::gst_aggregator_pad_drop_buffer( + self.as_ref().to_glib_none().0, + )) + } + } + + fn has_buffer(&self) -> bool { + unsafe { + from_glib(ffi::gst_aggregator_pad_has_buffer( + self.as_ref().to_glib_none().0, + )) + } + } + + fn is_eos(&self) -> bool { + unsafe { + from_glib(ffi::gst_aggregator_pad_is_eos( + self.as_ref().to_glib_none().0, + )) + } + } + + fn peek_buffer(&self) -> Option { + unsafe { + from_glib_full(ffi::gst_aggregator_pad_peek_buffer( + self.as_ref().to_glib_none().0, + )) + } + } + + fn pop_buffer(&self) -> Option { + unsafe { + from_glib_full(ffi::gst_aggregator_pad_pop_buffer( + self.as_ref().to_glib_none().0, + )) + } + } + + fn emits_signals(&self) -> bool { + unsafe { + let mut value = glib::Value::from_type(::static_type()); + glib::gobject_ffi::g_object_get_property( + self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, + b"emit-signals\0".as_ptr() as *const _, + value.to_glib_none_mut().0, + ); + value + .get() + .expect("Return Value for property `emit-signals` getter") + } + } + + fn set_emit_signals(&self, emit_signals: bool) { + unsafe { + glib::gobject_ffi::g_object_set_property( + self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, + b"emit-signals\0".as_ptr() as *const _, + emit_signals.to_value().to_glib_none().0, + ); + } + } + + #[doc(alias = "buffer-consumed")] + fn connect_buffer_consumed( + &self, + f: F, + ) -> SignalHandlerId { + unsafe extern "C" fn buffer_consumed_trampoline< + P: IsA, + F: Fn(&P, &gst::Buffer) + Send + Sync + 'static, + >( + this: *mut ffi::GstAggregatorPad, + object: *mut gst::ffi::GstBuffer, + f: glib::ffi::gpointer, + ) { + let f: &F = &*(f as *const F); + f( + &AggregatorPad::from_glib_borrow(this).unsafe_cast_ref(), + &from_glib_borrow(object), + ) + } + unsafe { + let f: Box_ = Box_::new(f); + connect_raw( + self.as_ptr() as *mut _, + b"buffer-consumed\0".as_ptr() as *const _, + Some(transmute::<_, unsafe extern "C" fn()>( + buffer_consumed_trampoline:: as *const (), + )), + Box_::into_raw(f), + ) + } + } + + #[doc(alias = "emit-signals")] + fn connect_emit_signals_notify( + &self, + f: F, + ) -> SignalHandlerId { + unsafe extern "C" fn notify_emit_signals_trampoline< + P: IsA, + F: Fn(&P) + Send + Sync + 'static, + >( + this: *mut ffi::GstAggregatorPad, + _param_spec: glib::ffi::gpointer, + f: glib::ffi::gpointer, + ) { + let f: &F = &*(f as *const F); 
+ f(&AggregatorPad::from_glib_borrow(this).unsafe_cast_ref()) + } + unsafe { + let f: Box_ = Box_::new(f); + connect_raw( + self.as_ptr() as *mut _, + b"notify::emit-signals\0".as_ptr() as *const _, + Some(transmute::<_, unsafe extern "C" fn()>( + notify_emit_signals_trampoline:: as *const (), + )), + Box_::into_raw(f), + ) + } + } +} diff --git a/src/base/auto/mod.rs b/src/base/auto/mod.rs new file mode 100644 index 0000000..09fb05f --- /dev/null +++ b/src/base/auto/mod.rs @@ -0,0 +1,13 @@ +mod aggregator; +pub use self::aggregator::AggregatorExt; +pub use self::aggregator::{Aggregator, NONE_AGGREGATOR}; + +mod aggregator_pad; +pub use self::aggregator_pad::AggregatorPadExt; +pub use self::aggregator_pad::{AggregatorPad, NONE_AGGREGATOR_PAD}; + +#[doc(hidden)] +pub mod traits { + pub use super::AggregatorExt; + pub use super::AggregatorPadExt; +} diff --git a/src/base/ffi.rs b/src/base/ffi.rs new file mode 100644 index 0000000..e4004f5 --- /dev/null +++ b/src/base/ffi.rs @@ -0,0 +1,235 @@ +#![allow(non_camel_case_types, non_upper_case_globals, non_snake_case)] +#![allow( + clippy::approx_constant, + clippy::type_complexity, + clippy::unreadable_literal +)] + +use gst::ffi as gst; + +#[allow(unused_imports)] +use libc::{ + c_char, c_double, c_float, c_int, c_long, c_short, c_uchar, c_uint, c_ulong, c_ushort, c_void, + intptr_t, size_t, ssize_t, time_t, uintptr_t, FILE, +}; + +#[allow(unused_imports)] +use ::gst::glib::ffi::{gboolean, gconstpointer, gpointer, GType}; + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct GstAggregatorClass { + pub parent_class: gst::GstElementClass, + pub flush: Option gst::GstFlowReturn>, + pub clip: Option< + unsafe extern "C" fn( + *mut GstAggregator, + *mut GstAggregatorPad, + *mut gst::GstBuffer, + ) -> *mut gst::GstBuffer, + >, + pub finish_buffer: + Option gst::GstFlowReturn>, + pub sink_event: Option< + unsafe extern "C" fn( + *mut GstAggregator, + *mut GstAggregatorPad, + *mut gst::GstEvent, + ) -> gboolean, + >, + pub sink_query: Option< + unsafe extern "C" fn( + *mut GstAggregator, + *mut GstAggregatorPad, + *mut gst::GstQuery, + ) -> gboolean, + >, + pub src_event: Option gboolean>, + pub src_query: Option gboolean>, + pub src_activate: + Option gboolean>, + pub aggregate: Option gst::GstFlowReturn>, + pub stop: Option gboolean>, + pub start: Option gboolean>, + pub get_next_time: Option gst::GstClockTime>, + pub create_new_pad: Option< + unsafe extern "C" fn( + *mut GstAggregator, + *mut gst::GstPadTemplate, + *const c_char, + *const gst::GstCaps, + ) -> *mut GstAggregatorPad, + >, + pub update_src_caps: Option< + unsafe extern "C" fn( + *mut GstAggregator, + *mut gst::GstCaps, + *mut *mut gst::GstCaps, + ) -> gst::GstFlowReturn, + >, + pub fixate_src_caps: + Option *mut gst::GstCaps>, + pub negotiated_src_caps: + Option gboolean>, + pub decide_allocation: + Option gboolean>, + pub propose_allocation: Option< + unsafe extern "C" fn( + *mut GstAggregator, + *mut GstAggregatorPad, + *mut gst::GstQuery, + *mut gst::GstQuery, + ) -> gboolean, + >, + pub negotiate: Option gboolean>, + pub sink_event_pre_queue: Option< + unsafe extern "C" fn( + *mut GstAggregator, + *mut GstAggregatorPad, + *mut gst::GstEvent, + ) -> gboolean, + >, + pub sink_query_pre_queue: Option< + unsafe extern "C" fn( + *mut GstAggregator, + *mut GstAggregatorPad, + *mut gst::GstQuery, + ) -> gboolean, + >, + pub _gst_reserved: [gpointer; 17], +} + +impl ::std::fmt::Debug for GstAggregatorClass { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + 
f.debug_struct(&format!("GstAggregatorClass @ {:?}", self as *const _)) + .field("parent_class", &self.parent_class) + .field("flush", &self.flush) + .field("clip", &self.clip) + .field("finish_buffer", &self.finish_buffer) + .field("sink_event", &self.sink_event) + .field("sink_query", &self.sink_query) + .field("src_event", &self.src_event) + .field("src_query", &self.src_query) + .field("src_activate", &self.src_activate) + .field("aggregate", &self.aggregate) + .field("stop", &self.stop) + .field("start", &self.start) + .field("get_next_time", &self.get_next_time) + .field("create_new_pad", &self.create_new_pad) + .field("update_src_caps", &self.update_src_caps) + .field("fixate_src_caps", &self.fixate_src_caps) + .field("negotiated_src_caps", &self.negotiated_src_caps) + .field("decide_allocation", &self.decide_allocation) + .field("propose_allocation", &self.propose_allocation) + .finish() + } +} + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct GstAggregatorPadClass { + pub parent_class: gst::GstPadClass, + pub flush: Option< + unsafe extern "C" fn(*mut GstAggregatorPad, *mut GstAggregator) -> gst::GstFlowReturn, + >, + pub skip_buffer: Option< + unsafe extern "C" fn( + *mut GstAggregatorPad, + *mut GstAggregator, + *mut gst::GstBuffer, + ) -> gboolean, + >, + pub _gst_reserved: [gpointer; 20], +} + +impl ::std::fmt::Debug for GstAggregatorPadClass { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + f.debug_struct(&format!("GstAggregatorPadClass @ {:?}", self as *const _)) + .field("parent_class", &self.parent_class) + .field("flush", &self.flush) + .field("skip_buffer", &self.skip_buffer) + .finish() + } +} + +#[repr(C)] +pub struct _GstAggregatorPadPrivate(c_void); + +pub type GstAggregatorPadPrivate = *mut _GstAggregatorPadPrivate; + +#[repr(C)] +pub struct _GstAggregatorPrivate(c_void); + +pub type GstAggregatorPrivate = *mut _GstAggregatorPrivate; + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct GstAggregator { + pub parent: gst::GstElement, + pub srcpad: *mut gst::GstPad, + pub priv_: *mut GstAggregatorPrivate, + pub _gst_reserved: [gpointer; 20], +} + +impl ::std::fmt::Debug for GstAggregator { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + f.debug_struct(&format!("GstAggregator @ {:?}", self as *const _)) + .field("parent", &self.parent) + .field("srcpad", &self.srcpad) + .finish() + } +} + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct GstAggregatorPad { + pub parent: gst::GstPad, + pub segment: gst::GstSegment, + pub priv_: *mut GstAggregatorPadPrivate, + pub _gst_reserved: [gpointer; 4], +} + +impl ::std::fmt::Debug for GstAggregatorPad { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + f.debug_struct(&format!("GstAggregatorPad @ {:?}", self as *const _)) + .field("parent", &self.parent) + .field("segment", &self.segment) + .finish() + } +} + +extern "C" { + //========================================================================= + // GstAggregator + //========================================================================= + pub fn gst_aggregator_get_type() -> GType; + pub fn gst_aggregator_finish_buffer( + aggregator: *mut GstAggregator, + buffer: *mut gst::GstBuffer, + ) -> gst::GstFlowReturn; + pub fn gst_aggregator_negotiate(aggregator: *mut GstAggregator) -> gboolean; + pub fn gst_aggregator_get_allocator( + self_: *mut GstAggregator, + allocator: *mut *mut gst::GstAllocator, + params: *mut gst::GstAllocationParams, + ); + pub fn gst_aggregator_get_buffer_pool(self_: *mut GstAggregator) 
-> *mut gst::GstBufferPool; + pub fn gst_aggregator_get_latency(self_: *mut GstAggregator) -> gst::GstClockTime; + pub fn gst_aggregator_set_latency( + self_: *mut GstAggregator, + min_latency: gst::GstClockTime, + max_latency: gst::GstClockTime, + ); + pub fn gst_aggregator_set_src_caps(self_: *mut GstAggregator, caps: *mut gst::GstCaps); + pub fn gst_aggregator_simple_get_next_time(self_: *mut GstAggregator) -> gst::GstClockTime; + + //========================================================================= + // GstAggregatorPad + //========================================================================= + pub fn gst_aggregator_pad_get_type() -> GType; + pub fn gst_aggregator_pad_drop_buffer(pad: *mut GstAggregatorPad) -> gboolean; + pub fn gst_aggregator_pad_has_buffer(pad: *mut GstAggregatorPad) -> gboolean; + pub fn gst_aggregator_pad_is_eos(pad: *mut GstAggregatorPad) -> gboolean; + pub fn gst_aggregator_pad_peek_buffer(pad: *mut GstAggregatorPad) -> *mut gst::GstBuffer; + pub fn gst_aggregator_pad_pop_buffer(pad: *mut GstAggregatorPad) -> *mut gst::GstBuffer; +} diff --git a/src/base/gstaggregator.c b/src/base/gstaggregator.c new file mode 100644 index 0000000..436ed40 --- /dev/null +++ b/src/base/gstaggregator.c @@ -0,0 +1,3491 @@ +/* GStreamer aggregator base class + * Copyright (C) 2014 Mathieu Duponchelle + * Copyright (C) 2014 Thibault Saunier + * + * gstaggregator.c: + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, + * Boston, MA 02110-1301, USA. + */ +/** + * SECTION: gstaggregator + * @title: GstAggregator + * @short_description: Base class for mixers and muxers, manages a set of input + * pads and aggregates their streams + * @see_also: gstcollectpads for historical reasons. + * + * Manages a set of pads with the purpose of aggregating their buffers. + * Control is given to the subclass when all pads have data. + * + * * Base class for mixers and muxers. Subclasses should at least implement + * the #GstAggregatorClass.aggregate() virtual method. + * + * * Installs a #GstPadChainFunction, a #GstPadEventFullFunction and a + * #GstPadQueryFunction to queue all serialized data packets per sink pad. + * Subclasses should not overwrite those, but instead implement + * #GstAggregatorClass.sink_event() and #GstAggregatorClass.sink_query() as + * needed. + * + * * When data is queued on all pads, the aggregate vmethod is called. + * + * * One can peek at the data on any given GstAggregatorPad with the + * gst_aggregator_pad_peek_buffer () method, and remove it from the pad + * with the gst_aggregator_pad_pop_buffer () method. When a buffer + * has been taken with pop_buffer (), a new buffer can be queued + * on that pad. + * + * * If the subclass wishes to push a buffer downstream in its aggregate + * implementation, it should do so through the + * gst_aggregator_finish_buffer () method. 
This method will take care + * of sending and ordering mandatory events such as stream start, caps + * and segment. + * + * * Same goes for EOS events, which should not be pushed directly by the + * subclass, it should instead return GST_FLOW_EOS in its aggregate + * implementation. + * + * * Note that the aggregator logic regarding gap event handling is to turn + * these into gap buffers with matching PTS and duration. It will also + * flag these buffers with GST_BUFFER_FLAG_GAP and GST_BUFFER_FLAG_DROPPABLE + * to ease their identification and subsequent processing. + * + * * Subclasses must use (a subclass of) #GstAggregatorPad for both their + * sink and source pads. + * See gst_element_class_add_static_pad_template_with_gtype(). + * + * This class used to live in gst-plugins-bad and was moved to core. + * + * Since: 1.14 + */ + +/** + * SECTION: gstaggregatorpad + * @title: GstAggregatorPad + * @short_description: #GstPad subclass for pads managed by #GstAggregator + * @see_also: gstcollectpads for historical reasons. + * + * Pads managed by a #GstAggregator subclass. + * + * This class used to live in gst-plugins-bad and was moved to core. + * + * Since: 1.14 + */ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include /* strlen */ + +#include "gstaggregator.h" + +typedef enum +{ + GST_AGGREGATOR_START_TIME_SELECTION_ZERO, + GST_AGGREGATOR_START_TIME_SELECTION_FIRST, + GST_AGGREGATOR_START_TIME_SELECTION_SET +} GstAggregatorStartTimeSelection; + +static GType +gst_aggregator_start_time_selection_get_type (void) +{ + static GType gtype = 0; + + if (gtype == 0) { + static const GEnumValue values[] = { + {GST_AGGREGATOR_START_TIME_SELECTION_ZERO, + "Start at 0 running time (default)", "zero"}, + {GST_AGGREGATOR_START_TIME_SELECTION_FIRST, + "Start at first observed input running time", "first"}, + {GST_AGGREGATOR_START_TIME_SELECTION_SET, + "Set start time with start-time property", "set"}, + {0, NULL, NULL} + }; + + gtype = + g_enum_register_static ("GstAggregatorFallbackStartTimeSelection", + values); + } + return gtype; +} + +/* Might become API */ +#if 0 +static void gst_aggregator_merge_tags (GstAggregator * aggregator, + const GstTagList * tags, GstTagMergeMode mode); +#endif +static void gst_aggregator_set_latency_property (GstAggregator * agg, + GstClockTime latency); +static GstClockTime gst_aggregator_get_latency_property (GstAggregator * agg); + +static GstClockTime gst_aggregator_get_latency_unlocked (GstAggregator * self); + +static void gst_aggregator_pad_buffer_consumed (GstAggregatorPad * pad, + GstBuffer * buffer); + +GST_DEBUG_CATEGORY_STATIC (aggregator_debug); +#define GST_CAT_DEFAULT aggregator_debug + +/* Locking order, locks in this element must always be taken in this order + * + * standard sink pad stream lock -> GST_PAD_STREAM_LOCK (aggpad) + * Aggregator pad flush lock -> PAD_FLUSH_LOCK(aggpad) + * standard src pad stream lock -> GST_PAD_STREAM_LOCK (srcpad) + * Aggregator src lock -> SRC_LOCK(agg) w/ SRC_WAIT/BROADCAST + * standard element object lock -> GST_OBJECT_LOCK(agg) + * Aggregator pad lock -> PAD_LOCK (aggpad) w/ PAD_WAIT/BROADCAST_EVENT(aggpad) + * standard src pad object lock -> GST_OBJECT_LOCK(srcpad) + * standard sink pad object lock -> GST_OBJECT_LOCK(aggpad) + */ + +/* GstAggregatorPad definitions */ +#define PAD_LOCK(pad) G_STMT_START { \ + GST_TRACE_OBJECT (pad, "Taking PAD lock from thread %p", \ + g_thread_self()); \ + g_mutex_lock(&pad->priv->lock); \ + GST_TRACE_OBJECT (pad, "Took PAD lock from thread %p", \ + g_thread_self()); 
\ + } G_STMT_END + +#define PAD_UNLOCK(pad) G_STMT_START { \ + GST_TRACE_OBJECT (pad, "Releasing PAD lock from thread %p", \ + g_thread_self()); \ + g_mutex_unlock(&pad->priv->lock); \ + GST_TRACE_OBJECT (pad, "Release PAD lock from thread %p", \ + g_thread_self()); \ + } G_STMT_END + + +#define PAD_WAIT_EVENT(pad) G_STMT_START { \ + GST_LOG_OBJECT (pad, "Waiting for buffer to be consumed thread %p", \ + g_thread_self()); \ + g_cond_wait(&(((GstAggregatorPad* )pad)->priv->event_cond), \ + (&((GstAggregatorPad*)pad)->priv->lock)); \ + GST_LOG_OBJECT (pad, "DONE Waiting for buffer to be consumed on thread %p", \ + g_thread_self()); \ + } G_STMT_END + +#define PAD_BROADCAST_EVENT(pad) G_STMT_START { \ + GST_LOG_OBJECT (pad, "Signaling buffer consumed from thread %p", \ + g_thread_self()); \ + g_cond_broadcast(&(((GstAggregatorPad* )pad)->priv->event_cond)); \ + } G_STMT_END + + +#define PAD_FLUSH_LOCK(pad) G_STMT_START { \ + GST_TRACE_OBJECT (pad, "Taking lock from thread %p", \ + g_thread_self()); \ + g_mutex_lock(&pad->priv->flush_lock); \ + GST_TRACE_OBJECT (pad, "Took lock from thread %p", \ + g_thread_self()); \ + } G_STMT_END + +#define PAD_FLUSH_UNLOCK(pad) G_STMT_START { \ + GST_TRACE_OBJECT (pad, "Releasing lock from thread %p", \ + g_thread_self()); \ + g_mutex_unlock(&pad->priv->flush_lock); \ + GST_TRACE_OBJECT (pad, "Release lock from thread %p", \ + g_thread_self()); \ + } G_STMT_END + +#define SRC_LOCK(self) G_STMT_START { \ + GST_TRACE_OBJECT (self, "Taking src lock from thread %p", \ + g_thread_self()); \ + g_mutex_lock(&self->priv->src_lock); \ + GST_TRACE_OBJECT (self, "Took src lock from thread %p", \ + g_thread_self()); \ + } G_STMT_END + +#define SRC_UNLOCK(self) G_STMT_START { \ + GST_TRACE_OBJECT (self, "Releasing src lock from thread %p", \ + g_thread_self()); \ + g_mutex_unlock(&self->priv->src_lock); \ + GST_TRACE_OBJECT (self, "Released src lock from thread %p", \ + g_thread_self()); \ + } G_STMT_END + +#define SRC_WAIT(self) G_STMT_START { \ + GST_LOG_OBJECT (self, "Waiting for src on thread %p", \ + g_thread_self()); \ + g_cond_wait(&(self->priv->src_cond), &(self->priv->src_lock)); \ + GST_LOG_OBJECT (self, "DONE Waiting for src on thread %p", \ + g_thread_self()); \ + } G_STMT_END + +#define SRC_BROADCAST(self) G_STMT_START { \ + GST_LOG_OBJECT (self, "Signaling src from thread %p", \ + g_thread_self()); \ + if (self->priv->aggregate_id) \ + gst_clock_id_unschedule (self->priv->aggregate_id); \ + g_cond_broadcast(&(self->priv->src_cond)); \ + } G_STMT_END + +struct _GstAggregatorPadPrivate +{ + /* Following fields are protected by the PAD_LOCK */ + GstFlowReturn flow_return; + + guint32 last_flush_start_seqnum; + guint32 last_flush_stop_seqnum; + + gboolean first_buffer; + + GQueue data; /* buffers, events and queries */ + GstBuffer *clipped_buffer; + guint num_buffers; + + /* used to track fill state of queues, only used with live-src and when + * latency property is set to > 0 */ + GstClockTime head_position; + GstClockTime tail_position; + GstClockTime head_time; /* running time */ + GstClockTime tail_time; + GstClockTime time_level; /* how much head is ahead of tail */ + GstSegment head_segment; /* segment before the queue */ + + gboolean negotiated; + + gboolean eos; + + GMutex lock; + GCond event_cond; + /* This lock prevents a flush start processing happening while + * the chain function is also happening. 
+ */ + GMutex flush_lock; + + /* properties */ + gboolean emit_signals; +}; + +/* Must be called with PAD_LOCK held */ +static void +gst_aggregator_pad_reset_unlocked (GstAggregatorPad * aggpad) +{ + aggpad->priv->eos = FALSE; + aggpad->priv->flow_return = GST_FLOW_OK; + GST_OBJECT_LOCK (aggpad); + gst_segment_init (&aggpad->segment, GST_FORMAT_UNDEFINED); + gst_segment_init (&aggpad->priv->head_segment, GST_FORMAT_UNDEFINED); + GST_OBJECT_UNLOCK (aggpad); + aggpad->priv->head_position = GST_CLOCK_TIME_NONE; + aggpad->priv->tail_position = GST_CLOCK_TIME_NONE; + aggpad->priv->head_time = GST_CLOCK_TIME_NONE; + aggpad->priv->tail_time = GST_CLOCK_TIME_NONE; + aggpad->priv->time_level = 0; + aggpad->priv->first_buffer = TRUE; +} + +static gboolean +gst_aggregator_pad_flush (GstAggregatorPad * aggpad, GstAggregator * agg) +{ + GstAggregatorPadClass *klass = GST_AGGREGATOR_PAD_GET_CLASS (aggpad); + + PAD_LOCK (aggpad); + gst_aggregator_pad_reset_unlocked (aggpad); + PAD_UNLOCK (aggpad); + + if (klass->flush) + return (klass->flush (aggpad, agg) == GST_FLOW_OK); + + return TRUE; +} + +/************************************* + * GstAggregator implementation * + *************************************/ +static GstElementClass *aggregator_parent_class = NULL; +static gint aggregator_private_offset = 0; + +/* All members are protected by the object lock unless otherwise noted */ + +struct _GstAggregatorPrivate +{ + gint max_padserial; + + /* Our state is >= PAUSED */ + gboolean running; /* protected by src_lock */ + + /* seqnum from last seek or common seqnum to flush start events received + * on all pads, for flushing without a seek */ + guint32 next_seqnum; + /* seqnum to apply to synthetic segment/eos events */ + guint32 seqnum; + gboolean send_stream_start; /* protected by srcpad stream lock */ + gboolean send_segment; + gboolean flushing; + gboolean send_eos; /* protected by srcpad stream lock */ + + GstCaps *srccaps; /* protected by the srcpad stream lock */ + + GstTagList *tags; + gboolean tags_changed; + + gboolean peer_latency_live; /* protected by src_lock */ + GstClockTime peer_latency_min; /* protected by src_lock */ + GstClockTime peer_latency_max; /* protected by src_lock */ + gboolean has_peer_latency; /* protected by src_lock */ + + GstClockTime sub_latency_min; /* protected by src_lock */ + GstClockTime sub_latency_max; /* protected by src_lock */ + + GstClockTime upstream_latency_min; /* protected by src_lock */ + + /* aggregate */ + GstClockID aggregate_id; /* protected by src_lock */ + GMutex src_lock; + GCond src_cond; + + gboolean first_buffer; /* protected by object lock */ + GstAggregatorStartTimeSelection start_time_selection; + GstClockTime start_time; + + /* protected by the object lock */ + GstQuery *allocation_query; + GstAllocator *allocator; + GstBufferPool *pool; + GstAllocationParams allocation_params; + + /* properties */ + gint64 latency; /* protected by both src_lock and all pad locks */ +}; + +/* Seek event forwarding helper */ +typedef struct +{ + /* parameters */ + GstEvent *event; + gboolean flush; + gboolean only_to_active_pads; + + /* results */ + gboolean result; + gboolean one_actually_seeked; +} EventData; + +#define DEFAULT_LATENCY 0 +#define DEFAULT_MIN_UPSTREAM_LATENCY 0 +#define DEFAULT_START_TIME_SELECTION GST_AGGREGATOR_START_TIME_SELECTION_ZERO +#define DEFAULT_START_TIME (-1) + +enum +{ + PROP_0, + PROP_LATENCY, + PROP_MIN_UPSTREAM_LATENCY, + PROP_START_TIME_SELECTION, + PROP_START_TIME, + PROP_LAST +}; + +static GstFlowReturn 
gst_aggregator_pad_chain_internal (GstAggregator * self, + GstAggregatorPad * aggpad, GstBuffer * buffer, gboolean head); + +static gboolean +gst_aggregator_pad_queue_is_empty (GstAggregatorPad * pad) +{ + return (g_queue_peek_tail (&pad->priv->data) == NULL && + pad->priv->clipped_buffer == NULL); +} + +/* Will return FALSE if there's no buffer available on every non-EOS pad, or + * if at least one of the pads has an event or query at the top of its queue. + * + * Only returns TRUE if all non-EOS pads have a buffer available at the top of + * their queue or a clipped buffer already. + */ +static gboolean +gst_aggregator_check_pads_ready (GstAggregator * self, + gboolean * have_event_or_query_ret) +{ + GstAggregatorPad *pad = NULL; + GList *l, *sinkpads; + gboolean have_buffer = TRUE; + gboolean have_event_or_query = FALSE; + + GST_LOG_OBJECT (self, "checking pads"); + + GST_OBJECT_LOCK (self); + + sinkpads = GST_ELEMENT_CAST (self)->sinkpads; + if (sinkpads == NULL) + goto no_sinkpads; + + for (l = sinkpads; l != NULL; l = l->next) { + pad = l->data; + + PAD_LOCK (pad); + + /* If there's an event or query at the top of the queue and we don't yet + * have taken the top buffer out and stored it as clip_buffer, remember + * that and exit the loop. We first have to handle all events/queries + * before we handle any buffers. */ + if (!pad->priv->clipped_buffer + && (GST_IS_EVENT (g_queue_peek_tail (&pad->priv->data)) + || GST_IS_QUERY (g_queue_peek_tail (&pad->priv->data)))) { + PAD_UNLOCK (pad); + have_event_or_query = TRUE; + break; + } + + /* Otherwise check if we have a clipped buffer or a buffer at the top of + * the queue, and if not then this pad is not ready unless it is also EOS */ + if (!pad->priv->clipped_buffer + && !GST_IS_BUFFER (g_queue_peek_tail (&pad->priv->data))) { + /* We must not have any buffers at all in this pad then as otherwise we + * would've had an event/query at the top of the queue */ + g_assert (pad->priv->num_buffers == 0); + + /* Only consider this pad as worth waiting for if it's not already EOS. + * There's no point in waiting for buffers on EOS pads */ + if (!pad->priv->eos) + have_buffer = FALSE; + } else if (self->priv->peer_latency_live) { + /* In live mode, having a single pad with buffers is enough to + * generate a start time from it. 
In non-live mode all pads need + * to have a buffer + */ + self->priv->first_buffer = FALSE; + } + + PAD_UNLOCK (pad); + } + + if (have_event_or_query) + goto pad_not_ready_but_event_or_query; + + if (!have_buffer) + goto pad_not_ready; + + if (have_buffer) + self->priv->first_buffer = FALSE; + + GST_OBJECT_UNLOCK (self); + GST_LOG_OBJECT (self, "pads are ready"); + + if (have_event_or_query_ret) + *have_event_or_query_ret = have_event_or_query; + + return TRUE; + +no_sinkpads: + { + GST_LOG_OBJECT (self, "pads not ready: no sink pads"); + GST_OBJECT_UNLOCK (self); + + if (have_event_or_query_ret) + *have_event_or_query_ret = have_event_or_query; + + return FALSE; + } +pad_not_ready: + { + GST_LOG_OBJECT (pad, "pad not ready to be aggregated yet"); + GST_OBJECT_UNLOCK (self); + + if (have_event_or_query_ret) + *have_event_or_query_ret = have_event_or_query; + + return FALSE; + } +pad_not_ready_but_event_or_query: + { + GST_LOG_OBJECT (pad, + "pad not ready to be aggregated yet, need to handle serialized event or query first"); + GST_OBJECT_UNLOCK (self); + + if (have_event_or_query_ret) + *have_event_or_query_ret = have_event_or_query; + + return FALSE; + } +} + +static void +gst_aggregator_reset_flow_values (GstAggregator * self) +{ + GST_OBJECT_LOCK (self); + self->priv->send_stream_start = TRUE; + self->priv->send_segment = TRUE; + gst_segment_init (&GST_AGGREGATOR_PAD (self->srcpad)->segment, + GST_FORMAT_TIME); + /* Initialize to -1 so we set it to the start position once the first buffer + * is handled in gst_aggregator_pad_chain_internal() */ + GST_AGGREGATOR_PAD (self->srcpad)->segment.position = -1; + self->priv->first_buffer = TRUE; + GST_OBJECT_UNLOCK (self); +} + +static inline void +gst_aggregator_push_mandatory_events (GstAggregator * self) +{ + GstAggregatorPrivate *priv = self->priv; + GstEvent *segment = NULL; + GstEvent *tags = NULL; + + if (self->priv->send_stream_start) { + gchar s_id[32]; + + GST_INFO_OBJECT (self, "pushing stream start"); + /* stream-start (FIXME: create id based on input ids) */ + g_snprintf (s_id, sizeof (s_id), "agg-%08x", g_random_int ()); + if (!gst_pad_push_event (GST_PAD (self->srcpad), + gst_event_new_stream_start (s_id))) { + GST_WARNING_OBJECT (self->srcpad, "Sending stream start event failed"); + } + self->priv->send_stream_start = FALSE; + } + + if (self->priv->srccaps) { + + GST_INFO_OBJECT (self, "pushing caps: %" GST_PTR_FORMAT, + self->priv->srccaps); + if (!gst_pad_push_event (GST_PAD (self->srcpad), + gst_event_new_caps (self->priv->srccaps))) { + GST_WARNING_OBJECT (self->srcpad, "Sending caps event failed"); + } + gst_caps_unref (self->priv->srccaps); + self->priv->srccaps = NULL; + } + + GST_OBJECT_LOCK (self); + if (self->priv->send_segment && !self->priv->flushing) { + segment = + gst_event_new_segment (&GST_AGGREGATOR_PAD (self->srcpad)->segment); + + if (!self->priv->seqnum) + /* This code-path is in preparation to be able to run without a source + * connected. Then we won't have a seq-num from a segment event. 
*/ + self->priv->seqnum = gst_event_get_seqnum (segment); + else + gst_event_set_seqnum (segment, self->priv->seqnum); + self->priv->send_segment = FALSE; + + GST_DEBUG_OBJECT (self, "pushing segment %" GST_PTR_FORMAT, segment); + } + + if (priv->tags && priv->tags_changed && !self->priv->flushing) { + tags = gst_event_new_tag (gst_tag_list_ref (priv->tags)); + priv->tags_changed = FALSE; + } + GST_OBJECT_UNLOCK (self); + + if (segment) + gst_pad_push_event (self->srcpad, segment); + if (tags) + gst_pad_push_event (self->srcpad, tags); + +} + +/** + * gst_aggregator_set_src_caps: + * @self: The #GstAggregator + * @caps: The #GstCaps to set on the src pad. + * + * Sets the caps to be used on the src pad. + */ +void +gst_aggregator_set_src_caps (GstAggregator * self, GstCaps * caps) +{ + GST_PAD_STREAM_LOCK (self->srcpad); + gst_caps_replace (&self->priv->srccaps, caps); + gst_aggregator_push_mandatory_events (self); + GST_PAD_STREAM_UNLOCK (self->srcpad); +} + +static GstFlowReturn +gst_aggregator_default_finish_buffer (GstAggregator * self, GstBuffer * buffer) +{ + gst_aggregator_push_mandatory_events (self); + + GST_OBJECT_LOCK (self); + if (!self->priv->flushing && gst_pad_is_active (self->srcpad)) { + GST_TRACE_OBJECT (self, "pushing buffer %" GST_PTR_FORMAT, buffer); + GST_OBJECT_UNLOCK (self); + return gst_pad_push (self->srcpad, buffer); + } else { + GST_INFO_OBJECT (self, "Not pushing (active: %i, flushing: %i)", + self->priv->flushing, gst_pad_is_active (self->srcpad)); + GST_OBJECT_UNLOCK (self); + gst_buffer_unref (buffer); + return GST_FLOW_OK; + } +} + +/** + * gst_aggregator_finish_buffer: + * @aggregator: The #GstAggregator + * @buffer: (transfer full): the #GstBuffer to push. + * + * This method will push the provided output buffer downstream. If needed, + * mandatory events such as stream-start, caps, and segment events will be + * sent before pushing the buffer. 
+ */ +GstFlowReturn +gst_aggregator_finish_buffer (GstAggregator * aggregator, GstBuffer * buffer) +{ + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (aggregator); + + g_assert (klass->finish_buffer != NULL); + + return klass->finish_buffer (aggregator, buffer); +} + +static void +gst_aggregator_push_eos (GstAggregator * self) +{ + GstEvent *event; + gst_aggregator_push_mandatory_events (self); + + event = gst_event_new_eos (); + + GST_OBJECT_LOCK (self); + self->priv->send_eos = FALSE; + gst_event_set_seqnum (event, self->priv->seqnum); + GST_OBJECT_UNLOCK (self); + + gst_pad_push_event (self->srcpad, event); +} + +static GstClockTime +gst_aggregator_get_next_time (GstAggregator * self) +{ + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self); + + if (klass->get_next_time) + return klass->get_next_time (self); + + return GST_CLOCK_TIME_NONE; +} + +static gboolean +gst_aggregator_wait_and_check (GstAggregator * self, gboolean * timeout) +{ + GstClockTime latency; + GstClockTime start; + gboolean res; + gboolean have_event_or_query = FALSE; + + *timeout = FALSE; + + SRC_LOCK (self); + + latency = gst_aggregator_get_latency_unlocked (self); + + if (gst_aggregator_check_pads_ready (self, &have_event_or_query)) { + GST_DEBUG_OBJECT (self, "all pads have data"); + SRC_UNLOCK (self); + + return TRUE; + } + + /* If we have an event or query, immediately return FALSE instead of waiting + * and handle it immediately */ + if (have_event_or_query) { + GST_DEBUG_OBJECT (self, "Have serialized event or query to handle first"); + SRC_UNLOCK (self); + return FALSE; + } + + /* Before waiting, check if we're actually still running */ + if (!self->priv->running || !self->priv->send_eos) { + SRC_UNLOCK (self); + + return FALSE; + } + + start = gst_aggregator_get_next_time (self); + + /* If we're not live, or if we use the running time + * of the first buffer as start time, we wait until + * all pads have buffers. + * Otherwise (i.e. if we are live!), we wait on the clock + * and if a pad does not have a buffer in time we ignore + * that pad. + */ + GST_OBJECT_LOCK (self); + if (!GST_CLOCK_TIME_IS_VALID (latency) || + !GST_IS_CLOCK (GST_ELEMENT_CLOCK (self)) || + !GST_CLOCK_TIME_IS_VALID (start) || + (self->priv->first_buffer + && self->priv->start_time_selection == + GST_AGGREGATOR_START_TIME_SELECTION_FIRST)) { + /* We wake up here when something happened, and below + * then check if we're ready now. If we return FALSE, + * we will be directly called again. 
+ */ + GST_OBJECT_UNLOCK (self); + SRC_WAIT (self); + } else { + GstClockTime base_time, time; + GstClock *clock; + GstClockReturn status; + GstClockTimeDiff jitter; + + GST_DEBUG_OBJECT (self, "got subclass start time: %" GST_TIME_FORMAT, + GST_TIME_ARGS (start)); + + base_time = GST_ELEMENT_CAST (self)->base_time; + clock = gst_object_ref (GST_ELEMENT_CLOCK (self)); + GST_OBJECT_UNLOCK (self); + + time = base_time + start; + time += latency; + + GST_DEBUG_OBJECT (self, "possibly waiting for clock to reach %" + GST_TIME_FORMAT " (base %" GST_TIME_FORMAT " start %" GST_TIME_FORMAT + " latency %" GST_TIME_FORMAT " current %" GST_TIME_FORMAT ")", + GST_TIME_ARGS (time), + GST_TIME_ARGS (base_time), + GST_TIME_ARGS (start), GST_TIME_ARGS (latency), + GST_TIME_ARGS (gst_clock_get_time (clock))); + + self->priv->aggregate_id = gst_clock_new_single_shot_id (clock, time); + gst_object_unref (clock); + SRC_UNLOCK (self); + + jitter = 0; + status = gst_clock_id_wait (self->priv->aggregate_id, &jitter); + + SRC_LOCK (self); + if (self->priv->aggregate_id) { + gst_clock_id_unref (self->priv->aggregate_id); + self->priv->aggregate_id = NULL; + } + + GST_DEBUG_OBJECT (self, + "clock returned %d (jitter: %" GST_STIME_FORMAT ")", + status, GST_STIME_ARGS (jitter)); + + /* we timed out */ + if (status == GST_CLOCK_OK || status == GST_CLOCK_EARLY) { + SRC_UNLOCK (self); + *timeout = TRUE; + return TRUE; + } + } + + res = gst_aggregator_check_pads_ready (self, NULL); + SRC_UNLOCK (self); + + return res; +} + +typedef struct +{ + gboolean processed_event; + GstFlowReturn flow_ret; +} DoHandleEventsAndQueriesData; + +static gboolean +gst_aggregator_do_events_and_queries (GstElement * self, GstPad * epad, + gpointer user_data) +{ + GstAggregatorPad *pad = GST_AGGREGATOR_PAD_CAST (epad); + GstAggregator *aggregator = GST_AGGREGATOR_CAST (self); + GstEvent *event = NULL; + GstQuery *query = NULL; + GstAggregatorClass *klass = NULL; + DoHandleEventsAndQueriesData *data = user_data; + + do { + event = NULL; + query = NULL; + + PAD_LOCK (pad); + if (pad->priv->clipped_buffer == NULL && + !GST_IS_BUFFER (g_queue_peek_tail (&pad->priv->data))) { + if (GST_IS_EVENT (g_queue_peek_tail (&pad->priv->data))) + event = gst_event_ref (g_queue_peek_tail (&pad->priv->data)); + if (GST_IS_QUERY (g_queue_peek_tail (&pad->priv->data))) + query = g_queue_peek_tail (&pad->priv->data); + } + PAD_UNLOCK (pad); + if (event || query) { + gboolean ret; + + data->processed_event = TRUE; + if (klass == NULL) + klass = GST_AGGREGATOR_GET_CLASS (self); + + if (event) { + GST_LOG_OBJECT (pad, "Processing %" GST_PTR_FORMAT, event); + gst_event_ref (event); + ret = klass->sink_event (aggregator, pad, event); + + PAD_LOCK (pad); + if (GST_EVENT_TYPE (event) == GST_EVENT_CAPS) { + pad->priv->negotiated = ret; + if (!ret) + pad->priv->flow_return = data->flow_ret = GST_FLOW_NOT_NEGOTIATED; + } + if (g_queue_peek_tail (&pad->priv->data) == event) + gst_event_unref (g_queue_pop_tail (&pad->priv->data)); + gst_event_unref (event); + } else if (query) { + GST_LOG_OBJECT (pad, "Processing %" GST_PTR_FORMAT, query); + ret = klass->sink_query (aggregator, pad, query); + + PAD_LOCK (pad); + if (g_queue_peek_tail (&pad->priv->data) == query) { + GstStructure *s; + + s = gst_query_writable_structure (query); + gst_structure_set (s, "gst-aggregator-retval", G_TYPE_BOOLEAN, ret, + NULL); + g_queue_pop_tail (&pad->priv->data); + } + } + + PAD_BROADCAST_EVENT (pad); + PAD_UNLOCK (pad); + } + } while (event || query); + + return TRUE; +} + +static gboolean 
+gst_aggregator_pad_skip_buffers (GstElement * self, GstPad * epad, + gpointer user_data) +{ + GList *item; + GstAggregatorPad *aggpad = (GstAggregatorPad *) epad; + GstAggregator *agg = (GstAggregator *) self; + GstAggregatorPadClass *klass = GST_AGGREGATOR_PAD_GET_CLASS (aggpad); + + if (!klass->skip_buffer) + return FALSE; + + PAD_LOCK (aggpad); + + item = g_queue_peek_head_link (&aggpad->priv->data); + while (item) { + GList *next = item->next; + + if (GST_IS_BUFFER (item->data) + && klass->skip_buffer (aggpad, agg, item->data)) { + GST_LOG_OBJECT (aggpad, "Skipping %" GST_PTR_FORMAT, item->data); + gst_aggregator_pad_buffer_consumed (aggpad, GST_BUFFER (item->data)); + gst_buffer_unref (item->data); + g_queue_delete_link (&aggpad->priv->data, item); + } else { + break; + } + + item = next; + } + + PAD_UNLOCK (aggpad); + + return TRUE; +} + +static void +gst_aggregator_pad_set_flushing (GstAggregatorPad * aggpad, + GstFlowReturn flow_return, gboolean full) +{ + GList *item; + + PAD_LOCK (aggpad); + if (flow_return == GST_FLOW_NOT_LINKED) + aggpad->priv->flow_return = MIN (flow_return, aggpad->priv->flow_return); + else + aggpad->priv->flow_return = flow_return; + + item = g_queue_peek_head_link (&aggpad->priv->data); + while (item) { + GList *next = item->next; + + /* In partial flush, we do like the pad, we get rid of non-sticky events + * and EOS/SEGMENT. + */ + if (full || GST_IS_BUFFER (item->data) || + GST_EVENT_TYPE (item->data) == GST_EVENT_EOS || + GST_EVENT_TYPE (item->data) == GST_EVENT_SEGMENT || + !GST_EVENT_IS_STICKY (item->data)) { + if (!GST_IS_QUERY (item->data)) + gst_mini_object_unref (item->data); + g_queue_delete_link (&aggpad->priv->data, item); + } + item = next; + } + aggpad->priv->num_buffers = 0; + gst_buffer_replace (&aggpad->priv->clipped_buffer, NULL); + + PAD_BROADCAST_EVENT (aggpad); + PAD_UNLOCK (aggpad); +} + +static GstFlowReturn +gst_aggregator_default_update_src_caps (GstAggregator * agg, GstCaps * caps, + GstCaps ** ret) +{ + *ret = gst_caps_ref (caps); + + return GST_FLOW_OK; +} + +static GstCaps * +gst_aggregator_default_fixate_src_caps (GstAggregator * agg, GstCaps * caps) +{ + caps = gst_caps_fixate (caps); + + return caps; +} + +static gboolean +gst_aggregator_default_negotiated_src_caps (GstAggregator * agg, GstCaps * caps) +{ + return TRUE; +} + + +/* takes ownership of the pool, allocator and query */ +static gboolean +gst_aggregator_set_allocation (GstAggregator * self, + GstBufferPool * pool, GstAllocator * allocator, + GstAllocationParams * params, GstQuery * query) +{ + GstAllocator *oldalloc; + GstBufferPool *oldpool; + GstQuery *oldquery; + + GST_DEBUG ("storing allocation query"); + + GST_OBJECT_LOCK (self); + oldpool = self->priv->pool; + self->priv->pool = pool; + + oldalloc = self->priv->allocator; + self->priv->allocator = allocator; + + oldquery = self->priv->allocation_query; + self->priv->allocation_query = query; + + if (params) + self->priv->allocation_params = *params; + else + gst_allocation_params_init (&self->priv->allocation_params); + GST_OBJECT_UNLOCK (self); + + if (oldpool) { + GST_DEBUG_OBJECT (self, "deactivating old pool %p", oldpool); + gst_buffer_pool_set_active (oldpool, FALSE); + gst_object_unref (oldpool); + } + if (oldalloc) { + gst_object_unref (oldalloc); + } + if (oldquery) { + gst_query_unref (oldquery); + } + return TRUE; +} + + +static gboolean +gst_aggregator_decide_allocation (GstAggregator * self, GstQuery * query) +{ + GstAggregatorClass *aggclass = GST_AGGREGATOR_GET_CLASS (self); + + if 
(aggclass->decide_allocation) + if (!aggclass->decide_allocation (self, query)) + return FALSE; + + return TRUE; +} + +static gboolean +gst_aggregator_do_allocation (GstAggregator * self, GstCaps * caps) +{ + GstQuery *query; + gboolean result = TRUE; + GstBufferPool *pool = NULL; + GstAllocator *allocator; + GstAllocationParams params; + + /* find a pool for the negotiated caps now */ + GST_DEBUG_OBJECT (self, "doing allocation query"); + query = gst_query_new_allocation (caps, TRUE); + if (!gst_pad_peer_query (self->srcpad, query)) { + /* not a problem, just debug a little */ + GST_DEBUG_OBJECT (self, "peer ALLOCATION query failed"); + } + + GST_DEBUG_OBJECT (self, "calling decide_allocation"); + result = gst_aggregator_decide_allocation (self, query); + + GST_DEBUG_OBJECT (self, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, result, + query); + + if (!result) + goto no_decide_allocation; + + /* we got configuration from our peer or the decide_allocation method, + * parse them */ + if (gst_query_get_n_allocation_params (query) > 0) { + gst_query_parse_nth_allocation_param (query, 0, &allocator, ¶ms); + } else { + allocator = NULL; + gst_allocation_params_init (¶ms); + } + + if (gst_query_get_n_allocation_pools (query) > 0) + gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL); + + /* now store */ + result = + gst_aggregator_set_allocation (self, pool, allocator, ¶ms, query); + + return result; + + /* Errors */ +no_decide_allocation: + { + GST_WARNING_OBJECT (self, "Failed to decide allocation"); + gst_query_unref (query); + + return result; + } + +} + +static gboolean +gst_aggregator_default_negotiate (GstAggregator * self) +{ + GstAggregatorClass *agg_klass = GST_AGGREGATOR_GET_CLASS (self); + GstCaps *downstream_caps, *template_caps, *caps = NULL; + GstFlowReturn ret = GST_FLOW_OK; + + template_caps = gst_pad_get_pad_template_caps (self->srcpad); + downstream_caps = gst_pad_peer_query_caps (self->srcpad, template_caps); + + if (gst_caps_is_empty (downstream_caps)) { + GST_INFO_OBJECT (self, "Downstream caps (%" + GST_PTR_FORMAT ") not compatible with pad template caps (%" + GST_PTR_FORMAT ")", downstream_caps, template_caps); + ret = GST_FLOW_NOT_NEGOTIATED; + goto done; + } + + g_assert (agg_klass->update_src_caps); + GST_DEBUG_OBJECT (self, "updating caps from %" GST_PTR_FORMAT, + downstream_caps); + ret = agg_klass->update_src_caps (self, downstream_caps, &caps); + if (ret < GST_FLOW_OK) { + GST_WARNING_OBJECT (self, "Subclass failed to update provided caps"); + goto done; + } else if (ret == GST_AGGREGATOR_FLOW_NEED_DATA) { + GST_DEBUG_OBJECT (self, "Subclass needs more data to decide on caps"); + goto done; + } + if ((caps == NULL || gst_caps_is_empty (caps)) && ret >= GST_FLOW_OK) { + ret = GST_FLOW_NOT_NEGOTIATED; + goto done; + } + GST_DEBUG_OBJECT (self, " to %" GST_PTR_FORMAT, caps); + +#ifdef GST_ENABLE_EXTRA_CHECKS + if (!gst_caps_is_subset (caps, template_caps)) { + GstCaps *intersection; + + GST_ERROR_OBJECT (self, + "update_src_caps returned caps %" GST_PTR_FORMAT + " which are not a real subset of the template caps %" + GST_PTR_FORMAT, caps, template_caps); + g_warning ("%s: update_src_caps returned caps which are not a real " + "subset of the filter caps", GST_ELEMENT_NAME (self)); + + intersection = + gst_caps_intersect_full (template_caps, caps, GST_CAPS_INTERSECT_FIRST); + gst_caps_unref (caps); + caps = intersection; + } +#endif + + if (gst_caps_is_any (caps)) { + goto done; + } + + if (!gst_caps_is_fixed (caps)) { + g_assert 
(agg_klass->fixate_src_caps); + + GST_DEBUG_OBJECT (self, "fixate caps from %" GST_PTR_FORMAT, caps); + if (!(caps = agg_klass->fixate_src_caps (self, caps))) { + GST_WARNING_OBJECT (self, "Subclass failed to fixate provided caps"); + ret = GST_FLOW_NOT_NEGOTIATED; + goto done; + } + GST_DEBUG_OBJECT (self, " to %" GST_PTR_FORMAT, caps); + } + + if (agg_klass->negotiated_src_caps) { + if (!agg_klass->negotiated_src_caps (self, caps)) { + GST_WARNING_OBJECT (self, "Subclass failed to accept negotiated caps"); + ret = GST_FLOW_NOT_NEGOTIATED; + goto done; + } + } + + gst_aggregator_set_src_caps (self, caps); + + if (!gst_aggregator_do_allocation (self, caps)) { + GST_WARNING_OBJECT (self, "Allocation negotiation failed"); + ret = GST_FLOW_NOT_NEGOTIATED; + } + +done: + gst_caps_unref (downstream_caps); + gst_caps_unref (template_caps); + + if (caps) + gst_caps_unref (caps); + + return ret >= GST_FLOW_OK || ret == GST_AGGREGATOR_FLOW_NEED_DATA; +} + +/* WITH SRC_LOCK held */ +static gboolean +gst_aggregator_negotiate_unlocked (GstAggregator * self) +{ + GstAggregatorClass *agg_klass = GST_AGGREGATOR_GET_CLASS (self); + + if (agg_klass->negotiate) + return agg_klass->negotiate (self); + + return TRUE; +} + +/** + * gst_aggregator_negotiate: + * @self: a #GstAggregator + * + * Negotiates src pad caps with downstream elements. + * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case. But marks it again + * if #GstAggregatorClass.negotiate() fails. + * + * Returns: %TRUE if the negotiation succeeded, else %FALSE. + * + * Since: 1.18 + */ +gboolean +gst_aggregator_negotiate (GstAggregator * self) +{ + gboolean ret = TRUE; + + g_return_val_if_fail (GST_IS_AGGREGATOR (self), FALSE); + + GST_PAD_STREAM_LOCK (GST_AGGREGATOR_SRC_PAD (self)); + gst_pad_check_reconfigure (GST_AGGREGATOR_SRC_PAD (self)); + ret = gst_aggregator_negotiate_unlocked (self); + if (!ret) + gst_pad_mark_reconfigure (GST_AGGREGATOR_SRC_PAD (self)); + GST_PAD_STREAM_UNLOCK (GST_AGGREGATOR_SRC_PAD (self)); + + return ret; +} + +static void +gst_aggregator_aggregate_func (GstAggregator * self) +{ + GstAggregatorPrivate *priv = self->priv; + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self); + gboolean timeout = FALSE; + + if (self->priv->running == FALSE) { + GST_DEBUG_OBJECT (self, "Not running anymore"); + return; + } + + GST_LOG_OBJECT (self, "Checking aggregate"); + while (priv->send_eos && priv->running) { + GstFlowReturn flow_return = GST_FLOW_OK; + DoHandleEventsAndQueriesData events_query_data = { FALSE, GST_FLOW_OK }; + + gst_element_foreach_sink_pad (GST_ELEMENT_CAST (self), + gst_aggregator_do_events_and_queries, &events_query_data); + + if ((flow_return = events_query_data.flow_ret) != GST_FLOW_OK) + goto handle_error; + + if (self->priv->peer_latency_live) + gst_element_foreach_sink_pad (GST_ELEMENT_CAST (self), + gst_aggregator_pad_skip_buffers, NULL); + + /* Ensure we have buffers ready (either in clipped_buffer or at the head of + * the queue */ + if (!gst_aggregator_wait_and_check (self, &timeout)) + continue; + + if (gst_pad_check_reconfigure (GST_AGGREGATOR_SRC_PAD (self))) { + if (!gst_aggregator_negotiate_unlocked (self)) { + gst_pad_mark_reconfigure (GST_AGGREGATOR_SRC_PAD (self)); + if (GST_PAD_IS_FLUSHING (GST_AGGREGATOR_SRC_PAD (self))) { + flow_return = GST_FLOW_FLUSHING; + } else { + flow_return = GST_FLOW_NOT_NEGOTIATED; + } + } + } + + if (timeout || flow_return >= GST_FLOW_OK) { + GST_TRACE_OBJECT (self, "Actually aggregating!"); + flow_return = klass->aggregate (self, timeout); + } + + if 
(flow_return == GST_AGGREGATOR_FLOW_NEED_DATA) + continue; + + GST_OBJECT_LOCK (self); + if (flow_return == GST_FLOW_FLUSHING && priv->flushing) { + /* We don't want to set the pads to flushing, but we want to + * stop the thread, so just break here */ + GST_OBJECT_UNLOCK (self); + break; + } + GST_OBJECT_UNLOCK (self); + + if (flow_return == GST_FLOW_EOS || flow_return == GST_FLOW_ERROR) { + gst_aggregator_push_eos (self); + } + + handle_error: + GST_LOG_OBJECT (self, "flow return is %s", gst_flow_get_name (flow_return)); + + if (flow_return != GST_FLOW_OK) { + GList *item; + + GST_OBJECT_LOCK (self); + for (item = GST_ELEMENT (self)->sinkpads; item; item = item->next) { + GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (item->data); + + gst_aggregator_pad_set_flushing (aggpad, flow_return, TRUE); + } + GST_OBJECT_UNLOCK (self); + break; + } + } + + /* Pause the task here, the only ways to get here are: + * 1) We're stopping, in which case the task is stopped anyway + * 2) We got a flow error above, in which case it might take + * some time to forward the flow return upstream and we + * would otherwise call the task function over and over + * again without doing anything + */ + gst_pad_pause_task (self->srcpad); +} + +static gboolean +gst_aggregator_start (GstAggregator * self) +{ + GstAggregatorClass *klass; + gboolean result; + + self->priv->send_stream_start = TRUE; + self->priv->send_segment = TRUE; + self->priv->send_eos = TRUE; + self->priv->srccaps = NULL; + + gst_aggregator_set_allocation (self, NULL, NULL, NULL, NULL); + + klass = GST_AGGREGATOR_GET_CLASS (self); + + if (klass->start) + result = klass->start (self); + else + result = TRUE; + + return result; +} + +static gboolean +gst_aggregator_stop_srcpad_task (GstAggregator * self, GstEvent * flush_start) +{ + gboolean res = TRUE; + + GST_INFO_OBJECT (self, "%s srcpad task", + flush_start ? 
"Pausing" : "Stopping"); + + SRC_LOCK (self); + self->priv->running = FALSE; + SRC_BROADCAST (self); + SRC_UNLOCK (self); + + if (flush_start) { + res = gst_pad_push_event (self->srcpad, flush_start); + } + + gst_pad_stop_task (self->srcpad); + + return res; +} + +static void +gst_aggregator_start_srcpad_task (GstAggregator * self) +{ + GST_INFO_OBJECT (self, "Starting srcpad task"); + + self->priv->running = TRUE; + gst_pad_start_task (GST_PAD (self->srcpad), + (GstTaskFunction) gst_aggregator_aggregate_func, self, NULL); +} + +static GstFlowReturn +gst_aggregator_flush (GstAggregator * self) +{ + GstFlowReturn ret = GST_FLOW_OK; + GstAggregatorPrivate *priv = self->priv; + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self); + + GST_DEBUG_OBJECT (self, "Flushing everything"); + GST_OBJECT_LOCK (self); + priv->send_segment = TRUE; + priv->flushing = FALSE; + priv->tags_changed = FALSE; + GST_OBJECT_UNLOCK (self); + if (klass->flush) + ret = klass->flush (self); + + return ret; +} + + +/* Called with GstAggregator's object lock held */ + +static gboolean +gst_aggregator_all_flush_stop_received (GstAggregator * self, guint32 seqnum) +{ + GList *tmp; + GstAggregatorPad *tmppad; + + for (tmp = GST_ELEMENT (self)->sinkpads; tmp; tmp = tmp->next) { + tmppad = (GstAggregatorPad *) tmp->data; + + if (tmppad->priv->last_flush_stop_seqnum != seqnum) + return FALSE; + } + + return TRUE; +} + +/* Called with GstAggregator's object lock held */ + +static gboolean +gst_aggregator_all_flush_start_received (GstAggregator * self, guint32 seqnum) +{ + GList *tmp; + GstAggregatorPad *tmppad; + + for (tmp = GST_ELEMENT (self)->sinkpads; tmp; tmp = tmp->next) { + tmppad = (GstAggregatorPad *) tmp->data; + + if (tmppad->priv->last_flush_start_seqnum != seqnum) { + return FALSE; + } + } + + return TRUE; +} + +static void +gst_aggregator_flush_start (GstAggregator * self, GstAggregatorPad * aggpad, + GstEvent * event) +{ + GstAggregatorPrivate *priv = self->priv; + GstAggregatorPadPrivate *padpriv = aggpad->priv; + guint32 seqnum = gst_event_get_seqnum (event); + + gst_aggregator_pad_set_flushing (aggpad, GST_FLOW_FLUSHING, FALSE); + + PAD_FLUSH_LOCK (aggpad); + PAD_LOCK (aggpad); + padpriv->last_flush_start_seqnum = seqnum; + PAD_UNLOCK (aggpad); + + GST_OBJECT_LOCK (self); + + if (!priv->flushing && gst_aggregator_all_flush_start_received (self, seqnum)) { + /* Make sure we don't forward more than one FLUSH_START */ + priv->flushing = TRUE; + priv->next_seqnum = seqnum; + GST_OBJECT_UNLOCK (self); + + GST_INFO_OBJECT (self, "Flushing, pausing srcpad task"); + gst_aggregator_stop_srcpad_task (self, event); + + event = NULL; + } else { + gst_event_unref (event); + GST_OBJECT_UNLOCK (self); + } + + PAD_FLUSH_UNLOCK (aggpad); +} + +/* Must be called with the the PAD_LOCK held */ +static void +update_time_level (GstAggregatorPad * aggpad, gboolean head) +{ + GstAggregatorPadPrivate *priv = aggpad->priv; + + if (head) { + if (GST_CLOCK_TIME_IS_VALID (priv->head_position) && + priv->head_segment.format == GST_FORMAT_TIME) + priv->head_time = gst_segment_to_running_time (&priv->head_segment, + GST_FORMAT_TIME, priv->head_position); + else + priv->head_time = GST_CLOCK_TIME_NONE; + + if (!GST_CLOCK_TIME_IS_VALID (priv->tail_time)) + priv->tail_time = priv->head_time; + } else { + if (GST_CLOCK_TIME_IS_VALID (priv->tail_position) && + aggpad->segment.format == GST_FORMAT_TIME) + priv->tail_time = gst_segment_to_running_time (&aggpad->segment, + GST_FORMAT_TIME, priv->tail_position); + else + priv->tail_time = 
priv->head_time; + } + + if (priv->head_time == GST_CLOCK_TIME_NONE || + priv->tail_time == GST_CLOCK_TIME_NONE) { + priv->time_level = 0; + return; + } + + if (priv->tail_time > priv->head_time) + priv->time_level = 0; + else + priv->time_level = priv->head_time - priv->tail_time; +} + + +/* GstAggregator vmethods default implementations */ +static gboolean +gst_aggregator_default_sink_event (GstAggregator * self, + GstAggregatorPad * aggpad, GstEvent * event) +{ + gboolean res = TRUE; + GstPad *pad = GST_PAD (aggpad); + GstAggregatorPrivate *priv = self->priv; + + GST_DEBUG_OBJECT (aggpad, "Got event: %" GST_PTR_FORMAT, event); + + switch (GST_EVENT_TYPE (event)) { + case GST_EVENT_FLUSH_START: + { + gst_aggregator_flush_start (self, aggpad, event); + /* We forward only in one case: right after flushing */ + event = NULL; + goto eat; + } + case GST_EVENT_FLUSH_STOP: + { + guint32 seqnum = gst_event_get_seqnum (event); + + PAD_FLUSH_LOCK (aggpad); + PAD_LOCK (aggpad); + aggpad->priv->last_flush_stop_seqnum = seqnum; + PAD_UNLOCK (aggpad); + + gst_aggregator_pad_flush (aggpad, self); + + GST_OBJECT_LOCK (self); + if (priv->flushing + && gst_aggregator_all_flush_stop_received (self, seqnum)) { + GST_OBJECT_UNLOCK (self); + /* That means we received FLUSH_STOP/FLUSH_STOP on + * all sinkpads -- Seeking is Done... sending FLUSH_STOP */ + gst_aggregator_flush (self); + gst_pad_push_event (self->srcpad, event); + event = NULL; + SRC_LOCK (self); + priv->send_eos = TRUE; + SRC_BROADCAST (self); + SRC_UNLOCK (self); + + GST_INFO_OBJECT (self, "Flush stopped"); + + gst_aggregator_start_srcpad_task (self); + } else { + GST_OBJECT_UNLOCK (self); + } + + PAD_FLUSH_UNLOCK (aggpad); + + /* We never forward the event */ + goto eat; + } + case GST_EVENT_EOS: + { + SRC_LOCK (self); + PAD_LOCK (aggpad); + g_assert (aggpad->priv->num_buffers == 0); + aggpad->priv->eos = TRUE; + PAD_UNLOCK (aggpad); + SRC_BROADCAST (self); + SRC_UNLOCK (self); + goto eat; + } + case GST_EVENT_SEGMENT: + { + PAD_LOCK (aggpad); + GST_OBJECT_LOCK (aggpad); + gst_event_copy_segment (event, &aggpad->segment); + /* We've got a new segment, tail_position is now meaningless + * and may interfere with the time_level calculation + */ + aggpad->priv->tail_position = GST_CLOCK_TIME_NONE; + update_time_level (aggpad, FALSE); + GST_OBJECT_UNLOCK (aggpad); + PAD_UNLOCK (aggpad); + + GST_OBJECT_LOCK (self); + self->priv->seqnum = gst_event_get_seqnum (event); + GST_OBJECT_UNLOCK (self); + goto eat; + } + case GST_EVENT_STREAM_START: + { + goto eat; + } + case GST_EVENT_GAP: + { + GstClockTime pts, endpts; + GstClockTime duration; + GstBuffer *gapbuf; + + gst_event_parse_gap (event, &pts, &duration); + + if (GST_CLOCK_TIME_IS_VALID (duration)) + endpts = pts + duration; + else + endpts = GST_CLOCK_TIME_NONE; + + GST_OBJECT_LOCK (aggpad); + res = gst_segment_clip (&aggpad->segment, GST_FORMAT_TIME, pts, endpts, + &pts, &endpts); + GST_OBJECT_UNLOCK (aggpad); + + if (!res) { + GST_WARNING_OBJECT (self, "GAP event outside segment, dropping"); + goto eat; + } + + if (GST_CLOCK_TIME_IS_VALID (endpts) && GST_CLOCK_TIME_IS_VALID (pts)) + duration = endpts - pts; + else + duration = GST_CLOCK_TIME_NONE; + + gapbuf = gst_buffer_new (); + GST_BUFFER_PTS (gapbuf) = pts; + GST_BUFFER_DURATION (gapbuf) = duration; + GST_BUFFER_FLAG_SET (gapbuf, GST_BUFFER_FLAG_GAP); + GST_BUFFER_FLAG_SET (gapbuf, GST_BUFFER_FLAG_DROPPABLE); + + /* Remove GAP event so we can replace it with the buffer */ + PAD_LOCK (aggpad); + if (g_queue_peek_tail (&aggpad->priv->data) == 
event) + gst_event_unref (g_queue_pop_tail (&aggpad->priv->data)); + PAD_UNLOCK (aggpad); + + if (gst_aggregator_pad_chain_internal (self, aggpad, gapbuf, FALSE) != + GST_FLOW_OK) { + GST_WARNING_OBJECT (self, "Failed to chain gap buffer"); + res = FALSE; + } + + goto eat; + } + case GST_EVENT_TAG: + goto eat; + default: + { + break; + } + } + + GST_DEBUG_OBJECT (pad, "Forwarding event: %" GST_PTR_FORMAT, event); + return gst_pad_event_default (pad, GST_OBJECT (self), event); + +eat: + GST_DEBUG_OBJECT (pad, "Eating event: %" GST_PTR_FORMAT, event); + if (event) + gst_event_unref (event); + + return res; +} + +/* Queue serialized events and let the others go through directly. + * The queued events with be handled from the src-pad task in + * gst_aggregator_do_events_and_queries(). + */ +static GstFlowReturn +gst_aggregator_default_sink_event_pre_queue (GstAggregator * self, + GstAggregatorPad * aggpad, GstEvent * event) +{ + GstFlowReturn ret = GST_FLOW_OK; + + if (GST_EVENT_IS_SERIALIZED (event) + && GST_EVENT_TYPE (event) != GST_EVENT_FLUSH_STOP) { + SRC_LOCK (self); + PAD_LOCK (aggpad); + + if (aggpad->priv->flow_return != GST_FLOW_OK) + goto flushing; + + if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) { + GST_OBJECT_LOCK (aggpad); + gst_event_copy_segment (event, &aggpad->priv->head_segment); + aggpad->priv->head_position = aggpad->priv->head_segment.position; + update_time_level (aggpad, TRUE); + GST_OBJECT_UNLOCK (aggpad); + } + + GST_DEBUG_OBJECT (aggpad, "Store event in queue: %" GST_PTR_FORMAT, event); + g_queue_push_head (&aggpad->priv->data, event); + SRC_BROADCAST (self); + PAD_UNLOCK (aggpad); + SRC_UNLOCK (self); + } else { + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self); + + if (!klass->sink_event (self, aggpad, event)) { + /* Copied from GstPad to convert boolean to a GstFlowReturn in + * the event handling func */ + ret = GST_FLOW_ERROR; + } + } + + return ret; + +flushing: + GST_DEBUG_OBJECT (aggpad, "Pad is %s, dropping event", + gst_flow_get_name (aggpad->priv->flow_return)); + PAD_UNLOCK (aggpad); + SRC_UNLOCK (self); + if (GST_EVENT_IS_STICKY (event)) + gst_pad_store_sticky_event (GST_PAD (aggpad), event); + gst_event_unref (event); + + return aggpad->priv->flow_return; +} + +static gboolean +gst_aggregator_stop_pad (GstElement * self, GstPad * epad, gpointer user_data) +{ + GstAggregatorPad *pad = GST_AGGREGATOR_PAD_CAST (epad); + GstAggregator *agg = GST_AGGREGATOR_CAST (self); + + gst_aggregator_pad_flush (pad, agg); + + PAD_LOCK (pad); + pad->priv->flow_return = GST_FLOW_FLUSHING; + pad->priv->negotiated = FALSE; + PAD_BROADCAST_EVENT (pad); + PAD_UNLOCK (pad); + + return TRUE; +} + +static gboolean +gst_aggregator_stop (GstAggregator * agg) +{ + GstAggregatorClass *klass; + gboolean result; + + gst_aggregator_reset_flow_values (agg); + + /* Application needs to make sure no pads are added while it shuts us down */ + gst_element_foreach_sink_pad (GST_ELEMENT_CAST (agg), + gst_aggregator_stop_pad, NULL); + + klass = GST_AGGREGATOR_GET_CLASS (agg); + + if (klass->stop) + result = klass->stop (agg); + else + result = TRUE; + + agg->priv->has_peer_latency = FALSE; + agg->priv->peer_latency_live = FALSE; + agg->priv->peer_latency_min = agg->priv->peer_latency_max = 0; + + if (agg->priv->tags) + gst_tag_list_unref (agg->priv->tags); + agg->priv->tags = NULL; + + gst_aggregator_set_allocation (agg, NULL, NULL, NULL, NULL); + + if (agg->priv->running) { + /* As sinkpads get deactivated after the src pad, we + * may have restarted the source pad task after 
receiving + * flush events on one of our sinkpads. Stop our src pad + * task again if that is the case */ + gst_aggregator_stop_srcpad_task (agg, NULL); + } + + return result; +} + +/* GstElement vmethods implementations */ +static GstStateChangeReturn +gst_aggregator_change_state (GstElement * element, GstStateChange transition) +{ + GstStateChangeReturn ret; + GstAggregator *self = GST_AGGREGATOR (element); + + switch (transition) { + case GST_STATE_CHANGE_READY_TO_PAUSED: + if (!gst_aggregator_start (self)) + goto error_start; + break; + default: + break; + } + + if ((ret = + GST_ELEMENT_CLASS (aggregator_parent_class)->change_state (element, + transition)) == GST_STATE_CHANGE_FAILURE) + goto failure; + + + switch (transition) { + case GST_STATE_CHANGE_PAUSED_TO_READY: + if (!gst_aggregator_stop (self)) { + /* What to do in this case? Error out? */ + GST_ERROR_OBJECT (self, "Subclass failed to stop."); + } + break; + default: + break; + } + + return ret; + +/* ERRORS */ +failure: + { + GST_ERROR_OBJECT (element, "parent failed state change"); + return ret; + } +error_start: + { + GST_ERROR_OBJECT (element, "Subclass failed to start"); + return GST_STATE_CHANGE_FAILURE; + } +} + +static void +gst_aggregator_release_pad (GstElement * element, GstPad * pad) +{ + GstAggregator *self = GST_AGGREGATOR (element); + GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad); + + GST_INFO_OBJECT (pad, "Removing pad"); + + SRC_LOCK (self); + gst_aggregator_pad_set_flushing (aggpad, GST_FLOW_FLUSHING, TRUE); + gst_element_remove_pad (element, pad); + + self->priv->has_peer_latency = FALSE; + SRC_BROADCAST (self); + SRC_UNLOCK (self); +} + +static GstAggregatorPad * +gst_aggregator_default_create_new_pad (GstAggregator * self, + GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps) +{ + GstAggregatorPad *agg_pad; + GstAggregatorPrivate *priv = self->priv; + gint serial = 0; + gchar *name = NULL; + GType pad_type = + GST_PAD_TEMPLATE_GTYPE (templ) == + G_TYPE_NONE ? 
GST_TYPE_AGGREGATOR_PAD : GST_PAD_TEMPLATE_GTYPE (templ); + + if (templ->direction != GST_PAD_SINK) + goto not_sink; + + if (templ->presence != GST_PAD_REQUEST) + goto not_request; + + GST_OBJECT_LOCK (self); + if (req_name == NULL || strlen (req_name) < 6 + || !g_str_has_prefix (req_name, "sink_") + || strrchr (req_name, '%') != NULL) { + /* no name given when requesting the pad, use next available int */ + serial = ++priv->max_padserial; + } else { + gchar *endptr = NULL; + + /* parse serial number from requested padname */ + serial = g_ascii_strtoull (&req_name[5], &endptr, 10); + if (endptr != NULL && *endptr == '\0') { + if (serial > priv->max_padserial) { + priv->max_padserial = serial; + } + } else { + serial = ++priv->max_padserial; + } + } + + name = g_strdup_printf ("sink_%u", serial); + g_assert (g_type_is_a (pad_type, GST_TYPE_AGGREGATOR_PAD)); + agg_pad = g_object_new (pad_type, + "name", name, "direction", GST_PAD_SINK, "template", templ, NULL); + g_free (name); + + GST_OBJECT_UNLOCK (self); + + return agg_pad; + + /* errors */ +not_sink: + { + GST_WARNING_OBJECT (self, "request new pad that is not a SINK pad"); + return NULL; + } +not_request: + { + GST_WARNING_OBJECT (self, "request new pad that is not a REQUEST pad"); + return NULL; + } +} + +static GstPad * +gst_aggregator_request_new_pad (GstElement * element, + GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps) +{ + GstAggregator *self; + GstAggregatorPad *agg_pad; + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (element); + GstAggregatorPrivate *priv = GST_AGGREGATOR (element)->priv; + + self = GST_AGGREGATOR (element); + + agg_pad = klass->create_new_pad (self, templ, req_name, caps); + if (!agg_pad) { + GST_ERROR_OBJECT (element, "Couldn't create new pad"); + return NULL; + } + + GST_DEBUG_OBJECT (element, "Adding pad %s", GST_PAD_NAME (agg_pad)); + + if (priv->running) + gst_pad_set_active (GST_PAD (agg_pad), TRUE); + + /* add the pad to the element */ + gst_element_add_pad (element, GST_PAD (agg_pad)); + + return GST_PAD (agg_pad); +} + +/* Must be called with SRC_LOCK held */ + +static gboolean +gst_aggregator_query_latency_unlocked (GstAggregator * self, GstQuery * query) +{ + gboolean query_ret, live; + GstClockTime our_latency, min, max; + + query_ret = gst_pad_query_default (self->srcpad, GST_OBJECT (self), query); + + if (!query_ret) { + GST_WARNING_OBJECT (self, "Latency query failed"); + return FALSE; + } + + gst_query_parse_latency (query, &live, &min, &max); + + if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (min))) { + GST_ERROR_OBJECT (self, "Invalid minimum latency %" GST_TIME_FORMAT + ". Please file a bug at " PACKAGE_BUGREPORT ".", GST_TIME_ARGS (min)); + return FALSE; + } + + if (self->priv->upstream_latency_min > min) { + GstClockTimeDiff diff = + GST_CLOCK_DIFF (min, self->priv->upstream_latency_min); + + min += diff; + if (GST_CLOCK_TIME_IS_VALID (max)) { + max += diff; + } + } + + if (min > max && GST_CLOCK_TIME_IS_VALID (max)) { + GST_ELEMENT_WARNING (self, CORE, CLOCK, (NULL), + ("Impossible to configure latency: max %" GST_TIME_FORMAT " < min %" + GST_TIME_FORMAT ". 
Add queues or other buffering elements.", + GST_TIME_ARGS (max), GST_TIME_ARGS (min))); + return FALSE; + } + + our_latency = self->priv->latency; + + self->priv->peer_latency_live = live; + self->priv->peer_latency_min = min; + self->priv->peer_latency_max = max; + self->priv->has_peer_latency = TRUE; + + /* add our own */ + min += our_latency; + min += self->priv->sub_latency_min; + if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max) + && GST_CLOCK_TIME_IS_VALID (max)) + max += self->priv->sub_latency_max + our_latency; + else + max = GST_CLOCK_TIME_NONE; + + SRC_BROADCAST (self); + + GST_DEBUG_OBJECT (self, "configured latency live:%s min:%" G_GINT64_FORMAT + " max:%" G_GINT64_FORMAT, live ? "true" : "false", min, max); + + gst_query_set_latency (query, live, min, max); + + return query_ret; +} + +/* + * MUST be called with the src_lock held. + * + * See gst_aggregator_get_latency() for doc + */ +static GstClockTime +gst_aggregator_get_latency_unlocked (GstAggregator * self) +{ + GstClockTime latency; + + g_return_val_if_fail (GST_IS_AGGREGATOR (self), 0); + + if (!self->priv->has_peer_latency) { + GstQuery *query = gst_query_new_latency (); + gboolean ret; + + ret = gst_aggregator_query_latency_unlocked (self, query); + gst_query_unref (query); + if (!ret) + return GST_CLOCK_TIME_NONE; + } + + if (!self->priv->has_peer_latency || !self->priv->peer_latency_live) + return GST_CLOCK_TIME_NONE; + + /* latency_min is never GST_CLOCK_TIME_NONE by construction */ + latency = self->priv->peer_latency_min; + + /* add our own */ + latency += self->priv->latency; + latency += self->priv->sub_latency_min; + + return latency; +} + +/** + * gst_aggregator_get_latency: + * @self: a #GstAggregator + * + * Retrieves the latency values reported by @self in response to the latency + * query, or %GST_CLOCK_TIME_NONE if there is not live source connected and the element + * will not wait for the clock. + * + * Typically only called by subclasses. 
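+ *
+ * For example, a deadline based subclass could combine the returned value
+ * with a start running time to compute when output is due (the variable
+ * names below are only placeholders for subclass specific state):
+ *
+ * |[<!-- language="C" -->
+ * GstClockTime latency = gst_aggregator_get_latency (agg);
+ *
+ * if (GST_CLOCK_TIME_IS_VALID (latency))
+ *   deadline = start_running_time + latency;
+ * ]|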
+ * + * Returns: The latency or %GST_CLOCK_TIME_NONE if the element does not sync + */ +GstClockTime +gst_aggregator_get_latency (GstAggregator * self) +{ + GstClockTime ret; + + SRC_LOCK (self); + ret = gst_aggregator_get_latency_unlocked (self); + SRC_UNLOCK (self); + + return ret; +} + +static gboolean +gst_aggregator_send_event (GstElement * element, GstEvent * event) +{ + GstAggregator *self = GST_AGGREGATOR (element); + + GST_STATE_LOCK (element); + if (GST_EVENT_TYPE (event) == GST_EVENT_SEEK && + GST_STATE (element) < GST_STATE_PAUSED) { + gdouble rate; + GstFormat fmt; + GstSeekFlags flags; + GstSeekType start_type, stop_type; + gint64 start, stop; + + gst_event_parse_seek (event, &rate, &fmt, &flags, &start_type, + &start, &stop_type, &stop); + + GST_OBJECT_LOCK (self); + gst_segment_do_seek (&GST_AGGREGATOR_PAD (self->srcpad)->segment, rate, fmt, + flags, start_type, start, stop_type, stop, NULL); + self->priv->next_seqnum = gst_event_get_seqnum (event); + self->priv->first_buffer = FALSE; + GST_OBJECT_UNLOCK (self); + + GST_DEBUG_OBJECT (element, "Storing segment %" GST_PTR_FORMAT, event); + } + GST_STATE_UNLOCK (element); + + return GST_ELEMENT_CLASS (aggregator_parent_class)->send_event (element, + event); +} + +static gboolean +gst_aggregator_default_src_query (GstAggregator * self, GstQuery * query) +{ + gboolean res = TRUE; + + switch (GST_QUERY_TYPE (query)) { + case GST_QUERY_SEEKING: + { + GstFormat format; + + /* don't pass it along as some (file)sink might claim it does + * whereas with a collectpads in between that will not likely work */ + gst_query_parse_seeking (query, &format, NULL, NULL, NULL); + gst_query_set_seeking (query, format, FALSE, 0, -1); + res = TRUE; + + break; + } + case GST_QUERY_LATENCY: + SRC_LOCK (self); + res = gst_aggregator_query_latency_unlocked (self, query); + SRC_UNLOCK (self); + break; + default: + return gst_pad_query_default (self->srcpad, GST_OBJECT (self), query); + } + + return res; +} + +static gboolean +gst_aggregator_event_forward_func (GstPad * pad, gpointer user_data) +{ + EventData *evdata = user_data; + gboolean ret = TRUE; + GstPad *peer = gst_pad_get_peer (pad); + GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad); + + if (peer) { + if (evdata->only_to_active_pads && aggpad->priv->first_buffer) { + GST_DEBUG_OBJECT (pad, "not sending event to inactive pad"); + ret = TRUE; + } else { + ret = gst_pad_send_event (peer, gst_event_ref (evdata->event)); + GST_DEBUG_OBJECT (pad, "return of event push is %d", ret); + } + } + + if (ret == FALSE) { + if (GST_EVENT_TYPE (evdata->event) == GST_EVENT_SEEK) { + GstQuery *seeking = gst_query_new_seeking (GST_FORMAT_TIME); + + GST_DEBUG_OBJECT (pad, "Event %" GST_PTR_FORMAT " failed", evdata->event); + + if (gst_pad_query (peer, seeking)) { + gboolean seekable; + + gst_query_parse_seeking (seeking, NULL, &seekable, NULL, NULL); + + if (seekable == FALSE) { + GST_INFO_OBJECT (pad, + "Source not seekable, We failed but it does not matter!"); + + ret = TRUE; + } + } else { + GST_ERROR_OBJECT (pad, "Query seeking FAILED"); + } + + gst_query_unref (seeking); + } + } else { + evdata->one_actually_seeked = TRUE; + } + + evdata->result &= ret; + + if (peer) + gst_object_unref (peer); + + /* Always send to all pads */ + return FALSE; +} + +static void +gst_aggregator_forward_event_to_all_sinkpads (GstAggregator * self, + EventData * evdata) +{ + evdata->result = TRUE; + evdata->one_actually_seeked = FALSE; + + gst_pad_forward (self->srcpad, gst_aggregator_event_forward_func, evdata); + + 
gst_event_unref (evdata->event); +} + +static gboolean +gst_aggregator_do_seek (GstAggregator * self, GstEvent * event) +{ + gdouble rate; + GstFormat fmt; + GstSeekFlags flags; + GstSeekType start_type, stop_type; + gint64 start, stop; + gboolean flush; + EventData evdata = { 0, }; + GstAggregatorPrivate *priv = self->priv; + + gst_event_parse_seek (event, &rate, &fmt, &flags, &start_type, + &start, &stop_type, &stop); + + GST_INFO_OBJECT (self, "starting SEEK"); + + flush = flags & GST_SEEK_FLAG_FLUSH; + + GST_OBJECT_LOCK (self); + + if (gst_event_get_seqnum (event) == self->priv->next_seqnum) { + evdata.result = TRUE; + GST_DEBUG_OBJECT (self, "Dropping duplicated seek event with seqnum %d", + self->priv->next_seqnum); + GST_OBJECT_UNLOCK (self); + goto done; + } + + self->priv->next_seqnum = gst_event_get_seqnum (event); + + gst_segment_do_seek (&GST_AGGREGATOR_PAD (self->srcpad)->segment, rate, fmt, + flags, start_type, start, stop_type, stop, NULL); + + /* Seeking sets a position */ + self->priv->first_buffer = FALSE; + + if (flush) + priv->flushing = TRUE; + + GST_OBJECT_UNLOCK (self); + + if (flush) { + GstEvent *event = gst_event_new_flush_start (); + + gst_event_set_seqnum (event, self->priv->next_seqnum); + gst_aggregator_stop_srcpad_task (self, event); + } + + /* forward the seek upstream */ + evdata.event = event; + evdata.flush = flush; + evdata.only_to_active_pads = FALSE; + gst_aggregator_forward_event_to_all_sinkpads (self, &evdata); + event = NULL; + + if (!evdata.result || !evdata.one_actually_seeked) { + GST_OBJECT_LOCK (self); + priv->flushing = FALSE; + GST_OBJECT_UNLOCK (self); + + /* No flush stop is inbound for us to forward */ + if (flush) { + GstEvent *event = gst_event_new_flush_stop (TRUE); + + gst_event_set_seqnum (event, self->priv->next_seqnum); + gst_pad_push_event (self->srcpad, event); + } + } + +done: + GST_INFO_OBJECT (self, "seek done, result: %d", evdata.result); + + return evdata.result; +} + +static gboolean +gst_aggregator_default_src_event (GstAggregator * self, GstEvent * event) +{ + EventData evdata = { 0, }; + + switch (GST_EVENT_TYPE (event)) { + case GST_EVENT_SEEK: + /* _do_seek() unrefs the event. */ + return gst_aggregator_do_seek (self, event); + case GST_EVENT_NAVIGATION: + /* navigation is rather pointless. */ + gst_event_unref (event); + return FALSE; + default: + break; + } + + /* Don't forward QOS events to pads that had no active buffer yet. 
Otherwise + * they will receive a QOS event that has earliest_time=0 (because we can't + * have negative timestamps), and consider their buffer as too late */ + evdata.event = event; + evdata.flush = FALSE; + evdata.only_to_active_pads = GST_EVENT_TYPE (event) == GST_EVENT_QOS; + gst_aggregator_forward_event_to_all_sinkpads (self, &evdata); + return evdata.result; +} + +static gboolean +gst_aggregator_src_pad_event_func (GstPad * pad, GstObject * parent, + GstEvent * event) +{ + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent); + + return klass->src_event (GST_AGGREGATOR (parent), event); +} + +static gboolean +gst_aggregator_src_pad_query_func (GstPad * pad, GstObject * parent, + GstQuery * query) +{ + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent); + + return klass->src_query (GST_AGGREGATOR (parent), query); +} + +static gboolean +gst_aggregator_src_pad_activate_mode_func (GstPad * pad, + GstObject * parent, GstPadMode mode, gboolean active) +{ + GstAggregator *self = GST_AGGREGATOR (parent); + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent); + + if (klass->src_activate) { + if (klass->src_activate (self, mode, active) == FALSE) { + return FALSE; + } + } + + if (active == TRUE) { + switch (mode) { + case GST_PAD_MODE_PUSH: + { + GST_INFO_OBJECT (pad, "Activating pad!"); + gst_aggregator_start_srcpad_task (self); + return TRUE; + } + default: + { + GST_ERROR_OBJECT (pad, "Only supported mode is PUSH"); + return FALSE; + } + } + } + + /* deactivating */ + GST_INFO_OBJECT (self, "Deactivating srcpad"); + + gst_aggregator_stop_srcpad_task (self, FALSE); + + return TRUE; +} + +static gboolean +gst_aggregator_default_sink_query (GstAggregator * self, + GstAggregatorPad * aggpad, GstQuery * query) +{ + GstPad *pad = GST_PAD (aggpad); + + if (GST_QUERY_TYPE (query) == GST_QUERY_ALLOCATION) { + GstQuery *decide_query = NULL; + GstAggregatorClass *agg_class; + gboolean ret; + + GST_OBJECT_LOCK (self); + PAD_LOCK (aggpad); + if (G_UNLIKELY (!aggpad->priv->negotiated)) { + GST_DEBUG_OBJECT (self, + "not negotiated yet, can't answer ALLOCATION query"); + PAD_UNLOCK (aggpad); + GST_OBJECT_UNLOCK (self); + + return FALSE; + } + + if ((decide_query = self->priv->allocation_query)) + gst_query_ref (decide_query); + PAD_UNLOCK (aggpad); + GST_OBJECT_UNLOCK (self); + + GST_DEBUG_OBJECT (self, + "calling propose allocation with query %" GST_PTR_FORMAT, decide_query); + + agg_class = GST_AGGREGATOR_GET_CLASS (self); + + /* pass the query to the propose_allocation vmethod if any */ + if (agg_class->propose_allocation) + ret = agg_class->propose_allocation (self, aggpad, decide_query, query); + else + ret = FALSE; + + if (decide_query) + gst_query_unref (decide_query); + + GST_DEBUG_OBJECT (self, "ALLOCATION ret %d, %" GST_PTR_FORMAT, ret, query); + return ret; + } + + return gst_pad_query_default (pad, GST_OBJECT (self), query); +} + +static gboolean +gst_aggregator_default_sink_query_pre_queue (GstAggregator * self, + GstAggregatorPad * aggpad, GstQuery * query) +{ + if (GST_QUERY_IS_SERIALIZED (query)) { + GstStructure *s; + gboolean ret = FALSE; + + SRC_LOCK (self); + PAD_LOCK (aggpad); + + if (aggpad->priv->flow_return != GST_FLOW_OK) { + SRC_UNLOCK (self); + goto flushing; + } + + g_queue_push_head (&aggpad->priv->data, query); + SRC_BROADCAST (self); + SRC_UNLOCK (self); + + while (!gst_aggregator_pad_queue_is_empty (aggpad) + && aggpad->priv->flow_return == GST_FLOW_OK) { + GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed"); + PAD_WAIT_EVENT 
(aggpad); + } + + s = gst_query_writable_structure (query); + if (gst_structure_get_boolean (s, "gst-aggregator-retval", &ret)) + gst_structure_remove_field (s, "gst-aggregator-retval"); + else + g_queue_remove (&aggpad->priv->data, query); + + if (aggpad->priv->flow_return != GST_FLOW_OK) + goto flushing; + + PAD_UNLOCK (aggpad); + + return ret; + } else { + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self); + + return klass->sink_query (self, aggpad, query); + } + +flushing: + GST_DEBUG_OBJECT (aggpad, "Pad is %s, dropping query", + gst_flow_get_name (aggpad->priv->flow_return)); + PAD_UNLOCK (aggpad); + + return FALSE; +} + +static void +gst_aggregator_finalize (GObject * object) +{ + GstAggregator *self = (GstAggregator *) object; + + g_mutex_clear (&self->priv->src_lock); + g_cond_clear (&self->priv->src_cond); + + G_OBJECT_CLASS (aggregator_parent_class)->finalize (object); +} + +/* + * gst_aggregator_set_latency_property: + * @agg: a #GstAggregator + * @latency: the new latency value (in nanoseconds). + * + * Sets the new latency value to @latency. This value is used to limit the + * amount of time a pad waits for data to appear before considering the pad + * as unresponsive. + */ +static void +gst_aggregator_set_latency_property (GstAggregator * self, GstClockTime latency) +{ + gboolean changed; + + g_return_if_fail (GST_IS_AGGREGATOR (self)); + g_return_if_fail (GST_CLOCK_TIME_IS_VALID (latency)); + + SRC_LOCK (self); + changed = (self->priv->latency != latency); + + if (changed) { + GList *item; + + GST_OBJECT_LOCK (self); + /* First lock all the pads */ + for (item = GST_ELEMENT_CAST (self)->sinkpads; item; item = item->next) { + GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (item->data); + PAD_LOCK (aggpad); + } + + self->priv->latency = latency; + + SRC_BROADCAST (self); + + /* Now wake up the pads */ + for (item = GST_ELEMENT_CAST (self)->sinkpads; item; item = item->next) { + GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (item->data); + PAD_BROADCAST_EVENT (aggpad); + PAD_UNLOCK (aggpad); + } + GST_OBJECT_UNLOCK (self); + } + + SRC_UNLOCK (self); + + if (changed) + gst_element_post_message (GST_ELEMENT_CAST (self), + gst_message_new_latency (GST_OBJECT_CAST (self))); +} + +/* + * gst_aggregator_get_latency_property: + * @agg: a #GstAggregator + * + * Gets the latency value. See gst_aggregator_set_latency for + * more details. + * + * Returns: The time in nanoseconds to wait for data to arrive on a sink pad + * before a pad is deemed unresponsive. A value of -1 means an + * unlimited time. 
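+ *
+ * For example, an application can let pads wait for up to 100 milliseconds
+ * for data before they are considered unresponsive ("aggregator" below is
+ * any element derived from this base class):
+ *
+ *   g_object_set (aggregator, "latency", 100 * GST_MSECOND, NULL);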
+ */ +static GstClockTime +gst_aggregator_get_latency_property (GstAggregator * agg) +{ + GstClockTime res; + + g_return_val_if_fail (GST_IS_AGGREGATOR (agg), GST_CLOCK_TIME_NONE); + + GST_OBJECT_LOCK (agg); + res = agg->priv->latency; + GST_OBJECT_UNLOCK (agg); + + return res; +} + +static void +gst_aggregator_set_property (GObject * object, guint prop_id, + const GValue * value, GParamSpec * pspec) +{ + GstAggregator *agg = GST_AGGREGATOR (object); + + switch (prop_id) { + case PROP_LATENCY: + gst_aggregator_set_latency_property (agg, g_value_get_uint64 (value)); + break; + case PROP_MIN_UPSTREAM_LATENCY: + SRC_LOCK (agg); + agg->priv->upstream_latency_min = g_value_get_uint64 (value); + SRC_UNLOCK (agg); + break; + case PROP_START_TIME_SELECTION: + agg->priv->start_time_selection = g_value_get_enum (value); + break; + case PROP_START_TIME: + agg->priv->start_time = g_value_get_uint64 (value); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); + break; + } +} + +static void +gst_aggregator_get_property (GObject * object, guint prop_id, + GValue * value, GParamSpec * pspec) +{ + GstAggregator *agg = GST_AGGREGATOR (object); + + switch (prop_id) { + case PROP_LATENCY: + g_value_set_uint64 (value, gst_aggregator_get_latency_property (agg)); + break; + case PROP_MIN_UPSTREAM_LATENCY: + SRC_LOCK (agg); + g_value_set_uint64 (value, agg->priv->upstream_latency_min); + SRC_UNLOCK (agg); + break; + case PROP_START_TIME_SELECTION: + g_value_set_enum (value, agg->priv->start_time_selection); + break; + case PROP_START_TIME: + g_value_set_uint64 (value, agg->priv->start_time); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); + break; + } +} + +/* GObject vmethods implementations */ +static void +gst_aggregator_class_init (GstAggregatorClass * klass) +{ + GObjectClass *gobject_class = (GObjectClass *) klass; + GstElementClass *gstelement_class = (GstElementClass *) klass; + + aggregator_parent_class = g_type_class_peek_parent (klass); + + GST_DEBUG_CATEGORY_INIT (aggregator_debug, "aggregator", + GST_DEBUG_FG_MAGENTA, "GstAggregator"); + + if (aggregator_private_offset != 0) + g_type_class_adjust_private_offset (klass, &aggregator_private_offset); + + klass->finish_buffer = gst_aggregator_default_finish_buffer; + + klass->sink_event = gst_aggregator_default_sink_event; + klass->sink_query = gst_aggregator_default_sink_query; + + klass->src_event = gst_aggregator_default_src_event; + klass->src_query = gst_aggregator_default_src_query; + + klass->create_new_pad = gst_aggregator_default_create_new_pad; + klass->update_src_caps = gst_aggregator_default_update_src_caps; + klass->fixate_src_caps = gst_aggregator_default_fixate_src_caps; + klass->negotiated_src_caps = gst_aggregator_default_negotiated_src_caps; + + klass->negotiate = gst_aggregator_default_negotiate; + + klass->sink_event_pre_queue = gst_aggregator_default_sink_event_pre_queue; + klass->sink_query_pre_queue = gst_aggregator_default_sink_query_pre_queue; + + gstelement_class->request_new_pad = + GST_DEBUG_FUNCPTR (gst_aggregator_request_new_pad); + gstelement_class->send_event = GST_DEBUG_FUNCPTR (gst_aggregator_send_event); + gstelement_class->release_pad = + GST_DEBUG_FUNCPTR (gst_aggregator_release_pad); + gstelement_class->change_state = + GST_DEBUG_FUNCPTR (gst_aggregator_change_state); + + gobject_class->set_property = gst_aggregator_set_property; + gobject_class->get_property = gst_aggregator_get_property; + gobject_class->finalize = gst_aggregator_finalize; + + 
g_object_class_install_property (gobject_class, PROP_LATENCY, + g_param_spec_uint64 ("latency", "Buffer latency", + "Additional latency in live mode to allow upstream " + "to take longer to produce buffers for the current " + "position (in nanoseconds)", 0, G_MAXUINT64, + DEFAULT_LATENCY, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + + /** + * GstAggregator:min-upstream-latency: + * + * Force minimum upstream latency (in nanoseconds). When sources with a + * higher latency are expected to be plugged in dynamically after the + * aggregator has started playing, this allows overriding the minimum + * latency reported by the initial source(s). This is only taken into + * account when larger than the actually reported minimum latency. + * + * Since: 1.16 + */ + g_object_class_install_property (gobject_class, PROP_MIN_UPSTREAM_LATENCY, + g_param_spec_uint64 ("min-upstream-latency", "Buffer latency", + "When sources with a higher latency are expected to be plugged " + "in dynamically after the aggregator has started playing, " + "this allows overriding the minimum latency reported by the " + "initial source(s). This is only taken into account when larger " + "than the actually reported minimum latency. (nanoseconds)", + 0, G_MAXUINT64, + DEFAULT_LATENCY, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + + g_object_class_install_property (gobject_class, PROP_START_TIME_SELECTION, + g_param_spec_enum ("start-time-selection", "Start Time Selection", + "Decides which start time is output", + gst_aggregator_start_time_selection_get_type (), + DEFAULT_START_TIME_SELECTION, + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + + g_object_class_install_property (gobject_class, PROP_START_TIME, + g_param_spec_uint64 ("start-time", "Start Time", + "Start time to use if start-time-selection=set", 0, + G_MAXUINT64, + DEFAULT_START_TIME, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); +} + +static inline gpointer +gst_aggregator_get_instance_private (GstAggregator * self) +{ + return (G_STRUCT_MEMBER_P (self, aggregator_private_offset)); +} + +static void +gst_aggregator_init (GstAggregator * self, GstAggregatorClass * klass) +{ + GstPadTemplate *pad_template; + GstAggregatorPrivate *priv; + GType pad_type; + + g_return_if_fail (klass->aggregate != NULL); + + self->priv = gst_aggregator_get_instance_private (self); + + priv = self->priv; + + pad_template = + gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src"); + g_return_if_fail (pad_template != NULL); + + priv->max_padserial = -1; + priv->tags_changed = FALSE; + + self->priv->peer_latency_live = FALSE; + self->priv->peer_latency_min = self->priv->sub_latency_min = 0; + self->priv->peer_latency_max = self->priv->sub_latency_max = 0; + self->priv->has_peer_latency = FALSE; + + pad_type = + GST_PAD_TEMPLATE_GTYPE (pad_template) == + G_TYPE_NONE ? 
GST_TYPE_AGGREGATOR_PAD : + GST_PAD_TEMPLATE_GTYPE (pad_template); + g_assert (g_type_is_a (pad_type, GST_TYPE_AGGREGATOR_PAD)); + self->srcpad = + g_object_new (pad_type, "name", "src", "direction", GST_PAD_SRC, + "template", pad_template, NULL); + + gst_aggregator_reset_flow_values (self); + + gst_pad_set_event_function (self->srcpad, + GST_DEBUG_FUNCPTR (gst_aggregator_src_pad_event_func)); + gst_pad_set_query_function (self->srcpad, + GST_DEBUG_FUNCPTR (gst_aggregator_src_pad_query_func)); + gst_pad_set_activatemode_function (self->srcpad, + GST_DEBUG_FUNCPTR (gst_aggregator_src_pad_activate_mode_func)); + + gst_element_add_pad (GST_ELEMENT (self), self->srcpad); + + self->priv->upstream_latency_min = DEFAULT_MIN_UPSTREAM_LATENCY; + self->priv->latency = DEFAULT_LATENCY; + self->priv->start_time_selection = DEFAULT_START_TIME_SELECTION; + self->priv->start_time = DEFAULT_START_TIME; + + g_mutex_init (&self->priv->src_lock); + g_cond_init (&self->priv->src_cond); +} + +/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init + * method to get to the padtemplates */ +GType +gst_aggregator_get_type (void) +{ + static gsize type = 0; + + if (g_once_init_enter (&type)) { + GType _type; + static const GTypeInfo info = { + sizeof (GstAggregatorClass), + NULL, + NULL, + (GClassInitFunc) gst_aggregator_class_init, + NULL, + NULL, + sizeof (GstAggregator), + 0, + (GInstanceInitFunc) gst_aggregator_init, + }; + + _type = g_type_register_static (GST_TYPE_ELEMENT, + "GstAggregatorFallback", &info, G_TYPE_FLAG_ABSTRACT); + + aggregator_private_offset = + g_type_add_instance_private (_type, sizeof (GstAggregatorPrivate)); + + g_once_init_leave (&type, _type); + } + return type; +} + +/* Must be called with SRC lock and PAD lock held */ +static gboolean +gst_aggregator_pad_has_space (GstAggregator * self, GstAggregatorPad * aggpad) +{ + /* Empty queue always has space */ + if (aggpad->priv->num_buffers == 0 && aggpad->priv->clipped_buffer == NULL) + return TRUE; + + /* We also want at least two buffers, one is being processed and one is ready + * for the next iteration when we operate in live mode. */ + if (self->priv->peer_latency_live && aggpad->priv->num_buffers < 2) + return TRUE; + + /* zero latency, if there is a buffer, it's full */ + if (self->priv->latency == 0) + return FALSE; + + /* Allow no more buffers than the latency */ + return (aggpad->priv->time_level <= self->priv->latency); +} + +/* Must be called with the PAD_LOCK held */ +static void +apply_buffer (GstAggregatorPad * aggpad, GstBuffer * buffer, gboolean head) +{ + GstClockTime timestamp; + + if (GST_BUFFER_DTS_IS_VALID (buffer)) + timestamp = GST_BUFFER_DTS (buffer); + else + timestamp = GST_BUFFER_PTS (buffer); + + if (timestamp == GST_CLOCK_TIME_NONE) { + if (head) + timestamp = aggpad->priv->head_position; + else + timestamp = aggpad->priv->tail_position; + } + + /* add duration */ + if (GST_BUFFER_DURATION_IS_VALID (buffer)) + timestamp += GST_BUFFER_DURATION (buffer); + + if (head) + aggpad->priv->head_position = timestamp; + else + aggpad->priv->tail_position = timestamp; + + update_time_level (aggpad, head); +} + +/* + * Can be called either from the sinkpad's chain function or from the srcpad's + * thread in the case of a buffer synthetized from a GAP event. + * Because of this second case, FLUSH_LOCK can't be used here. 
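+ * The regular chain path (gst_aggregator_pad_chain() below) already wraps
+ * this call in PAD_FLUSH_LOCK / PAD_FLUSH_UNLOCK, so only the GAP path,
+ * which runs from the srcpad task, reaches this function without that lock.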
+ */ + +static GstFlowReturn +gst_aggregator_pad_chain_internal (GstAggregator * self, + GstAggregatorPad * aggpad, GstBuffer * buffer, gboolean head) +{ + GstFlowReturn flow_return; + GstClockTime buf_pts; + + PAD_LOCK (aggpad); + flow_return = aggpad->priv->flow_return; + if (flow_return != GST_FLOW_OK) + goto flushing; + + PAD_UNLOCK (aggpad); + + buf_pts = GST_BUFFER_PTS (buffer); + + for (;;) { + SRC_LOCK (self); + GST_OBJECT_LOCK (self); + PAD_LOCK (aggpad); + + if (aggpad->priv->first_buffer) { + self->priv->has_peer_latency = FALSE; + aggpad->priv->first_buffer = FALSE; + } + + if ((gst_aggregator_pad_has_space (self, aggpad) || !head) + && aggpad->priv->flow_return == GST_FLOW_OK) { + if (head) + g_queue_push_head (&aggpad->priv->data, buffer); + else + g_queue_push_tail (&aggpad->priv->data, buffer); + apply_buffer (aggpad, buffer, head); + aggpad->priv->num_buffers++; + buffer = NULL; + SRC_BROADCAST (self); + break; + } + + flow_return = aggpad->priv->flow_return; + if (flow_return != GST_FLOW_OK) { + GST_OBJECT_UNLOCK (self); + SRC_UNLOCK (self); + goto flushing; + } + GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed"); + GST_OBJECT_UNLOCK (self); + SRC_UNLOCK (self); + PAD_WAIT_EVENT (aggpad); + + PAD_UNLOCK (aggpad); + } + + if (self->priv->first_buffer) { + GstClockTime start_time; + GstAggregatorPad *srcpad = GST_AGGREGATOR_PAD (self->srcpad); + + switch (self->priv->start_time_selection) { + case GST_AGGREGATOR_START_TIME_SELECTION_ZERO: + default: + start_time = 0; + break; + case GST_AGGREGATOR_START_TIME_SELECTION_FIRST: + GST_OBJECT_LOCK (aggpad); + if (aggpad->priv->head_segment.format == GST_FORMAT_TIME) { + start_time = buf_pts; + if (start_time != -1) { + start_time = MAX (start_time, aggpad->priv->head_segment.start); + start_time = + gst_segment_to_running_time (&aggpad->priv->head_segment, + GST_FORMAT_TIME, start_time); + } + } else { + start_time = 0; + GST_WARNING_OBJECT (aggpad, + "Ignoring request of selecting the first start time " + "as the segment is a %s segment instead of a time segment", + gst_format_get_name (aggpad->segment.format)); + } + GST_OBJECT_UNLOCK (aggpad); + break; + case GST_AGGREGATOR_START_TIME_SELECTION_SET: + start_time = self->priv->start_time; + if (start_time == -1) + start_time = 0; + break; + } + + if (start_time != -1) { + if (srcpad->segment.position == -1) + srcpad->segment.position = start_time; + else + srcpad->segment.position = MIN (start_time, srcpad->segment.position); + + GST_DEBUG_OBJECT (self, "Selecting start time %" GST_TIME_FORMAT, + GST_TIME_ARGS (start_time)); + } + } + + PAD_UNLOCK (aggpad); + GST_OBJECT_UNLOCK (self); + SRC_UNLOCK (self); + + GST_DEBUG_OBJECT (aggpad, "Done chaining"); + + return flow_return; + +flushing: + PAD_UNLOCK (aggpad); + + GST_DEBUG_OBJECT (aggpad, "Pad is %s, dropping buffer", + gst_flow_get_name (flow_return)); + if (buffer) + gst_buffer_unref (buffer); + + return flow_return; +} + +static GstFlowReturn +gst_aggregator_pad_chain (GstPad * pad, GstObject * object, GstBuffer * buffer) +{ + GstFlowReturn ret; + GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad); + + PAD_FLUSH_LOCK (aggpad); + + ret = gst_aggregator_pad_chain_internal (GST_AGGREGATOR_CAST (object), + aggpad, buffer, TRUE); + + PAD_FLUSH_UNLOCK (aggpad); + + return ret; +} + +static gboolean +gst_aggregator_pad_query_func (GstPad * pad, GstObject * parent, + GstQuery * query) +{ + GstAggregator *self = GST_AGGREGATOR (parent); + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self); + GstAggregatorPad 
*aggpad = GST_AGGREGATOR_PAD (pad); + + g_assert (klass->sink_query_pre_queue); + return klass->sink_query_pre_queue (self, aggpad, query); +} + +static GstFlowReturn +gst_aggregator_pad_event_func (GstPad * pad, GstObject * parent, + GstEvent * event) +{ + GstAggregator *self = GST_AGGREGATOR (parent); + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self); + GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad); + + g_assert (klass->sink_event_pre_queue); + return klass->sink_event_pre_queue (self, aggpad, event); +} + +static gboolean +gst_aggregator_pad_activate_mode_func (GstPad * pad, + GstObject * parent, GstPadMode mode, gboolean active) +{ + GstAggregator *self = GST_AGGREGATOR (parent); + GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad); + + if (active == FALSE) { + SRC_LOCK (self); + gst_aggregator_pad_set_flushing (aggpad, GST_FLOW_FLUSHING, TRUE); + SRC_BROADCAST (self); + SRC_UNLOCK (self); + } else { + PAD_LOCK (aggpad); + aggpad->priv->flow_return = GST_FLOW_OK; + PAD_BROADCAST_EVENT (aggpad); + PAD_UNLOCK (aggpad); + } + + return TRUE; +} + +/*********************************** + * GstAggregatorPad implementation * + ************************************/ +G_DEFINE_TYPE_WITH_PRIVATE (GstAggregatorPad, gst_aggregator_pad, GST_TYPE_PAD); + +#define DEFAULT_PAD_EMIT_SIGNALS FALSE + +enum +{ + PAD_PROP_0, + PAD_PROP_EMIT_SIGNALS, +}; + +enum +{ + PAD_SIGNAL_BUFFER_CONSUMED, + PAD_LAST_SIGNAL, +}; + +static guint gst_aggregator_pad_signals[PAD_LAST_SIGNAL] = { 0 }; + +static void +gst_aggregator_pad_constructed (GObject * object) +{ + GstPad *pad = GST_PAD (object); + + if (GST_PAD_IS_SINK (pad)) { + gst_pad_set_chain_function (pad, + GST_DEBUG_FUNCPTR (gst_aggregator_pad_chain)); + gst_pad_set_event_full_function_full (pad, + GST_DEBUG_FUNCPTR (gst_aggregator_pad_event_func), NULL, NULL); + gst_pad_set_query_function (pad, + GST_DEBUG_FUNCPTR (gst_aggregator_pad_query_func)); + gst_pad_set_activatemode_function (pad, + GST_DEBUG_FUNCPTR (gst_aggregator_pad_activate_mode_func)); + } +} + +static void +gst_aggregator_pad_finalize (GObject * object) +{ + GstAggregatorPad *pad = (GstAggregatorPad *) object; + + g_cond_clear (&pad->priv->event_cond); + g_mutex_clear (&pad->priv->flush_lock); + g_mutex_clear (&pad->priv->lock); + + G_OBJECT_CLASS (gst_aggregator_pad_parent_class)->finalize (object); +} + +static void +gst_aggregator_pad_dispose (GObject * object) +{ + GstAggregatorPad *pad = (GstAggregatorPad *) object; + + gst_aggregator_pad_set_flushing (pad, GST_FLOW_FLUSHING, TRUE); + + G_OBJECT_CLASS (gst_aggregator_pad_parent_class)->dispose (object); +} + +static void +gst_aggregator_pad_set_property (GObject * object, guint prop_id, + const GValue * value, GParamSpec * pspec) +{ + GstAggregatorPad *pad = GST_AGGREGATOR_PAD (object); + + switch (prop_id) { + case PAD_PROP_EMIT_SIGNALS: + pad->priv->emit_signals = g_value_get_boolean (value); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); + break; + } +} + +static void +gst_aggregator_pad_get_property (GObject * object, guint prop_id, + GValue * value, GParamSpec * pspec) +{ + GstAggregatorPad *pad = GST_AGGREGATOR_PAD (object); + + switch (prop_id) { + case PAD_PROP_EMIT_SIGNALS: + g_value_set_boolean (value, pad->priv->emit_signals); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); + break; + } +} + +static void +gst_aggregator_pad_class_init (GstAggregatorPadClass * klass) +{ + GObjectClass *gobject_class = (GObjectClass *) klass; + + 
gobject_class->constructed = gst_aggregator_pad_constructed; + gobject_class->finalize = gst_aggregator_pad_finalize; + gobject_class->dispose = gst_aggregator_pad_dispose; + gobject_class->set_property = gst_aggregator_pad_set_property; + gobject_class->get_property = gst_aggregator_pad_get_property; + + /** + * GstAggregatorPad:buffer-consumed: + * @buffer: The buffer that was consumed + * + * Signals that a buffer was consumed. As aggregator pads store buffers + * in an internal queue, there is no direct match between input and output + * buffers at any given time. This signal can be useful to forward metas + * such as #GstVideoTimeCodeMeta or #GstVideoCaptionMeta at the right time. + * + * Since: 1.16 + */ + gst_aggregator_pad_signals[PAD_SIGNAL_BUFFER_CONSUMED] = + g_signal_new ("buffer-consumed", G_TYPE_FROM_CLASS (klass), + G_SIGNAL_RUN_FIRST, 0, NULL, NULL, NULL, G_TYPE_NONE, 1, GST_TYPE_BUFFER); + + /** + * GstAggregatorPad:emit-signals: + * + * Enables the emission of signals such as #GstAggregatorPad::buffer-consumed + * + * Since: 1.16 + */ + g_object_class_install_property (gobject_class, PAD_PROP_EMIT_SIGNALS, + g_param_spec_boolean ("emit-signals", "Emit signals", + "Send signals to signal data consumption", DEFAULT_PAD_EMIT_SIGNALS, + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); +} + +static void +gst_aggregator_pad_init (GstAggregatorPad * pad) +{ + pad->priv = gst_aggregator_pad_get_instance_private (pad); + + g_queue_init (&pad->priv->data); + g_cond_init (&pad->priv->event_cond); + + g_mutex_init (&pad->priv->flush_lock); + g_mutex_init (&pad->priv->lock); + + gst_aggregator_pad_reset_unlocked (pad); + pad->priv->negotiated = FALSE; + pad->priv->emit_signals = DEFAULT_PAD_EMIT_SIGNALS; +} + +/* Must be called with the PAD_LOCK held */ +static void +gst_aggregator_pad_buffer_consumed (GstAggregatorPad * pad, GstBuffer * buffer) +{ + pad->priv->num_buffers--; + GST_TRACE_OBJECT (pad, "Consuming buffer %" GST_PTR_FORMAT, buffer); + if (buffer && pad->priv->emit_signals) { + g_signal_emit (pad, gst_aggregator_pad_signals[PAD_SIGNAL_BUFFER_CONSUMED], + 0, buffer); + } + PAD_BROADCAST_EVENT (pad); +} + +/* Must be called with the PAD_LOCK held */ +static void +gst_aggregator_pad_clip_buffer_unlocked (GstAggregatorPad * pad) +{ + GstAggregator *self = NULL; + GstAggregatorClass *aggclass = NULL; + GstBuffer *buffer = NULL; + + while (pad->priv->clipped_buffer == NULL && + GST_IS_BUFFER (g_queue_peek_tail (&pad->priv->data))) { + buffer = g_queue_pop_tail (&pad->priv->data); + + apply_buffer (pad, buffer, FALSE); + + /* We only take the parent here so that it's not taken if the buffer is + * already clipped or if the queue is empty. + */ + if (self == NULL) { + self = GST_AGGREGATOR (gst_pad_get_parent_element (GST_PAD (pad))); + if (self == NULL) { + gst_buffer_unref (buffer); + return; + } + + aggclass = GST_AGGREGATOR_GET_CLASS (self); + } + + if (aggclass->clip) { + GST_TRACE_OBJECT (pad, "Clipping: %" GST_PTR_FORMAT, buffer); + + buffer = aggclass->clip (self, pad, buffer); + + if (buffer == NULL) { + gst_aggregator_pad_buffer_consumed (pad, buffer); + GST_TRACE_OBJECT (pad, "Clipping consumed the buffer"); + } + } + + pad->priv->clipped_buffer = buffer; + } + + if (self) + gst_object_unref (self); +} + +/** + * gst_aggregator_pad_pop_buffer: + * @pad: the pad to get buffer from + * + * Steal the ref to the buffer currently queued in @pad. + * + * Returns: (transfer full): The buffer in @pad or NULL if no buffer was + * queued. You should unref the buffer after usage. 
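+ *
+ * A typical use from a subclass aggregate() implementation looks roughly
+ * like the following sketch, where consume_buffer() stands in for whatever
+ * the subclass does with the data:
+ *
+ * |[<!-- language="C" -->
+ * GstBuffer *buf = gst_aggregator_pad_pop_buffer (pad);
+ *
+ * if (buf != NULL) {
+ *   consume_buffer (buf);
+ *   gst_buffer_unref (buf);
+ * }
+ * ]|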
+ */ +GstBuffer * +gst_aggregator_pad_pop_buffer (GstAggregatorPad * pad) +{ + GstBuffer *buffer; + + PAD_LOCK (pad); + + if (pad->priv->flow_return != GST_FLOW_OK) { + PAD_UNLOCK (pad); + return NULL; + } + + gst_aggregator_pad_clip_buffer_unlocked (pad); + + buffer = pad->priv->clipped_buffer; + + if (buffer) { + pad->priv->clipped_buffer = NULL; + gst_aggregator_pad_buffer_consumed (pad, buffer); + GST_DEBUG_OBJECT (pad, "Consumed: %" GST_PTR_FORMAT, buffer); + } + + PAD_UNLOCK (pad); + + return buffer; +} + +/** + * gst_aggregator_pad_drop_buffer: + * @pad: the pad where to drop any pending buffer + * + * Drop the buffer currently queued in @pad. + * + * Returns: TRUE if there was a buffer queued in @pad, or FALSE if not. + */ +gboolean +gst_aggregator_pad_drop_buffer (GstAggregatorPad * pad) +{ + GstBuffer *buf; + + buf = gst_aggregator_pad_pop_buffer (pad); + + if (buf == NULL) + return FALSE; + + gst_buffer_unref (buf); + return TRUE; +} + +/** + * gst_aggregator_pad_peek_buffer: + * @pad: the pad to get buffer from + * + * Returns: (transfer full): A reference to the buffer in @pad or + * NULL if no buffer was queued. You should unref the buffer after + * usage. + */ +GstBuffer * +gst_aggregator_pad_peek_buffer (GstAggregatorPad * pad) +{ + GstBuffer *buffer; + + PAD_LOCK (pad); + + if (pad->priv->flow_return != GST_FLOW_OK) { + PAD_UNLOCK (pad); + return NULL; + } + + gst_aggregator_pad_clip_buffer_unlocked (pad); + + if (pad->priv->clipped_buffer) { + buffer = gst_buffer_ref (pad->priv->clipped_buffer); + } else { + buffer = NULL; + } + PAD_UNLOCK (pad); + + return buffer; +} + +/** + * gst_aggregator_pad_has_buffer: + * @pad: the pad to check the buffer on + * + * This checks if a pad has a buffer available that will be returned by + * a call to gst_aggregator_pad_peek_buffer() or + * gst_aggregator_pad_pop_buffer(). + * + * Returns: %TRUE if the pad has a buffer available as the next thing. + * + * Since: 1.14.1 + */ +gboolean +gst_aggregator_pad_has_buffer (GstAggregatorPad * pad) +{ + gboolean has_buffer; + + PAD_LOCK (pad); + gst_aggregator_pad_clip_buffer_unlocked (pad); + has_buffer = (pad->priv->clipped_buffer != NULL); + PAD_UNLOCK (pad); + + return has_buffer; +} + +/** + * gst_aggregator_pad_is_eos: + * @pad: an aggregator pad + * + * Returns: %TRUE if the pad is EOS, otherwise %FALSE. + */ +gboolean +gst_aggregator_pad_is_eos (GstAggregatorPad * pad) +{ + gboolean is_eos; + + PAD_LOCK (pad); + is_eos = pad->priv->eos; + PAD_UNLOCK (pad); + + return is_eos; +} + +#if 0 +/* + * gst_aggregator_merge_tags: + * @self: a #GstAggregator + * @tags: a #GstTagList to merge + * @mode: the #GstTagMergeMode to use + * + * Adds tags to so-called pending tags, which will be processed + * before pushing out data downstream. + * + * Note that this is provided for convenience, and the subclass is + * not required to use this and can still do tag handling on its own. + * + * MT safe. + */ +void +gst_aggregator_merge_tags (GstAggregator * self, + const GstTagList * tags, GstTagMergeMode mode) +{ + GstTagList *otags; + + g_return_if_fail (GST_IS_AGGREGATOR (self)); + g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags)); + + /* FIXME Check if we can use OBJECT lock here! 
*/ + GST_OBJECT_LOCK (self); + if (tags) + GST_DEBUG_OBJECT (self, "merging tags %" GST_PTR_FORMAT, tags); + otags = self->priv->tags; + self->priv->tags = gst_tag_list_merge (self->priv->tags, tags, mode); + if (otags) + gst_tag_list_unref (otags); + self->priv->tags_changed = TRUE; + GST_OBJECT_UNLOCK (self); +} +#endif + +/** + * gst_aggregator_set_latency: + * @self: a #GstAggregator + * @min_latency: minimum latency + * @max_latency: maximum latency + * + * Lets #GstAggregator sub-classes tell the baseclass what their internal + * latency is. Will also post a LATENCY message on the bus so the pipeline + * can reconfigure its global latency. + */ +void +gst_aggregator_set_latency (GstAggregator * self, + GstClockTime min_latency, GstClockTime max_latency) +{ + gboolean changed = FALSE; + + g_return_if_fail (GST_IS_AGGREGATOR (self)); + g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency)); + g_return_if_fail (max_latency >= min_latency); + + SRC_LOCK (self); + if (self->priv->sub_latency_min != min_latency) { + self->priv->sub_latency_min = min_latency; + changed = TRUE; + } + if (self->priv->sub_latency_max != max_latency) { + self->priv->sub_latency_max = max_latency; + changed = TRUE; + } + + if (changed) + SRC_BROADCAST (self); + SRC_UNLOCK (self); + + if (changed) { + gst_element_post_message (GST_ELEMENT_CAST (self), + gst_message_new_latency (GST_OBJECT_CAST (self))); + } +} + +/** + * gst_aggregator_get_buffer_pool: + * @self: a #GstAggregator + * + * Returns: (transfer full): the instance of the #GstBufferPool used + * by @trans; free it after use it + */ +GstBufferPool * +gst_aggregator_get_buffer_pool (GstAggregator * self) +{ + GstBufferPool *pool; + + g_return_val_if_fail (GST_IS_AGGREGATOR (self), NULL); + + GST_OBJECT_LOCK (self); + pool = self->priv->pool; + if (pool) + gst_object_ref (pool); + GST_OBJECT_UNLOCK (self); + + return pool; +} + +/** + * gst_aggregator_get_allocator: + * @self: a #GstAggregator + * @allocator: (out) (allow-none) (transfer full): the #GstAllocator + * used + * @params: (out) (allow-none) (transfer full): the + * #GstAllocationParams of @allocator + * + * Lets #GstAggregator sub-classes get the memory @allocator + * acquired by the base class and its @params. + * + * Unref the @allocator after use it. + */ +void +gst_aggregator_get_allocator (GstAggregator * self, + GstAllocator ** allocator, GstAllocationParams * params) +{ + g_return_if_fail (GST_IS_AGGREGATOR (self)); + + if (allocator) + *allocator = self->priv->allocator ? + gst_object_ref (self->priv->allocator) : NULL; + + if (params) + *params = self->priv->allocation_params; +} + +/** + * gst_aggregator_simple_get_next_time: + * @self: A #GstAggregator + * + * This is a simple #GstAggregatorClass.get_next_time() implementation that + * just looks at the #GstSegment on the srcpad of the aggregator and bases + * the next time on the running time there. + * + * This is the desired behaviour in most cases where you have a live source + * and you have a dead line based aggregator subclass. 
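+ *
+ * Typically a subclass just installs this helper as its
+ * #GstAggregatorClass.get_next_time() implementation from class_init,
+ * roughly like this (sketch only, where aggregator_class is the subclass'
+ * #GstAggregatorClass):
+ *
+ * |[<!-- language="C" -->
+ * aggregator_class->get_next_time = gst_aggregator_simple_get_next_time;
+ * ]|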
+ * + * Returns: The running time based on the position + * + * Since: 1.16 + */ +GstClockTime +gst_aggregator_simple_get_next_time (GstAggregator * self) +{ + GstClockTime next_time; + GstAggregatorPad *srcpad = GST_AGGREGATOR_PAD (self->srcpad); + GstSegment *segment = &srcpad->segment; + + GST_OBJECT_LOCK (self); + if (segment->position == -1 || segment->position < segment->start) + next_time = segment->start; + else + next_time = segment->position; + + if (segment->stop != -1 && next_time > segment->stop) + next_time = segment->stop; + + next_time = gst_segment_to_running_time (segment, GST_FORMAT_TIME, next_time); + GST_OBJECT_UNLOCK (self); + + return next_time; +} + +/** + * gst_aggregator_update_segment: + * + * Subclasses should use this to update the segment on their + * source pad, instead of directly pushing new segment events + * downstream. + * + * Since: 1.18 + */ +void +gst_aggregator_update_segment (GstAggregator * self, GstSegment * segment) +{ + g_return_if_fail (GST_IS_AGGREGATOR (self)); + g_return_if_fail (segment != NULL); + + GST_INFO_OBJECT (self, "Updating srcpad segment: %" GST_SEGMENT_FORMAT, + segment); + + GST_OBJECT_LOCK (self); + GST_AGGREGATOR_PAD (self->srcpad)->segment = *segment; + self->priv->send_segment = TRUE; + GST_OBJECT_UNLOCK (self); +} diff --git a/src/base/gstaggregator.h b/src/base/gstaggregator.h new file mode 100644 index 0000000..100f291 --- /dev/null +++ b/src/base/gstaggregator.h @@ -0,0 +1,396 @@ +/* GStreamer aggregator base class + * Copyright (C) 2014 Mathieu Duponchelle + * Copyright (C) 2014 Thibault Saunier + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __GST_AGGREGATOR_H__ +#define __GST_AGGREGATOR_H__ + +#include + +G_BEGIN_DECLS + +/************************** + * GstAggregator Structs * + *************************/ + +typedef struct _GstAggregator GstAggregator; +typedef struct _GstAggregatorPrivate GstAggregatorPrivate; +typedef struct _GstAggregatorClass GstAggregatorClass; + +/************************ + * GstAggregatorPad API * + ***********************/ + +#define GST_TYPE_AGGREGATOR_PAD (gst_aggregator_pad_get_type()) +#define GST_AGGREGATOR_PAD(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AGGREGATOR_PAD, GstAggregatorPad)) +#define GST_AGGREGATOR_PAD_CAST(obj) ((GstAggregatorPad *)(obj)) +#define GST_AGGREGATOR_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_AGGREGATOR_PAD, GstAggregatorPadClass)) +#define GST_AGGREGATOR_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj),GST_TYPE_AGGREGATOR_PAD, GstAggregatorPadClass)) +#define GST_IS_AGGREGATOR_PAD(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AGGREGATOR_PAD)) +#define GST_IS_AGGREGATOR_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_AGGREGATOR_PAD)) + +/**************************** + * GstAggregatorPad Structs * + ***************************/ + +typedef struct _GstAggregatorPad GstAggregatorPad; +typedef struct _GstAggregatorPadClass GstAggregatorPadClass; +typedef struct _GstAggregatorPadPrivate GstAggregatorPadPrivate; + +/** + * GstAggregatorPad: + * @segment: last segment received. + * + * The implementation the GstPad to use with #GstAggregator + * + * Since: 1.14 + */ +struct _GstAggregatorPad +{ + GstPad parent; + + /*< public >*/ + /* Protected by the OBJECT_LOCK */ + GstSegment segment; + + /* < private > */ + GstAggregatorPadPrivate * priv; + + gpointer _gst_reserved[GST_PADDING]; +}; + +/** + * GstAggregatorPadClass: + * @flush: Optional + * Called when the pad has received a flush stop, this is the place + * to flush any information specific to the pad, it allows for individual + * pads to be flushed while others might not be. + * @skip_buffer: Optional + * Called before input buffers are queued in the pad, return %TRUE + * if the buffer should be skipped. 
+ * + * Since: 1.14 + */ +struct _GstAggregatorPadClass +{ + GstPadClass parent_class; + + GstFlowReturn (*flush) (GstAggregatorPad * aggpad, GstAggregator * aggregator); + gboolean (*skip_buffer) (GstAggregatorPad * aggpad, GstAggregator * aggregator, GstBuffer * buffer); + + /*< private >*/ + gpointer _gst_reserved[GST_PADDING_LARGE]; +}; + +GST_BASE_API +GType gst_aggregator_pad_get_type (void); + +/**************************** + * GstAggregatorPad methods * + ***************************/ + +GST_BASE_API +GstBuffer * gst_aggregator_pad_pop_buffer (GstAggregatorPad * pad); + +GST_BASE_API +GstBuffer * gst_aggregator_pad_peek_buffer (GstAggregatorPad * pad); + +GST_BASE_API +gboolean gst_aggregator_pad_drop_buffer (GstAggregatorPad * pad); + +GST_BASE_API +gboolean gst_aggregator_pad_has_buffer (GstAggregatorPad * pad); + +GST_BASE_API +gboolean gst_aggregator_pad_is_eos (GstAggregatorPad * pad); + +/********************* + * GstAggregator API * + ********************/ + +#define GST_TYPE_AGGREGATOR (gst_aggregator_get_type()) +#define GST_AGGREGATOR(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AGGREGATOR,GstAggregator)) +#define GST_AGGREGATOR_CAST(obj) ((GstAggregator *)(obj)) +#define GST_AGGREGATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_AGGREGATOR,GstAggregatorClass)) +#define GST_AGGREGATOR_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj),GST_TYPE_AGGREGATOR,GstAggregatorClass)) +#define GST_IS_AGGREGATOR(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AGGREGATOR)) +#define GST_IS_AGGREGATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_AGGREGATOR)) + +#define GST_AGGREGATOR_FLOW_NEED_DATA GST_FLOW_CUSTOM_ERROR + +/** + * GstAggregator: + * @srcpad: the aggregator's source pad + * + * Aggregator base class object structure. + * + * Since: 1.14 + */ +struct _GstAggregator +{ + GstElement parent; + + /*< public >*/ + GstPad * srcpad; + + /*< private >*/ + GstAggregatorPrivate * priv; + + gpointer _gst_reserved[GST_PADDING_LARGE]; +}; + +/** + * GstAggregatorClass: + * @flush: Optional. + * Called after a successful flushing seek, once all the flush + * stops have been received. Flush pad-specific data in + * #GstAggregatorPad->flush. + * @clip: Optional. + * Called when a buffer is received on a sink pad, the task of + * clipping it and translating it to the current segment falls + * on the subclass. The function should use the segment of data + * and the negotiated media type on the pad to perform + * clipping of input buffer. This function takes ownership of + * buf and should output a buffer or return NULL in + * if the buffer should be dropped. + * @finish_buffer: Optional. + * Called when a subclass calls gst_aggregator_finish_buffer() + * from their aggregate function to push out a buffer. + * Subclasses can override this to modify or decorate buffers + * before they get pushed out. This function takes ownership + * of the buffer passed. Subclasses that override this method + * should always chain up to the parent class virtual method. + * @sink_event: Optional. + * Called when an event is received on a sink pad, the subclass + * should always chain up. + * @sink_query: Optional. + * Called when a query is received on a sink pad, the subclass + * should always chain up. + * @src_event: Optional. + * Called when an event is received on the src pad, the subclass + * should always chain up. + * @src_query: Optional. + * Called when a query is received on the src pad, the subclass + * should always chain up. + * @src_activate: Optional. 
+ * Called when the src pad is activated, it will start/stop its + * pad task right after that call. + * @aggregate: Mandatory. + * Called when buffers are queued on all sinkpads. Classes + * should iterate the GstElement->sinkpads and peek or steal + * buffers from the #GstAggregatorPads. If the subclass returns + * GST_FLOW_EOS, sending of the eos event will be taken care + * of. Once / if a buffer has been constructed from the + * aggregated buffers, the subclass should call _finish_buffer. + * @stop: Optional. + * Called when the element goes from PAUSED to READY. + * The subclass should free all resources and reset its state. + * @start: Optional. + * Called when the element goes from READY to PAUSED. + * The subclass should get ready to process + * aggregated buffers. + * @get_next_time: Optional. + * Called when the element needs to know the running time of the next + * rendered buffer for live pipelines. This causes deadline + * based aggregation to occur. Defaults to returning + * GST_CLOCK_TIME_NONE causing the element to wait for buffers + * on all sink pads before aggregating. + * @create_new_pad: Optional. + * Called when a new pad needs to be created. Allows subclass that + * don't have a single sink pad template to provide a pad based + * on the provided information. + * @update_src_caps: Lets subclasses update the #GstCaps representing + * the src pad caps before usage. The result should end up + * in @ret. Return %GST_AGGREGATOR_FLOW_NEED_DATA to indicate that the + * element needs more information (caps, a buffer, etc) to + * choose the correct caps. Should return ANY caps if the + * stream has not caps at all. + * @fixate_src_caps: Optional. + * Fixate and return the src pad caps provided. The function takes + * ownership of @caps and returns a fixated version of + * @caps. @caps is not guaranteed to be writable. + * @negotiated_src_caps: Optional. + * Notifies subclasses what caps format has been negotiated + * @decide_allocation: Optional. + * Allows the subclass to influence the allocation choices. + * Setup the allocation parameters for allocating output + * buffers. The passed in query contains the result of the + * downstream allocation query. + * @propose_allocation: Optional. + * Allows the subclass to handle the allocation query from upstream. + * @negotiate: Optional. + * Negotiate the caps with the peer (Since: 1.18). + * @sink_event_pre_queue: Optional. + * Called when an event is received on a sink pad before queueing up + * serialized events. The subclass should always chain up (Since: 1.18). + * @sink_query_pre_queue: Optional. + * Called when a query is received on a sink pad before queueing up + * serialized queries. The subclass should always chain up (Since: 1.18). + * + * The aggregator base class will handle in a thread-safe way all manners of + * concurrent flushes, seeks, pad additions and removals, leaving to the + * subclass the responsibility of clipping buffers, and aggregating buffers in + * the way the implementor sees fit. + * + * It will also take care of event ordering (stream-start, segment, eos). + * + * Basically, a simple implementation will override @aggregate, and call + * _finish_buffer from inside that function. 
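+ *
+ * As a very rough sketch (locking, error handling and the actual merging
+ * of data are omitted; my_agg_aggregate is an illustrative name, not part
+ * of this API), such an @aggregate implementation could look like:
+ *
+ * |[<!-- language="C" -->
+ * static GstFlowReturn
+ * my_agg_aggregate (GstAggregator * agg, gboolean timeout)
+ * {
+ *   GstBuffer *outbuf = gst_buffer_new ();
+ *   GList *l;
+ *
+ *   for (l = GST_ELEMENT (agg)->sinkpads; l != NULL; l = l->next) {
+ *     GstAggregatorPad *pad = GST_AGGREGATOR_PAD (l->data);
+ *     GstBuffer *inbuf = gst_aggregator_pad_pop_buffer (pad);
+ *
+ *     if (inbuf == NULL)
+ *       continue;               // pad is EOS or has nothing queued yet
+ *
+ *     // ... merge the data of inbuf into outbuf here ...
+ *     gst_buffer_unref (inbuf);
+ *   }
+ *
+ *   return gst_aggregator_finish_buffer (agg, outbuf);
+ * }
+ * ]|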
+ * + * Since: 1.14 + */ +struct _GstAggregatorClass { + GstElementClass parent_class; + + GstFlowReturn (*flush) (GstAggregator * aggregator); + + GstBuffer * (*clip) (GstAggregator * aggregator, + GstAggregatorPad * aggregator_pad, + GstBuffer * buf); + + GstFlowReturn (*finish_buffer) (GstAggregator * aggregator, + GstBuffer * buffer); + + /* sinkpads virtual methods */ + gboolean (*sink_event) (GstAggregator * aggregator, + GstAggregatorPad * aggregator_pad, + GstEvent * event); + + gboolean (*sink_query) (GstAggregator * aggregator, + GstAggregatorPad * aggregator_pad, + GstQuery * query); + + /* srcpad virtual methods */ + gboolean (*src_event) (GstAggregator * aggregator, + GstEvent * event); + + gboolean (*src_query) (GstAggregator * aggregator, + GstQuery * query); + + gboolean (*src_activate) (GstAggregator * aggregator, + GstPadMode mode, + gboolean active); + + GstFlowReturn (*aggregate) (GstAggregator * aggregator, + gboolean timeout); + + gboolean (*stop) (GstAggregator * aggregator); + + gboolean (*start) (GstAggregator * aggregator); + + GstClockTime (*get_next_time) (GstAggregator * aggregator); + + GstAggregatorPad * (*create_new_pad) (GstAggregator * self, + GstPadTemplate * templ, + const gchar * req_name, + const GstCaps * caps); + + /** + * GstAggregatorClass::update_src_caps: + * @ret: (out) (allow-none): + */ + GstFlowReturn (*update_src_caps) (GstAggregator * self, + GstCaps * caps, + GstCaps ** ret); + GstCaps * (*fixate_src_caps) (GstAggregator * self, + GstCaps * caps); + gboolean (*negotiated_src_caps) (GstAggregator * self, + GstCaps * caps); + gboolean (*decide_allocation) (GstAggregator * self, + GstQuery * query); + gboolean (*propose_allocation) (GstAggregator * self, + GstAggregatorPad * pad, + GstQuery * decide_query, + GstQuery * query); + + gboolean (*negotiate) (GstAggregator * self); + + GstFlowReturn (*sink_event_pre_queue) (GstAggregator * aggregator, + GstAggregatorPad * aggregator_pad, + GstEvent * event); + + gboolean (*sink_query_pre_queue) (GstAggregator * aggregator, + GstAggregatorPad * aggregator_pad, + GstQuery * query); + + /*< private >*/ + gpointer _gst_reserved[GST_PADDING_LARGE-3]; +}; + +/************************************ + * GstAggregator convenience macros * + ***********************************/ + +/** + * GST_AGGREGATOR_SRC_PAD: + * @agg: a #GstAggregator + * + * Convenience macro to access the source pad of #GstAggregator + * + * Since: 1.6 + */ +#define GST_AGGREGATOR_SRC_PAD(agg) (((GstAggregator *)(agg))->srcpad) + +/************************* + * GstAggregator methods * + ************************/ + +GST_BASE_API +GstFlowReturn gst_aggregator_finish_buffer (GstAggregator * aggregator, + GstBuffer * buffer); + +GST_BASE_API +void gst_aggregator_set_src_caps (GstAggregator * self, + GstCaps * caps); + +GST_BASE_API +gboolean gst_aggregator_negotiate (GstAggregator * self); + +GST_BASE_API +void gst_aggregator_set_latency (GstAggregator * self, + GstClockTime min_latency, + GstClockTime max_latency); + +GST_BASE_API +GType gst_aggregator_get_type(void); + +GST_BASE_API +GstClockTime gst_aggregator_get_latency (GstAggregator * self); + +GST_BASE_API +GstBufferPool * gst_aggregator_get_buffer_pool (GstAggregator * self); + +GST_BASE_API +void gst_aggregator_get_allocator (GstAggregator * self, + GstAllocator ** allocator, + GstAllocationParams * params); + +GST_BASE_API +GstClockTime gst_aggregator_simple_get_next_time (GstAggregator * self); + +GST_BASE_API +void gst_aggregator_update_segment (GstAggregator * self, + 
GstSegment * segment); + +G_END_DECLS + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(GstAggregator, gst_object_unref) +G_DEFINE_AUTOPTR_CLEANUP_FUNC(GstAggregatorPad, gst_object_unref) + +#endif /* __GST_AGGREGATOR_H__ */ diff --git a/src/base/mod.rs b/src/base/mod.rs new file mode 100644 index 0000000..6248195 --- /dev/null +++ b/src/base/mod.rs @@ -0,0 +1,26 @@ +#[allow(clippy::unreadable_literal)] +#[allow(clippy::too_many_arguments)] +#[allow(clippy::match_same_arms)] +#[allow(clippy::type_complexity)] +mod auto; +pub use auto::*; + +mod utils; + +mod aggregator; +mod aggregator_pad; + +pub mod prelude { + pub use gst::glib::prelude::*; + pub use gst::prelude::*; + + pub use super::aggregator::AggregatorExtManual; + pub use super::aggregator_pad::AggregatorPadExtManual; + pub use super::auto::traits::*; +} + +pub mod subclass; + +mod ffi; + +pub const AGGREGATOR_FLOW_NEED_DATA: gst::FlowError = gst::FlowError::CustomError; diff --git a/src/base/subclass/aggregator.rs b/src/base/subclass/aggregator.rs new file mode 100644 index 0000000..af54d40 --- /dev/null +++ b/src/base/subclass/aggregator.rs @@ -0,0 +1,971 @@ +// Take a look at the license at the top of the repository in the LICENSE file. + +use super::super::ffi; + +use glib::translate::*; +use gst::glib; +use gst::prelude::*; +use gst::subclass::prelude::*; + +use std::ptr; + +use super::super::Aggregator; +use super::super::AggregatorPad; + +pub trait AggregatorImpl: AggregatorImplExt + ElementImpl { + fn flush(&self, aggregator: &Self::Type) -> Result { + self.parent_flush(aggregator) + } + + fn clip( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + buffer: gst::Buffer, + ) -> Option { + self.parent_clip(aggregator, aggregator_pad, buffer) + } + + fn finish_buffer( + &self, + aggregator: &Self::Type, + buffer: gst::Buffer, + ) -> Result { + self.parent_finish_buffer(aggregator, buffer) + } + + fn sink_event( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + event: gst::Event, + ) -> bool { + self.parent_sink_event(aggregator, aggregator_pad, event) + } + + fn sink_event_pre_queue( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + event: gst::Event, + ) -> Result { + self.parent_sink_event_pre_queue(aggregator, aggregator_pad, event) + } + + fn sink_query( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + query: &mut gst::QueryRef, + ) -> bool { + self.parent_sink_query(aggregator, aggregator_pad, query) + } + + fn sink_query_pre_queue( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + query: &mut gst::QueryRef, + ) -> bool { + self.parent_sink_query_pre_queue(aggregator, aggregator_pad, query) + } + + fn src_event(&self, aggregator: &Self::Type, event: gst::Event) -> bool { + self.parent_src_event(aggregator, event) + } + + fn src_query(&self, aggregator: &Self::Type, query: &mut gst::QueryRef) -> bool { + self.parent_src_query(aggregator, query) + } + + fn src_activate( + &self, + aggregator: &Self::Type, + mode: gst::PadMode, + active: bool, + ) -> Result<(), gst::LoggableError> { + self.parent_src_activate(aggregator, mode, active) + } + + fn aggregate( + &self, + aggregator: &Self::Type, + timeout: bool, + ) -> Result { + self.parent_aggregate(aggregator, timeout) + } + + fn start(&self, aggregator: &Self::Type) -> Result<(), gst::ErrorMessage> { + self.parent_start(aggregator) + } + + fn stop(&self, aggregator: &Self::Type) -> Result<(), gst::ErrorMessage> { + self.parent_stop(aggregator) + } + + fn next_time(&self, aggregator: 
&Self::Type) -> Option { + self.parent_next_time(aggregator) + } + + fn create_new_pad( + &self, + aggregator: &Self::Type, + templ: &gst::PadTemplate, + req_name: Option<&str>, + caps: Option<&gst::Caps>, + ) -> Option { + self.parent_create_new_pad(aggregator, templ, req_name, caps) + } + + fn update_src_caps( + &self, + aggregator: &Self::Type, + caps: &gst::Caps, + ) -> Result { + self.parent_update_src_caps(aggregator, caps) + } + + fn fixate_src_caps(&self, aggregator: &Self::Type, caps: gst::Caps) -> gst::Caps { + self.parent_fixate_src_caps(aggregator, caps) + } + + fn negotiated_src_caps( + &self, + aggregator: &Self::Type, + caps: &gst::Caps, + ) -> Result<(), gst::LoggableError> { + self.parent_negotiated_src_caps(aggregator, caps) + } + + fn negotiate(&self, aggregator: &Self::Type) -> bool { + self.parent_negotiate(aggregator) + } +} + +pub trait AggregatorImplExt: ObjectSubclass { + fn parent_flush(&self, aggregator: &Self::Type) -> Result; + + fn parent_clip( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + buffer: gst::Buffer, + ) -> Option; + + fn parent_finish_buffer( + &self, + aggregator: &Self::Type, + buffer: gst::Buffer, + ) -> Result; + + fn parent_sink_event( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + event: gst::Event, + ) -> bool; + + fn parent_sink_event_pre_queue( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + event: gst::Event, + ) -> Result; + + fn parent_sink_query( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + query: &mut gst::QueryRef, + ) -> bool; + + fn parent_sink_query_pre_queue( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + query: &mut gst::QueryRef, + ) -> bool; + + fn parent_src_event(&self, aggregator: &Self::Type, event: gst::Event) -> bool; + + fn parent_src_query(&self, aggregator: &Self::Type, query: &mut gst::QueryRef) -> bool; + + fn parent_src_activate( + &self, + aggregator: &Self::Type, + mode: gst::PadMode, + active: bool, + ) -> Result<(), gst::LoggableError>; + + fn parent_aggregate( + &self, + aggregator: &Self::Type, + timeout: bool, + ) -> Result; + + fn parent_start(&self, aggregator: &Self::Type) -> Result<(), gst::ErrorMessage>; + + fn parent_stop(&self, aggregator: &Self::Type) -> Result<(), gst::ErrorMessage>; + + fn parent_next_time(&self, aggregator: &Self::Type) -> Option; + + fn parent_create_new_pad( + &self, + aggregator: &Self::Type, + templ: &gst::PadTemplate, + req_name: Option<&str>, + caps: Option<&gst::Caps>, + ) -> Option; + + fn parent_update_src_caps( + &self, + aggregator: &Self::Type, + caps: &gst::Caps, + ) -> Result; + + fn parent_fixate_src_caps(&self, aggregator: &Self::Type, caps: gst::Caps) -> gst::Caps; + + fn parent_negotiated_src_caps( + &self, + aggregator: &Self::Type, + caps: &gst::Caps, + ) -> Result<(), gst::LoggableError>; + + fn parent_negotiate(&self, aggregator: &Self::Type) -> bool; +} + +impl AggregatorImplExt for T { + fn parent_flush(&self, aggregator: &Self::Type) -> Result { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + (*parent_class) + .flush + .map(|f| { + try_from_glib(f(aggregator + .unsafe_cast_ref::() + .to_glib_none() + .0)) + }) + .unwrap_or(Ok(gst::FlowSuccess::Ok)) + } + } + + fn parent_clip( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + buffer: gst::Buffer, + ) -> Option { + unsafe { + let data = Self::type_data(); + let parent_class = 
data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + match (*parent_class).clip { + None => Some(buffer), + Some(ref func) => from_glib_full(func( + aggregator.unsafe_cast_ref::().to_glib_none().0, + aggregator_pad.to_glib_none().0, + buffer.into_ptr(), + )), + } + } + } + + fn parent_finish_buffer( + &self, + aggregator: &Self::Type, + buffer: gst::Buffer, + ) -> Result { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + let f = (*parent_class) + .finish_buffer + .expect("Missing parent function `finish_buffer`"); + try_from_glib(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + buffer.into_ptr(), + )) + } + } + + fn parent_sink_event( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + event: gst::Event, + ) -> bool { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + let f = (*parent_class) + .sink_event + .expect("Missing parent function `sink_event`"); + from_glib(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + aggregator_pad.to_glib_none().0, + event.into_ptr(), + )) + } + } + + fn parent_sink_event_pre_queue( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + event: gst::Event, + ) -> Result { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + let f = (*parent_class) + .sink_event_pre_queue + .expect("Missing parent function `sink_event_pre_queue`"); + try_from_glib(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + aggregator_pad.to_glib_none().0, + event.into_ptr(), + )) + } + } + + fn parent_sink_query( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + query: &mut gst::QueryRef, + ) -> bool { + unsafe { + let data = T::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + let f = (*parent_class) + .sink_query + .expect("Missing parent function `sink_query`"); + from_glib(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + aggregator_pad.to_glib_none().0, + query.as_mut_ptr(), + )) + } + } + + fn parent_sink_query_pre_queue( + &self, + aggregator: &Self::Type, + aggregator_pad: &AggregatorPad, + query: &mut gst::QueryRef, + ) -> bool { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + let f = (*parent_class) + .sink_query_pre_queue + .expect("Missing parent function `sink_query`"); + from_glib(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + aggregator_pad.to_glib_none().0, + query.as_mut_ptr(), + )) + } + } + + fn parent_src_event(&self, aggregator: &Self::Type, event: gst::Event) -> bool { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + let f = (*parent_class) + .src_event + .expect("Missing parent function `src_event`"); + from_glib(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + event.into_ptr(), + )) + } + } + + fn parent_src_query(&self, aggregator: &Self::Type, query: &mut gst::QueryRef) -> bool { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + let f = (*parent_class) + .src_query + .expect("Missing parent function `src_query`"); + from_glib(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + query.as_mut_ptr(), + )) + } + } + + fn parent_src_activate( + 
&self, + aggregator: &Self::Type, + mode: gst::PadMode, + active: bool, + ) -> Result<(), gst::LoggableError> { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + match (*parent_class).src_activate { + None => Ok(()), + Some(f) => gst::result_from_gboolean!( + f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + mode.into_glib(), + active.into_glib() + ), + gst::CAT_RUST, + "Parent function `src_activate` failed" + ), + } + } + } + + fn parent_aggregate( + &self, + aggregator: &Self::Type, + timeout: bool, + ) -> Result { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + let f = (*parent_class) + .aggregate + .expect("Missing parent function `aggregate`"); + try_from_glib(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + timeout.into_glib(), + )) + } + } + + fn parent_start(&self, aggregator: &Self::Type) -> Result<(), gst::ErrorMessage> { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + (*parent_class) + .start + .map(|f| { + if from_glib(f(aggregator + .unsafe_cast_ref::() + .to_glib_none() + .0)) + { + Ok(()) + } else { + Err(gst::error_msg!( + gst::CoreError::Failed, + ["Parent function `start` failed"] + )) + } + }) + .unwrap_or(Ok(())) + } + } + + fn parent_stop(&self, aggregator: &Self::Type) -> Result<(), gst::ErrorMessage> { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + (*parent_class) + .stop + .map(|f| { + if from_glib(f(aggregator + .unsafe_cast_ref::() + .to_glib_none() + .0)) + { + Ok(()) + } else { + Err(gst::error_msg!( + gst::CoreError::Failed, + ["Parent function `stop` failed"] + )) + } + }) + .unwrap_or(Ok(())) + } + } + + fn parent_next_time(&self, aggregator: &Self::Type) -> Option { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + (*parent_class) + .get_next_time + .map(|f| { + from_glib(f(aggregator + .unsafe_cast_ref::() + .to_glib_none() + .0)) + }) + .unwrap_or(gst::ClockTime::NONE) + } + } + + fn parent_create_new_pad( + &self, + aggregator: &Self::Type, + templ: &gst::PadTemplate, + req_name: Option<&str>, + caps: Option<&gst::Caps>, + ) -> Option { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + let f = (*parent_class) + .create_new_pad + .expect("Missing parent function `create_new_pad`"); + from_glib_full(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + templ.to_glib_none().0, + req_name.to_glib_none().0, + caps.to_glib_none().0, + )) + } + } + + fn parent_update_src_caps( + &self, + aggregator: &Self::Type, + caps: &gst::Caps, + ) -> Result { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + let f = (*parent_class) + .update_src_caps + .expect("Missing parent function `update_src_caps`"); + + let mut out_caps = ptr::null_mut(); + gst::FlowSuccess::try_from_glib(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + caps.as_mut_ptr(), + &mut out_caps, + )) + .map(|_| from_glib_full(out_caps)) + } + } + + fn parent_fixate_src_caps(&self, aggregator: &Self::Type, caps: gst::Caps) -> gst::Caps { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut 
ffi::GstAggregatorClass; + + let f = (*parent_class) + .fixate_src_caps + .expect("Missing parent function `fixate_src_caps`"); + from_glib_full(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + caps.into_ptr(), + )) + } + } + + fn parent_negotiated_src_caps( + &self, + aggregator: &Self::Type, + caps: &gst::Caps, + ) -> Result<(), gst::LoggableError> { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + (*parent_class) + .negotiated_src_caps + .map(|f| { + gst::result_from_gboolean!( + f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + caps.to_glib_none().0 + ), + gst::CAT_RUST, + "Parent function `negotiated_src_caps` failed" + ) + }) + .unwrap_or(Ok(())) + } + } + + fn parent_negotiate(&self, aggregator: &Self::Type) -> bool { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + (*parent_class) + .negotiate + .map(|f| { + from_glib(f(aggregator + .unsafe_cast_ref::() + .to_glib_none() + .0)) + }) + .unwrap_or(true) + } + } +} + +unsafe impl IsSubclassable for Aggregator { + fn class_init(klass: &mut glib::Class) { + >::class_init(klass); + let klass = klass.as_mut(); + klass.flush = Some(aggregator_flush::); + klass.clip = Some(aggregator_clip::); + klass.finish_buffer = Some(aggregator_finish_buffer::); + klass.sink_event = Some(aggregator_sink_event::); + klass.sink_query = Some(aggregator_sink_query::); + klass.src_event = Some(aggregator_src_event::); + klass.src_query = Some(aggregator_src_query::); + klass.src_activate = Some(aggregator_src_activate::); + klass.aggregate = Some(aggregator_aggregate::); + klass.start = Some(aggregator_start::); + klass.stop = Some(aggregator_stop::); + klass.get_next_time = Some(aggregator_get_next_time::); + klass.create_new_pad = Some(aggregator_create_new_pad::); + klass.update_src_caps = Some(aggregator_update_src_caps::); + klass.fixate_src_caps = Some(aggregator_fixate_src_caps::); + klass.negotiated_src_caps = Some(aggregator_negotiated_src_caps::); + { + klass.sink_event_pre_queue = Some(aggregator_sink_event_pre_queue::); + klass.sink_query_pre_queue = Some(aggregator_sink_query_pre_queue::); + klass.negotiate = Some(aggregator_negotiate::); + } + } + + fn instance_init(instance: &mut glib::subclass::InitializingObject) { + >::instance_init(instance); + } +} + +unsafe extern "C" fn aggregator_flush( + ptr: *mut ffi::GstAggregator, +) -> gst::ffi::GstFlowReturn { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), gst::FlowReturn::Error, { + imp.flush(wrap.unsafe_cast_ref()).into() + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_clip( + ptr: *mut ffi::GstAggregator, + aggregator_pad: *mut ffi::GstAggregatorPad, + buffer: *mut gst::ffi::GstBuffer, +) -> *mut gst::ffi::GstBuffer { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + let ret = gst::panic_to_error!(&wrap, &imp.panicked(), None, { + imp.clip( + wrap.unsafe_cast_ref(), + &from_glib_borrow(aggregator_pad), + from_glib_full(buffer), + ) + }); + + ret.map(|r| r.into_ptr()).unwrap_or(ptr::null_mut()) +} + +unsafe extern "C" fn aggregator_finish_buffer( + ptr: *mut ffi::GstAggregator, + buffer: *mut gst::ffi::GstBuffer, +) -> gst::ffi::GstFlowReturn { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + 
let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), gst::FlowReturn::Error, { + imp.finish_buffer(wrap.unsafe_cast_ref(), from_glib_full(buffer)) + .into() + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_sink_event( + ptr: *mut ffi::GstAggregator, + aggregator_pad: *mut ffi::GstAggregatorPad, + event: *mut gst::ffi::GstEvent, +) -> glib::ffi::gboolean { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(wrap, &imp.panicked(), false, { + imp.sink_event( + wrap.unsafe_cast_ref(), + &from_glib_borrow(aggregator_pad), + from_glib_full(event), + ) + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_sink_event_pre_queue( + ptr: *mut ffi::GstAggregator, + aggregator_pad: *mut ffi::GstAggregatorPad, + event: *mut gst::ffi::GstEvent, +) -> gst::ffi::GstFlowReturn { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), gst::FlowReturn::Error, { + imp.sink_event_pre_queue( + wrap.unsafe_cast_ref(), + &from_glib_borrow(aggregator_pad), + from_glib_full(event), + ) + .into() + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_sink_query( + ptr: *mut ffi::GstAggregator, + aggregator_pad: *mut ffi::GstAggregatorPad, + query: *mut gst::ffi::GstQuery, +) -> glib::ffi::gboolean { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), false, { + imp.sink_query( + wrap.unsafe_cast_ref(), + &from_glib_borrow(aggregator_pad), + gst::QueryRef::from_mut_ptr(query), + ) + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_sink_query_pre_queue( + ptr: *mut ffi::GstAggregator, + aggregator_pad: *mut ffi::GstAggregatorPad, + query: *mut gst::ffi::GstQuery, +) -> glib::ffi::gboolean { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), false, { + imp.sink_query_pre_queue( + wrap.unsafe_cast_ref(), + &from_glib_borrow(aggregator_pad), + gst::QueryRef::from_mut_ptr(query), + ) + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_src_event( + ptr: *mut ffi::GstAggregator, + event: *mut gst::ffi::GstEvent, +) -> glib::ffi::gboolean { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), false, { + imp.src_event(wrap.unsafe_cast_ref(), from_glib_full(event)) + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_src_query( + ptr: *mut ffi::GstAggregator, + query: *mut gst::ffi::GstQuery, +) -> glib::ffi::gboolean { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), false, { + imp.src_query(wrap.unsafe_cast_ref(), gst::QueryRef::from_mut_ptr(query)) + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_src_activate( + ptr: *mut ffi::GstAggregator, + mode: gst::ffi::GstPadMode, + active: glib::ffi::gboolean, +) -> glib::ffi::gboolean { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), false, { + match imp.src_activate(wrap.unsafe_cast_ref(), 
from_glib(mode), from_glib(active)) { + Ok(()) => true, + Err(err) => { + err.log_with_object(&*wrap); + false + } + } + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_aggregate( + ptr: *mut ffi::GstAggregator, + timeout: glib::ffi::gboolean, +) -> gst::ffi::GstFlowReturn { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), gst::FlowReturn::Error, { + imp.aggregate(wrap.unsafe_cast_ref(), from_glib(timeout)) + .into() + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_start( + ptr: *mut ffi::GstAggregator, +) -> glib::ffi::gboolean { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), false, { + match imp.start(wrap.unsafe_cast_ref()) { + Ok(()) => true, + Err(err) => { + wrap.post_error_message(err); + false + } + } + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_stop( + ptr: *mut ffi::GstAggregator, +) -> glib::ffi::gboolean { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), false, { + match imp.stop(wrap.unsafe_cast_ref()) { + Ok(()) => true, + Err(err) => { + wrap.post_error_message(err); + false + } + } + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_get_next_time( + ptr: *mut ffi::GstAggregator, +) -> gst::ffi::GstClockTime { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), gst::ClockTime::NONE, { + imp.next_time(wrap.unsafe_cast_ref()) + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_create_new_pad( + ptr: *mut ffi::GstAggregator, + templ: *mut gst::ffi::GstPadTemplate, + req_name: *const libc::c_char, + caps: *const gst::ffi::GstCaps, +) -> *mut ffi::GstAggregatorPad { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), None, { + let req_name: Borrowed> = from_glib_borrow(req_name); + + imp.create_new_pad( + wrap.unsafe_cast_ref(), + &from_glib_borrow(templ), + req_name.as_ref().as_ref().map(|s| s.as_str()), + Option::::from_glib_borrow(caps) + .as_ref() + .as_ref(), + ) + }) + .to_glib_full() +} + +unsafe extern "C" fn aggregator_update_src_caps( + ptr: *mut ffi::GstAggregator, + caps: *mut gst::ffi::GstCaps, + res: *mut *mut gst::ffi::GstCaps, +) -> gst::ffi::GstFlowReturn { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + *res = ptr::null_mut(); + + gst::panic_to_error!(&wrap, &imp.panicked(), gst::FlowReturn::Error, { + match imp.update_src_caps(wrap.unsafe_cast_ref(), &from_glib_borrow(caps)) { + Ok(res_caps) => { + *res = res_caps.into_ptr(); + gst::FlowReturn::Ok + } + Err(err) => err.into(), + } + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_fixate_src_caps( + ptr: *mut ffi::GstAggregator, + caps: *mut gst::ffi::GstCaps, +) -> *mut gst::ffi::GstCaps { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), gst::Caps::new_empty(), { + imp.fixate_src_caps(wrap.unsafe_cast_ref(), from_glib_full(caps)) + }) + .into_ptr() +} + +unsafe extern "C" 
fn aggregator_negotiated_src_caps( + ptr: *mut ffi::GstAggregator, + caps: *mut gst::ffi::GstCaps, +) -> glib::ffi::gboolean { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), false, { + match imp.negotiated_src_caps(wrap.unsafe_cast_ref(), &from_glib_borrow(caps)) { + Ok(()) => true, + Err(err) => { + err.log_with_object(&*wrap); + false + } + } + }) + .into_glib() +} + +unsafe extern "C" fn aggregator_negotiate( + ptr: *mut ffi::GstAggregator, +) -> glib::ffi::gboolean { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, &imp.panicked(), false, { + imp.negotiate(wrap.unsafe_cast_ref()) + }) + .into_glib() +} diff --git a/src/base/subclass/aggregator_pad.rs b/src/base/subclass/aggregator_pad.rs new file mode 100644 index 0000000..1a24f30 --- /dev/null +++ b/src/base/subclass/aggregator_pad.rs @@ -0,0 +1,138 @@ +// Take a look at the license at the top of the repository in the LICENSE file. + +use super::super::ffi; + +use glib::translate::*; +use gst::glib; +use gst::prelude::*; +use gst::subclass::prelude::*; + +use super::super::Aggregator; +use super::super::AggregatorPad; + +pub trait AggregatorPadImpl: AggregatorPadImplExt + PadImpl { + fn flush( + &self, + aggregator_pad: &Self::Type, + aggregator: &Aggregator, + ) -> Result { + self.parent_flush(aggregator_pad, aggregator) + } + + fn skip_buffer( + &self, + aggregator_pad: &Self::Type, + aggregator: &Aggregator, + buffer: &gst::Buffer, + ) -> bool { + self.parent_skip_buffer(aggregator_pad, aggregator, buffer) + } +} + +pub trait AggregatorPadImplExt: ObjectSubclass { + fn parent_flush( + &self, + aggregator_pad: &Self::Type, + aggregator: &Aggregator, + ) -> Result; + + fn parent_skip_buffer( + &self, + aggregator_pad: &Self::Type, + aggregator: &Aggregator, + buffer: &gst::Buffer, + ) -> bool; +} + +impl AggregatorPadImplExt for T { + fn parent_flush( + &self, + aggregator_pad: &Self::Type, + aggregator: &Aggregator, + ) -> Result { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorPadClass; + (*parent_class) + .flush + .map(|f| { + try_from_glib(f( + aggregator_pad + .unsafe_cast_ref::() + .to_glib_none() + .0, + aggregator.to_glib_none().0, + )) + }) + .unwrap_or(Ok(gst::FlowSuccess::Ok)) + } + } + + fn parent_skip_buffer( + &self, + aggregator_pad: &Self::Type, + aggregator: &Aggregator, + buffer: &gst::Buffer, + ) -> bool { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorPadClass; + (*parent_class) + .skip_buffer + .map(|f| { + from_glib(f( + aggregator_pad + .unsafe_cast_ref::() + .to_glib_none() + .0, + aggregator.to_glib_none().0, + buffer.to_glib_none().0, + )) + }) + .unwrap_or(false) + } + } +} +unsafe impl IsSubclassable for AggregatorPad { + fn class_init(klass: &mut glib::Class) { + >::class_init(klass); + let klass = klass.as_mut(); + klass.flush = Some(aggregator_pad_flush::); + klass.skip_buffer = Some(aggregator_pad_skip_buffer::); + } + + fn instance_init(instance: &mut glib::subclass::InitializingObject) { + >::instance_init(instance); + } +} + +unsafe extern "C" fn aggregator_pad_flush( + ptr: *mut ffi::GstAggregatorPad, + aggregator: *mut ffi::GstAggregator, +) -> gst::ffi::GstFlowReturn { + let instance = &*(ptr as *mut T::Instance); + let imp = 
instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + let res: gst::FlowReturn = imp + .flush(wrap.unsafe_cast_ref(), &from_glib_borrow(aggregator)) + .into(); + res.into_glib() +} + +unsafe extern "C" fn aggregator_pad_skip_buffer( + ptr: *mut ffi::GstAggregatorPad, + aggregator: *mut ffi::GstAggregator, + buffer: *mut gst::ffi::GstBuffer, +) -> glib::ffi::gboolean { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + imp.skip_buffer( + wrap.unsafe_cast_ref(), + &from_glib_borrow(aggregator), + &from_glib_borrow(buffer), + ) + .into_glib() +} diff --git a/src/base/subclass/mod.rs b/src/base/subclass/mod.rs new file mode 100644 index 0000000..319dc13 --- /dev/null +++ b/src/base/subclass/mod.rs @@ -0,0 +1,17 @@ +// Copyright (C) 2016-2018 Sebastian Dröge +// 2016 Luis de Bethencourt +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +#![allow(clippy::cast_ptr_alignment)] + +pub mod aggregator; +pub mod aggregator_pad; + +pub mod prelude { + pub use super::aggregator::{AggregatorImpl, AggregatorImplExt}; + pub use super::aggregator_pad::{AggregatorPadImpl, AggregatorPadImplExt}; +} diff --git a/src/base/utils.rs b/src/base/utils.rs new file mode 100644 index 0000000..07e7687 --- /dev/null +++ b/src/base/utils.rs @@ -0,0 +1,26 @@ +// Take a look at the license at the top of the repository in the LICENSE file. + +use glib::translate::mut_override; +use gst::glib; + +#[must_use = "if unused the Mutex will immediately unlock"] +pub struct MutexGuard<'a>(&'a glib::ffi::GMutex); + +impl<'a> MutexGuard<'a> { + #[allow(clippy::trivially_copy_pass_by_ref)] + #[doc(alias = "g_mutex_lock")] + pub fn lock(mutex: &'a glib::ffi::GMutex) -> Self { + unsafe { + glib::ffi::g_mutex_lock(mut_override(mutex)); + } + MutexGuard(mutex) + } +} + +impl<'a> Drop for MutexGuard<'a> { + fn drop(&mut self) { + unsafe { + glib::ffi::g_mutex_unlock(mut_override(self.0)); + } + } +} From 9aab6f38fe6b4b4a0bd4761c4e173bf3600135b4 Mon Sep 17 00:00:00 2001 From: Michael Gruner Date: Wed, 13 Oct 2021 12:15:59 -0600 Subject: [PATCH 02/10] Update aggregator backport files to include update_segment --- src/base/auto/aggregator.rs | 9 +++++++++ src/base/ffi.rs | 1 + 2 files changed, 10 insertions(+) diff --git a/src/base/auto/aggregator.rs b/src/base/auto/aggregator.rs index 05227fe..81f7600 100644 --- a/src/base/auto/aggregator.rs +++ b/src/base/auto/aggregator.rs @@ -54,6 +54,9 @@ pub trait AggregatorExt: 'static { #[doc(alias = "gst_aggregator_set_src_caps")] fn set_src_caps(&self, caps: &gst::Caps); + #[doc(alias = "gst_aggregator_update_segment")] + fn update_segment(&self, segment: &gst::Segment); + #[doc(alias = "gst_aggregator_simple_get_next_time")] fn simple_get_next_time(&self) -> Option; @@ -123,6 +126,12 @@ impl> AggregatorExt for O { } } + fn update_segment(&self, segment: &gst::Segment) { + unsafe { + ffi::gst_aggregator_update_segment(self.as_ref().to_glib_none().0, segment.to_glib_none().0); + } + } + fn simple_get_next_time(&self) -> Option { unsafe { from_glib(ffi::gst_aggregator_simple_get_next_time( diff --git a/src/base/ffi.rs b/src/base/ffi.rs index e4004f5..669736b 100644 --- a/src/base/ffi.rs +++ b/src/base/ffi.rs @@ -222,6 +222,7 @@ extern "C" { ); pub fn gst_aggregator_set_src_caps(self_: *mut GstAggregator, caps: *mut gst::GstCaps); pub fn 
gst_aggregator_simple_get_next_time(self_: *mut GstAggregator) -> gst::GstClockTime; + pub fn gst_aggregator_update_segment(self_: *mut GstAggregator, segment: *const gst::GstSegment); //========================================================================= // GstAggregatorPad From 2c339a822613ffeb7b89983bc63cb92818595fee Mon Sep 17 00:00:00 2001 From: Michael Gruner Date: Wed, 13 Oct 2021 13:15:42 -0600 Subject: [PATCH 03/10] Build the backported sink if the feature is enabled --- Cargo.toml | 3 ++- src/lib.rs | 13 +++++++++---- src/ndisinkcombiner/imp.rs | 10 ++++++++++ src/ndisinkcombiner/mod.rs | 3 +++ 4 files changed, 24 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 328a15e..938b4e8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ description = "NewTek NDI Plugin" edition = "2018" [dependencies] +libc = { version = "0.2", optional = true } glib = "0.14" gst = { package = "gstreamer", version = "0.17.4", features = ["v1_12"] } gst-base = { package = "gstreamer-base", version = "0.17" } @@ -27,7 +28,7 @@ default = ["interlaced-fields", "reference-timestamps", "sink"] interlaced-fields = ["gst/v1_16", "gst-video/v1_16"] reference-timestamps = ["gst/v1_14"] sink = ["gst/v1_18", "gst-base/v1_18"] -sink-v1_14 = ["gst/v1_14", "gst-base/v1_14"] +sink-v1_14 = ["libc", "gst/v1_14", "gst-base/v1_14"] advanced-sdk = [] [lib] diff --git a/src/lib.rs b/src/lib.rs index 2c4302d..17216de 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,10 +1,15 @@ mod device_provider; pub mod ndi; -#[cfg(feature = "sink")] + +#[cfg(feature = "sink-v1_14")] +#[path = "base/mod.rs"] +pub mod gst_base_compat; + +#[cfg(any(feature = "sink", feature = "sink-v1_14"))] mod ndisink; -#[cfg(feature = "sink")] +#[cfg(any(feature = "sink", feature = "sink-v1_14"))] mod ndisinkcombiner; -#[cfg(feature = "sink")] +#[cfg(any(feature = "sink", feature = "sink-v1_14"))] pub mod ndisinkmeta; mod ndisrc; mod ndisrcdemux; @@ -123,7 +128,7 @@ fn plugin_init(plugin: &gst::Plugin) -> Result<(), glib::BoolError> { ndisrc::register(plugin)?; ndisrcdemux::register(plugin)?; - #[cfg(feature = "sink")] + #[cfg(any(feature = "sink", feature = "sink-v1_14"))] { ndisinkcombiner::register(plugin)?; ndisink::register(plugin)?; diff --git a/src/ndisinkcombiner/imp.rs b/src/ndisinkcombiner/imp.rs index 4e1a165..4e38ed2 100644 --- a/src/ndisinkcombiner/imp.rs +++ b/src/ndisinkcombiner/imp.rs @@ -3,9 +3,19 @@ use glib::subclass::prelude::*; use gst::prelude::*; use gst::subclass::prelude::*; use gst::{gst_debug, gst_error, gst_trace, gst_warning}; + +#[cfg(not(feature = "sink-v1_14"))] use gst_base::prelude::*; +#[cfg(not(feature = "sink-v1_14"))] use gst_base::subclass::prelude::*; +#[cfg(feature = "sink-v1_14")] +use self::gst_base::prelude::*; +#[cfg(feature = "sink-v1_14")] +use self::gst_base::subclass::prelude::*; +#[cfg(feature = "sink-v1_14")] +use crate::gst_base_compat as gst_base; + use once_cell::sync::Lazy; use std::mem; diff --git a/src/ndisinkcombiner/mod.rs b/src/ndisinkcombiner/mod.rs index b86c4ca..7366833 100644 --- a/src/ndisinkcombiner/mod.rs +++ b/src/ndisinkcombiner/mod.rs @@ -1,5 +1,8 @@ use glib::prelude::*; +#[cfg(feature = "sink-v1_14")] +use crate::gst_base_compat as gst_base; + mod imp; glib::wrapper! 
{ From 0e4fd10bc4a02cebda49f3eb44e36c1ff0efdedd Mon Sep 17 00:00:00 2001 From: Michael Gruner Date: Wed, 13 Oct 2021 13:39:18 -0600 Subject: [PATCH 04/10] Update README with backport instructions --- README.md | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 1f5ece5..5f3b782 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,25 @@ export GST_PLUGIN_PATH=`pwd`/target/debug gst-inspect-1.0 ndi ``` -By defult GStreamer 1.16 is required, to use only GStreamer 1.12 instead of 1.16, pass `--disable-default-features` to cargo. Only a subset of video formats is supported with this GStreamer version. +By default, GStreamer 1.18 is required to build the project entirely. Some elements support lower GStreamer versions, as shown in the following table. + +| Element | Required GStreamer Version | +|----------------------|----------------------------| +| src | v1.12 | +| interlaced-fields | v1.16 | +| reference-timestamps | v1.14 | +| sink | v1.18, v1.14* | + +To build the `src` element, pass the `--no-default-features` flag. Only a subset of video formats is supported with this GStreamer version. To build any of the other elements independently, specify them as features. For example, to build the sink element, pass it as: +``` +cargo build --no-default-features --features="sink" +``` +Multiple elements may be built if separated by commas (`--features="sink,reference-timestamps"`). + +There is a backport of the sink element for GStreamer 1.14. In order to build the backport, use the following special feature: +``` +cargo build --no-default-features --features="sink-v1_14" +``` If all went ok, you should see info related to the NDI element. To make the plugin available without using `GST_PLUGIN_PATH` it's necessary to copy the plugin to the gstreamer plugins folder. 
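+
+For example (the library file name and plugin directory below are only
+illustrative and depend on your platform and GStreamer installation), building
+and installing the 1.14 backport system-wide might look like:
+```
+cargo build --release --no-default-features --features="sink-v1_14"
+sudo cp target/release/libgstndi.so /usr/lib/x86_64-linux-gnu/gstreamer-1.0/
+gst-inspect-1.0 ndi
+```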
From 11e8785b2687bc5b8de0da903e18d637a261b83a Mon Sep 17 00:00:00 2001 From: Michael Gruner Date: Wed, 13 Oct 2021 14:55:38 -0600 Subject: [PATCH 05/10] Rename local aggregator build to NDI to avoid conflicts --- build.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/build.rs b/build.rs index 66e6b24..082c6c9 100644 --- a/build.rs +++ b/build.rs @@ -26,12 +26,12 @@ fn main() { "\"https://gitlab.freedesktop.org/gstreamer/gstreamer/issues/new\"", ); build.extra_warnings(false); - build.define("GstAggregator", "GstAggregatorFallback"); - build.define("GstAggregatorClass", "GstAggregatorFallbackClass"); - build.define("GstAggregatorPrivate", "GstAggregatorFallbackPrivate"); - build.define("GstAggregatorPad", "GstAggregatorFallbackPad"); - build.define("GstAggregatorPadClass", "GstAggregatorFallbackPadClass"); - build.define("GstAggregatorPadPrivate", "GstAggregatorFallbackPadPrivate"); + build.define("GstAggregator", "GstAggregatorNdi"); + build.define("GstAggregatorClass", "GstAggregatorNdiClass"); + build.define("GstAggregatorPrivate", "GstAggregatorNdiPrivate"); + build.define("GstAggregatorPad", "GstAggregatorNdiPad"); + build.define("GstAggregatorPadClass", "GstAggregatorNdiPadClass"); + build.define("GstAggregatorPadPrivate", "GstAggregatorNdiPadPrivate"); build.define("GST_BASE_API", "G_GNUC_INTERNAL"); build.compile("libgstaggregator-c.a"); From 744dc93bd6242f3c2a34afe07f3855e758626f7c Mon Sep 17 00:00:00 2001 From: Michael Gruner Date: Wed, 27 Oct 2021 18:19:41 -0600 Subject: [PATCH 06/10] Upgrade backported aggregator sources to latest available --- src/base/aggregator.rs | 162 +++++++++++- src/base/aggregator_pad.rs | 5 +- src/base/auto/aggregator.rs | 175 +++++++++++-- src/base/auto/aggregator_pad.rs | 17 +- src/base/auto/enums.rs | 84 ++++++ src/base/auto/mod.rs | 14 +- src/base/ffi.rs | 43 ++- src/base/gstaggregator.c | 391 +++++++++++++++++++++++----- src/base/gstaggregator.h | 60 ++++- src/base/subclass/aggregator.rs | 141 ++++++++-- src/base/subclass/aggregator_pad.rs | 4 +- src/base/subclass/mod.rs | 18 +- src/base/utils.rs | 2 +- 13 files changed, 972 insertions(+), 144 deletions(-) create mode 100644 src/base/auto/enums.rs diff --git a/src/base/aggregator.rs b/src/base/aggregator.rs index e17d0f6..89b6fb6 100644 --- a/src/base/aggregator.rs +++ b/src/base/aggregator.rs @@ -2,24 +2,33 @@ use super::ffi; use super::Aggregator; - +use glib::prelude::*; use glib::signal::{connect_raw, SignalHandlerId}; use glib::translate::*; -use glib::IsA; use glib::Value; -use gst::glib; -use gst::prelude::*; - use std::boxed::Box as Box_; use std::mem; +use std::mem::transmute; use std::ptr; pub trait AggregatorExtManual: 'static { + #[doc(alias = "get_allocator")] + #[doc(alias = "gst_aggregator_get_allocator")] fn allocator(&self) -> (Option, gst::AllocationParams); + #[doc(alias = "gst_aggregator_finish_buffer")] fn finish_buffer(&self, buffer: gst::Buffer) -> Result; + + #[doc(alias = "gst_aggregator_finish_buffer_list")] + fn finish_buffer_list( + &self, + bufferlist: gst::BufferList, + ) -> Result; + + #[doc(alias = "min-upstream-latency")] fn min_upstream_latency(&self) -> gst::ClockTime; + #[doc(alias = "min-upstream-latency")] fn set_min_upstream_latency(&self, min_upstream_latency: gst::ClockTime); #[doc(alias = "min-upstream-latency")] @@ -27,6 +36,36 @@ pub trait AggregatorExtManual: 'static { &self, f: F, ) -> SignalHandlerId; + + #[doc(alias = "gst_aggregator_update_segment")] + fn update_segment(&self, segment: &gst::FormattedSegment); + + 
#[doc(alias = "gst_aggregator_selected_samples")] + fn selected_samples( + &self, + pts: impl Into>, + dts: impl Into>, + duration: impl Into>, + info: Option<&gst::StructureRef>, + ); + + fn connect_samples_selected< + P, + F: Fn( + &P, + &gst::Segment, + Option, + Option, + Option, + Option<&gst::StructureRef>, + ) + Send + + 'static, + >( + &self, + f: F, + ) -> SignalHandlerId + where + P: IsA; } impl> AggregatorExtManual for O { @@ -52,6 +91,18 @@ impl> AggregatorExtManual for O { } } + fn finish_buffer_list( + &self, + bufferlist: gst::BufferList, + ) -> Result { + unsafe { + try_from_glib(ffi::gst_aggregator_finish_buffer_list( + self.as_ref().to_glib_none().0, + bufferlist.into_ptr(), + )) + } + } + fn min_upstream_latency(&self) -> gst::ClockTime { unsafe { let mut value = Value::from_type(::static_type()); @@ -85,13 +136,110 @@ impl> AggregatorExtManual for O { connect_raw( self.as_ptr() as *mut _, b"notify::min-upstream-latency\0".as_ptr() as *const _, - Some(mem::transmute::<_, unsafe extern "C" fn()>( + Some(transmute::<_, unsafe extern "C" fn()>( notify_min_upstream_latency_trampoline:: as *const (), )), Box_::into_raw(f), ) } } + + fn update_segment(&self, segment: &gst::FormattedSegment) { + unsafe { + ffi::gst_aggregator_update_segment( + self.as_ref().to_glib_none().0, + mut_override(segment.to_glib_none().0), + ) + } + } + + fn selected_samples( + &self, + pts: impl Into>, + dts: impl Into>, + duration: impl Into>, + info: Option<&gst::StructureRef>, + ) { + unsafe { + ffi::gst_aggregator_selected_samples( + self.as_ref().to_glib_none().0, + pts.into().into_glib(), + dts.into().into_glib(), + duration.into().into_glib(), + info.as_ref() + .map(|s| s.as_ptr() as *mut _) + .unwrap_or(ptr::null_mut()), + ); + } + } + + fn connect_samples_selected< + P, + F: Fn( + &P, + &gst::Segment, + Option, + Option, + Option, + Option<&gst::StructureRef>, + ) + Send + + 'static, + >( + &self, + f: F, + ) -> SignalHandlerId + where + P: IsA, + { + unsafe extern "C" fn samples_selected_trampoline< + P, + F: Fn( + &P, + &gst::Segment, + Option, + Option, + Option, + Option<&gst::StructureRef>, + ) + Send + + 'static, + >( + this: *mut ffi::GstAggregator, + segment: *mut gst::ffi::GstSegment, + pts: gst::ffi::GstClockTime, + dts: gst::ffi::GstClockTime, + duration: gst::ffi::GstClockTime, + info: *mut gst::ffi::GstStructure, + f: glib::ffi::gpointer, + ) where + P: IsA, + { + let f: &F = &*(f as *const F); + f( + Aggregator::from_glib_borrow(this).unsafe_cast_ref(), + &gst::Segment::from_glib_borrow(segment), + from_glib(pts), + from_glib(dts), + from_glib(duration), + if info.is_null() { + None + } else { + Some(gst::StructureRef::from_glib_borrow(info)) + }, + ) + } + + unsafe { + let f: Box_ = Box_::new(f); + connect_raw( + self.as_ptr() as *mut _, + b"samples-selected\0".as_ptr() as *const _, + Some(transmute::<_, unsafe extern "C" fn()>( + samples_selected_trampoline:: as *const (), + )), + Box_::into_raw(f), + ) + } + } } unsafe extern "C" fn notify_min_upstream_latency_trampoline( @@ -102,5 +250,5 @@ unsafe extern "C" fn notify_min_upstream_latency_trampoline, { let f: &F = &*(f as *const F); - f(&Aggregator::from_glib_borrow(this).unsafe_cast_ref()) + f(Aggregator::from_glib_borrow(this).unsafe_cast_ref()) } diff --git a/src/base/aggregator_pad.rs b/src/base/aggregator_pad.rs index 794ba1c..524f258 100644 --- a/src/base/aggregator_pad.rs +++ b/src/base/aggregator_pad.rs @@ -1,11 +1,10 @@ // Take a look at the license at the top of the repository in the LICENSE file. 
use super::ffi; -use super::AggregatorPad; -use glib::object::IsA; +use super::AggregatorPad; +use glib::prelude::*; use glib::translate::*; -use gst::glib; pub trait AggregatorPadExtManual: 'static { #[doc(alias = "get_segment")] diff --git a/src/base/auto/aggregator.rs b/src/base/auto/aggregator.rs index 81f7600..1b9e8b6 100644 --- a/src/base/auto/aggregator.rs +++ b/src/base/auto/aggregator.rs @@ -5,17 +5,20 @@ use super::super::ffi; +use super::AggregatorPad; +use super::AggregatorStartTimeSelection; +use glib::object::Cast; +use glib::object::IsA; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; - -use gst::glib; -use gst::prelude::*; - +use glib::StaticType; +use glib::ToValue; use std::boxed::Box as Box_; use std::mem::transmute; glib::wrapper! { + #[doc(alias = "GstAggregator")] pub struct Aggregator(Object) @extends gst::Element, gst::Object; match fn { @@ -29,10 +32,6 @@ unsafe impl Sync for Aggregator {} pub const NONE_AGGREGATOR: Option<&Aggregator> = None; pub trait AggregatorExt: 'static { - //#[doc(alias = "gst_aggregator_get_allocator")] - //#[doc(alias = "get_allocator")] - //fn allocator(&self, allocator: /*Ignored*/Option, params: /*Ignored*/gst::AllocationParams); - #[doc(alias = "gst_aggregator_get_buffer_pool")] #[doc(alias = "get_buffer_pool")] fn buffer_pool(&self) -> Option; @@ -44,6 +43,9 @@ pub trait AggregatorExt: 'static { #[doc(alias = "gst_aggregator_negotiate")] fn negotiate(&self) -> bool; + #[doc(alias = "gst_aggregator_peek_next_sample")] + fn peek_next_sample>(&self, pad: &P) -> Option; + #[doc(alias = "gst_aggregator_set_latency")] fn set_latency( &self, @@ -54,18 +56,33 @@ pub trait AggregatorExt: 'static { #[doc(alias = "gst_aggregator_set_src_caps")] fn set_src_caps(&self, caps: &gst::Caps); - #[doc(alias = "gst_aggregator_update_segment")] - fn update_segment(&self, segment: &gst::Segment); - #[doc(alias = "gst_aggregator_simple_get_next_time")] fn simple_get_next_time(&self) -> Option; + #[doc(alias = "emit-signals")] + fn emits_signals(&self) -> bool; + + #[doc(alias = "emit-signals")] + fn set_emit_signals(&self, emit_signals: bool); + #[doc(alias = "start-time")] fn start_time(&self) -> u64; #[doc(alias = "start-time")] fn set_start_time(&self, start_time: u64); + #[doc(alias = "start-time-selection")] + fn start_time_selection(&self) -> AggregatorStartTimeSelection; + + #[doc(alias = "start-time-selection")] + fn set_start_time_selection(&self, start_time_selection: AggregatorStartTimeSelection); + + #[doc(alias = "emit-signals")] + fn connect_emit_signals_notify( + &self, + f: F, + ) -> SignalHandlerId; + #[doc(alias = "latency")] fn connect_latency_notify(&self, f: F) -> SignalHandlerId; @@ -75,13 +92,15 @@ pub trait AggregatorExt: 'static { &self, f: F, ) -> SignalHandlerId; + + #[doc(alias = "start-time-selection")] + fn connect_start_time_selection_notify( + &self, + f: F, + ) -> SignalHandlerId; } impl> AggregatorExt for O { - //fn allocator(&self, allocator: /*Ignored*/Option, params: /*Ignored*/gst::AllocationParams) { - // unsafe { TODO: call ffi:gst_aggregator_get_allocator() } - //} - fn buffer_pool(&self) -> Option { unsafe { from_glib_full(ffi::gst_aggregator_get_buffer_pool( @@ -106,6 +125,15 @@ impl> AggregatorExt for O { } } + fn peek_next_sample>(&self, pad: &P) -> Option { + unsafe { + from_glib_full(ffi::gst_aggregator_peek_next_sample( + self.as_ref().to_glib_none().0, + pad.as_ref().to_glib_none().0, + )) + } + } + fn set_latency( &self, min_latency: gst::ClockTime, @@ -126,12 +154,6 
@@ impl> AggregatorExt for O { } } - fn update_segment(&self, segment: &gst::Segment) { - unsafe { - ffi::gst_aggregator_update_segment(self.as_ref().to_glib_none().0, segment.to_glib_none().0); - } - } - fn simple_get_next_time(&self) -> Option { unsafe { from_glib(ffi::gst_aggregator_simple_get_next_time( @@ -140,6 +162,30 @@ impl> AggregatorExt for O { } } + fn emits_signals(&self) -> bool { + unsafe { + let mut value = glib::Value::from_type(::static_type()); + glib::gobject_ffi::g_object_get_property( + self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, + b"emit-signals\0".as_ptr() as *const _, + value.to_glib_none_mut().0, + ); + value + .get() + .expect("Return Value for property `emit-signals` getter") + } + } + + fn set_emit_signals(&self, emit_signals: bool) { + unsafe { + glib::gobject_ffi::g_object_set_property( + self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, + b"emit-signals\0".as_ptr() as *const _, + emit_signals.to_value().to_glib_none().0, + ); + } + } + fn start_time(&self) -> u64 { unsafe { let mut value = glib::Value::from_type(::static_type()); @@ -164,7 +210,59 @@ impl> AggregatorExt for O { } } - #[doc(alias = "latency")] + fn start_time_selection(&self) -> AggregatorStartTimeSelection { + unsafe { + let mut value = + glib::Value::from_type(::static_type()); + glib::gobject_ffi::g_object_get_property( + self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, + b"start-time-selection\0".as_ptr() as *const _, + value.to_glib_none_mut().0, + ); + value + .get() + .expect("Return Value for property `start-time-selection` getter") + } + } + + fn set_start_time_selection(&self, start_time_selection: AggregatorStartTimeSelection) { + unsafe { + glib::gobject_ffi::g_object_set_property( + self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, + b"start-time-selection\0".as_ptr() as *const _, + start_time_selection.to_value().to_glib_none().0, + ); + } + } + + fn connect_emit_signals_notify( + &self, + f: F, + ) -> SignalHandlerId { + unsafe extern "C" fn notify_emit_signals_trampoline< + P: IsA, + F: Fn(&P) + Send + Sync + 'static, + >( + this: *mut ffi::GstAggregator, + _param_spec: glib::ffi::gpointer, + f: glib::ffi::gpointer, + ) { + let f: &F = &*(f as *const F); + f(Aggregator::from_glib_borrow(this).unsafe_cast_ref()) + } + unsafe { + let f: Box_ = Box_::new(f); + connect_raw( + self.as_ptr() as *mut _, + b"notify::emit-signals\0".as_ptr() as *const _, + Some(transmute::<_, unsafe extern "C" fn()>( + notify_emit_signals_trampoline:: as *const (), + )), + Box_::into_raw(f), + ) + } + } + fn connect_latency_notify( &self, f: F, @@ -178,7 +276,7 @@ impl> AggregatorExt for O { f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); - f(&Aggregator::from_glib_borrow(this).unsafe_cast_ref()) + f(Aggregator::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_ = Box_::new(f); @@ -193,7 +291,6 @@ impl> AggregatorExt for O { } } - #[doc(alias = "start-time")] fn connect_start_time_notify( &self, f: F, @@ -207,7 +304,7 @@ impl> AggregatorExt for O { f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); - f(&Aggregator::from_glib_borrow(this).unsafe_cast_ref()) + f(Aggregator::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_ = Box_::new(f); @@ -221,4 +318,32 @@ impl> AggregatorExt for O { ) } } + + fn connect_start_time_selection_notify( + &self, + f: F, + ) -> SignalHandlerId { + unsafe extern "C" fn notify_start_time_selection_trampoline< + P: IsA, + F: Fn(&P) + Send + Sync + 'static, + >( + this: *mut 
ffi::GstAggregator, + _param_spec: glib::ffi::gpointer, + f: glib::ffi::gpointer, + ) { + let f: &F = &*(f as *const F); + f(Aggregator::from_glib_borrow(this).unsafe_cast_ref()) + } + unsafe { + let f: Box_ = Box_::new(f); + connect_raw( + self.as_ptr() as *mut _, + b"notify::start-time-selection\0".as_ptr() as *const _, + Some(transmute::<_, unsafe extern "C" fn()>( + notify_start_time_selection_trampoline:: as *const (), + )), + Box_::into_raw(f), + ) + } + } } diff --git a/src/base/auto/aggregator_pad.rs b/src/base/auto/aggregator_pad.rs index a4c0a56..7ed3b24 100644 --- a/src/base/auto/aggregator_pad.rs +++ b/src/base/auto/aggregator_pad.rs @@ -1,20 +1,22 @@ // This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) +// from gst-gir-files (https://gitlab.freedesktop.org/gstreamer/gir-files-rs.git) // DO NOT EDIT use super::super::ffi; +use glib::object::Cast; +use glib::object::IsA; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; - -use gst::glib; -use gst::prelude::*; - +use glib::StaticType; +use glib::ToValue; use std::boxed::Box as Box_; use std::mem::transmute; glib::wrapper! { + #[doc(alias = "GstAggregatorPad")] pub struct AggregatorPad(Object) @extends gst::Pad, gst::Object; match fn { @@ -49,6 +51,7 @@ pub trait AggregatorPadExt: 'static { #[doc(alias = "emit-signals")] fn set_emit_signals(&self, emit_signals: bool); + #[doc(alias = "buffer-consumed")] fn connect_buffer_consumed( &self, f: F, @@ -126,7 +129,6 @@ impl> AggregatorPadExt for O { } } - #[doc(alias = "buffer-consumed")] fn connect_buffer_consumed( &self, f: F, @@ -141,7 +143,7 @@ impl> AggregatorPadExt for O { ) { let f: &F = &*(f as *const F); f( - &AggregatorPad::from_glib_borrow(this).unsafe_cast_ref(), + AggregatorPad::from_glib_borrow(this).unsafe_cast_ref(), &from_glib_borrow(object), ) } @@ -158,7 +160,6 @@ impl> AggregatorPadExt for O { } } - #[doc(alias = "emit-signals")] fn connect_emit_signals_notify( &self, f: F, @@ -172,7 +173,7 @@ impl> AggregatorPadExt for O { f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); - f(&AggregatorPad::from_glib_borrow(this).unsafe_cast_ref()) + f(AggregatorPad::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_ = Box_::new(f); diff --git a/src/base/auto/enums.rs b/src/base/auto/enums.rs new file mode 100644 index 0000000..1226c4f --- /dev/null +++ b/src/base/auto/enums.rs @@ -0,0 +1,84 @@ +// This file was generated by gir (https://github.com/gtk-rs/gir) +// from gir-files (https://github.com/gtk-rs/gir-files) +// from gst-gir-files (https://gitlab.freedesktop.org/gstreamer/gir-files-rs.git) +// DO NOT EDIT + +use super::super::ffi; + +use glib::translate::*; +use glib::value::FromValue; +use glib::value::ToValue; +use glib::StaticType; +use glib::Type; + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Copy)] +#[non_exhaustive] +#[doc(alias = "GstAggregatorStartTimeSelection")] +pub enum AggregatorStartTimeSelection { + #[doc(alias = "GST_AGGREGATOR_START_TIME_SELECTION_ZERO")] + Zero, + #[doc(alias = "GST_AGGREGATOR_START_TIME_SELECTION_FIRST")] + First, + #[doc(alias = "GST_AGGREGATOR_START_TIME_SELECTION_SET")] + Set, + #[doc(hidden)] + __Unknown(i32), +} + +#[doc(hidden)] +impl IntoGlib for AggregatorStartTimeSelection { + type GlibType = ffi::GstAggregatorStartTimeSelection; + + fn into_glib(self) -> ffi::GstAggregatorStartTimeSelection { + match self { + Self::Zero => ffi::GST_AGGREGATOR_START_TIME_SELECTION_ZERO, + 
Self::First => ffi::GST_AGGREGATOR_START_TIME_SELECTION_FIRST, + Self::Set => ffi::GST_AGGREGATOR_START_TIME_SELECTION_SET, + Self::__Unknown(value) => value, + } + } +} + +#[doc(hidden)] +impl FromGlib for AggregatorStartTimeSelection { + unsafe fn from_glib(value: ffi::GstAggregatorStartTimeSelection) -> Self { + match value { + ffi::GST_AGGREGATOR_START_TIME_SELECTION_ZERO => Self::Zero, + ffi::GST_AGGREGATOR_START_TIME_SELECTION_FIRST => Self::First, + ffi::GST_AGGREGATOR_START_TIME_SELECTION_SET => Self::Set, + value => Self::__Unknown(value), + } + } +} + +impl StaticType for AggregatorStartTimeSelection { + fn static_type() -> Type { + unsafe { from_glib(ffi::gst_aggregator_start_time_selection_get_type()) } + } +} + +impl glib::value::ValueType for AggregatorStartTimeSelection { + type Type = Self; +} + +unsafe impl<'a> FromValue<'a> for AggregatorStartTimeSelection { + type Checker = glib::value::GenericValueTypeChecker; + + unsafe fn from_value(value: &'a glib::Value) -> Self { + from_glib(glib::gobject_ffi::g_value_get_enum(value.to_glib_none().0)) + } +} + +impl ToValue for AggregatorStartTimeSelection { + fn to_value(&self) -> glib::Value { + let mut value = glib::Value::for_value_type::(); + unsafe { + glib::gobject_ffi::g_value_set_enum(value.to_glib_none_mut().0, self.into_glib()); + } + value + } + + fn value_type(&self) -> glib::Type { + Self::static_type() + } +} diff --git a/src/base/auto/mod.rs b/src/base/auto/mod.rs index 09fb05f..a87a88f 100644 --- a/src/base/auto/mod.rs +++ b/src/base/auto/mod.rs @@ -1,13 +1,19 @@ +// This file was generated by gir (https://github.com/gtk-rs/gir) +// from gir-files (https://github.com/gtk-rs/gir-files) +// from gst-gir-files (https://gitlab.freedesktop.org/gstreamer/gir-files-rs.git) +// DO NOT EDIT + mod aggregator; -pub use self::aggregator::AggregatorExt; pub use self::aggregator::{Aggregator, NONE_AGGREGATOR}; mod aggregator_pad; -pub use self::aggregator_pad::AggregatorPadExt; pub use self::aggregator_pad::{AggregatorPad, NONE_AGGREGATOR_PAD}; +mod enums; +pub use self::enums::AggregatorStartTimeSelection; + #[doc(hidden)] pub mod traits { - pub use super::AggregatorExt; - pub use super::AggregatorPadExt; + pub use super::aggregator::AggregatorExt; + pub use super::aggregator_pad::AggregatorPadExt; } diff --git a/src/base/ffi.rs b/src/base/ffi.rs index 669736b..e1d0b18 100644 --- a/src/base/ffi.rs +++ b/src/base/ffi.rs @@ -96,7 +96,19 @@ pub struct GstAggregatorClass { *mut gst::GstQuery, ) -> gboolean, >, - pub _gst_reserved: [gpointer; 17], + pub finish_buffer_list: Option< + unsafe extern "C" fn( + *mut GstAggregator, + *mut gst::GstBufferList + ) -> gst::GstFlowReturn + >, + pub peek_next_sample: Option< + unsafe extern "C" fn( + *mut GstAggregator, + *mut GstAggregatorPad, + ) -> *mut gst::GstSample, + >, + pub _gst_reserved: [gpointer; 15], } impl ::std::fmt::Debug for GstAggregatorClass { @@ -121,6 +133,8 @@ impl ::std::fmt::Debug for GstAggregatorClass { .field("negotiated_src_caps", &self.negotiated_src_caps) .field("decide_allocation", &self.decide_allocation) .field("propose_allocation", &self.propose_allocation) + .field("finish_buffer_list", &self.finish_buffer_list) + .field("peek_next_sample", &self.peek_next_sample) .finish() } } @@ -198,6 +212,12 @@ impl ::std::fmt::Debug for GstAggregatorPad { } } +// Enums +pub type GstAggregatorStartTimeSelection = c_int; +pub const GST_AGGREGATOR_START_TIME_SELECTION_ZERO: GstAggregatorStartTimeSelection = 0; +pub const GST_AGGREGATOR_START_TIME_SELECTION_FIRST: 
GstAggregatorStartTimeSelection = 1; +pub const GST_AGGREGATOR_START_TIME_SELECTION_SET: GstAggregatorStartTimeSelection = 2; + extern "C" { //========================================================================= // GstAggregator @@ -207,6 +227,10 @@ extern "C" { aggregator: *mut GstAggregator, buffer: *mut gst::GstBuffer, ) -> gst::GstFlowReturn; + pub fn gst_aggregator_finish_buffer_list( + aggregator: *mut GstAggregator, + buffer: *mut gst::GstBufferList, + ) -> gst::GstFlowReturn; pub fn gst_aggregator_negotiate(aggregator: *mut GstAggregator) -> gboolean; pub fn gst_aggregator_get_allocator( self_: *mut GstAggregator, @@ -223,6 +247,17 @@ extern "C" { pub fn gst_aggregator_set_src_caps(self_: *mut GstAggregator, caps: *mut gst::GstCaps); pub fn gst_aggregator_simple_get_next_time(self_: *mut GstAggregator) -> gst::GstClockTime; pub fn gst_aggregator_update_segment(self_: *mut GstAggregator, segment: *const gst::GstSegment); + pub fn gst_aggregator_peek_next_sample( + self_: *mut GstAggregator, + aggregator_pad: *mut GstAggregatorPad, + ) -> *mut gst::GstSample; + pub fn gst_aggregator_selected_samples ( + self_: *mut GstAggregator, + pts: gst::GstClockTime, + dts: gst::GstClockTime, + duration: gst::GstClockTime, + info: *mut gst::GstStructure + ); //========================================================================= // GstAggregatorPad @@ -233,4 +268,10 @@ extern "C" { pub fn gst_aggregator_pad_is_eos(pad: *mut GstAggregatorPad) -> gboolean; pub fn gst_aggregator_pad_peek_buffer(pad: *mut GstAggregatorPad) -> *mut gst::GstBuffer; pub fn gst_aggregator_pad_pop_buffer(pad: *mut GstAggregatorPad) -> *mut gst::GstBuffer; + + //========================================================================= + // GstAggregatorStartTimeSelection + //========================================================================= + pub fn gst_aggregator_start_time_selection_get_type() -> GType; } + diff --git a/src/base/gstaggregator.c b/src/base/gstaggregator.c index 436ed40..5a6e30f 100644 --- a/src/base/gstaggregator.c +++ b/src/base/gstaggregator.c @@ -30,27 +30,46 @@ * Control is given to the subclass when all pads have data. * * * Base class for mixers and muxers. Subclasses should at least implement - * the #GstAggregatorClass.aggregate() virtual method. + * the #GstAggregatorClass::aggregate virtual method. * * * Installs a #GstPadChainFunction, a #GstPadEventFullFunction and a * #GstPadQueryFunction to queue all serialized data packets per sink pad. * Subclasses should not overwrite those, but instead implement - * #GstAggregatorClass.sink_event() and #GstAggregatorClass.sink_query() as + * #GstAggregatorClass::sink_event and #GstAggregatorClass::sink_query as * needed. * * * When data is queued on all pads, the aggregate vmethod is called. * * * One can peek at the data on any given GstAggregatorPad with the - * gst_aggregator_pad_peek_buffer () method, and remove it from the pad + * gst_aggregator_pad_peek_buffer() method, and remove it from the pad * with the gst_aggregator_pad_pop_buffer () method. When a buffer * has been taken with pop_buffer (), a new buffer can be queued * on that pad. * + * * When gst_aggregator_pad_peek_buffer() or gst_aggregator_pad_has_buffer() + * are called, a reference is taken to the returned buffer, which stays + * valid until either: + * + * - gst_aggregator_pad_pop_buffer() is called, in which case the caller + * is guaranteed that the buffer they receive is the same as the peeked + * buffer. 
+ * - gst_aggregator_pad_drop_buffer() is called, in which case the caller + * is guaranteed that the dropped buffer is the one that was peeked. + * - the subclass implementation of #GstAggregatorClass.aggregate returns. + * + * Subsequent calls to gst_aggregator_pad_peek_buffer() or + * gst_aggregator_pad_has_buffer() return / check the same buffer that was + * returned / checked, until one of the conditions listed above is met. + * + * Subclasses are only allowed to call these methods from the aggregate + * thread. + * * * If the subclass wishes to push a buffer downstream in its aggregate * implementation, it should do so through the - * gst_aggregator_finish_buffer () method. This method will take care + * gst_aggregator_finish_buffer() method. This method will take care * of sending and ordering mandatory events such as stream start, caps - * and segment. + * and segment. Buffer lists can also be pushed out with + * gst_aggregator_finish_buffer_list(). * * * Same goes for EOS events, which should not be pushed directly by the * subclass, it should instead return GST_FLOW_EOS in its aggregate @@ -91,32 +110,25 @@ #include "gstaggregator.h" -typedef enum -{ - GST_AGGREGATOR_START_TIME_SELECTION_ZERO, - GST_AGGREGATOR_START_TIME_SELECTION_FIRST, - GST_AGGREGATOR_START_TIME_SELECTION_SET -} GstAggregatorStartTimeSelection; - -static GType +GType gst_aggregator_start_time_selection_get_type (void) { static GType gtype = 0; - if (gtype == 0) { + if (g_once_init_enter (>ype)) { static const GEnumValue values[] = { {GST_AGGREGATOR_START_TIME_SELECTION_ZERO, - "Start at 0 running time (default)", "zero"}, + "GST_AGGREGATOR_START_TIME_SELECTION_ZERO", "zero"}, {GST_AGGREGATOR_START_TIME_SELECTION_FIRST, - "Start at first observed input running time", "first"}, + "GST_AGGREGATOR_START_TIME_SELECTION_FIRST", "first"}, {GST_AGGREGATOR_START_TIME_SELECTION_SET, - "Set start time with start-time property", "set"}, + "GST_AGGREGATOR_START_TIME_SELECTION_SET", "set"}, {0, NULL, NULL} }; + GType new_type = + g_enum_register_static ("GstAggregatorStartTimeSelection", values); - gtype = - g_enum_register_static ("GstAggregatorFallbackStartTimeSelection", - values); + g_once_init_leave (>ype, new_type); } return gtype; } @@ -133,7 +145,7 @@ static GstClockTime gst_aggregator_get_latency_property (GstAggregator * agg); static GstClockTime gst_aggregator_get_latency_unlocked (GstAggregator * self); static void gst_aggregator_pad_buffer_consumed (GstAggregatorPad * pad, - GstBuffer * buffer); + GstBuffer * buffer, gboolean dequeued); GST_DEBUG_CATEGORY_STATIC (aggregator_debug); #define GST_CAT_DEFAULT aggregator_debug @@ -245,6 +257,7 @@ struct _GstAggregatorPadPrivate GQueue data; /* buffers, events and queries */ GstBuffer *clipped_buffer; guint num_buffers; + GstBuffer *peeked_buffer; /* used to track fill state of queues, only used with live-src and when * latency property is set to > 0 */ @@ -303,6 +316,31 @@ gst_aggregator_pad_flush (GstAggregatorPad * aggpad, GstAggregator * agg) return TRUE; } +/** + * gst_aggregator_peek_next_sample: + * + * Use this function to determine what input buffers will be aggregated + * to produce the next output buffer. This should only be called from + * a #GstAggregator::samples-selected handler, and can be used to precisely + * control aggregating parameters for a given set of input samples. + * + * Returns: (nullable) (transfer full): The sample that is about to be aggregated. It may hold a #GstBuffer + * or a #GstBufferList. 
The contents of its info structure is subclass-dependent, + * and documented on a subclass basis. The buffers held by the sample are + * not writable. + * Since: 1.18 + */ +GstSample * +gst_aggregator_peek_next_sample (GstAggregator * agg, GstAggregatorPad * aggpad) +{ + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (agg); + + if (klass->peek_next_sample) + return (klass->peek_next_sample (agg, aggpad)); + + return NULL; +} + /************************************* * GstAggregator implementation * *************************************/ @@ -345,6 +383,7 @@ struct _GstAggregatorPrivate /* aggregate */ GstClockID aggregate_id; /* protected by src_lock */ + gboolean selected_samples_called_or_warned; /* protected by src_lock */ GMutex src_lock; GCond src_cond; @@ -360,6 +399,7 @@ struct _GstAggregatorPrivate /* properties */ gint64 latency; /* protected by both src_lock and all pad locks */ + gboolean emit_signals; }; /* Seek event forwarding helper */ @@ -379,6 +419,7 @@ typedef struct #define DEFAULT_MIN_UPSTREAM_LATENCY 0 #define DEFAULT_START_TIME_SELECTION GST_AGGREGATOR_START_TIME_SELECTION_ZERO #define DEFAULT_START_TIME (-1) +#define DEFAULT_EMIT_SIGNALS FALSE enum { @@ -387,9 +428,18 @@ enum PROP_MIN_UPSTREAM_LATENCY, PROP_START_TIME_SELECTION, PROP_START_TIME, + PROP_EMIT_SIGNALS, PROP_LAST }; +enum +{ + SIGNAL_SAMPLES_SELECTED, + LAST_SIGNAL, +}; + +static guint gst_aggregator_signals[LAST_SIGNAL] = { 0 }; + static GstFlowReturn gst_aggregator_pad_chain_internal (GstAggregator * self, GstAggregatorPad * aggpad, GstBuffer * buffer, gboolean head); @@ -643,6 +693,48 @@ gst_aggregator_finish_buffer (GstAggregator * aggregator, GstBuffer * buffer) return klass->finish_buffer (aggregator, buffer); } +static GstFlowReturn +gst_aggregator_default_finish_buffer_list (GstAggregator * self, + GstBufferList * bufferlist) +{ + gst_aggregator_push_mandatory_events (self); + + GST_OBJECT_LOCK (self); + if (!self->priv->flushing && gst_pad_is_active (self->srcpad)) { + GST_TRACE_OBJECT (self, "pushing bufferlist%" GST_PTR_FORMAT, bufferlist); + GST_OBJECT_UNLOCK (self); + return gst_pad_push_list (self->srcpad, bufferlist); + } else { + GST_INFO_OBJECT (self, "Not pushing (active: %i, flushing: %i)", + self->priv->flushing, gst_pad_is_active (self->srcpad)); + GST_OBJECT_UNLOCK (self); + gst_buffer_list_unref (bufferlist); + return GST_FLOW_OK; + } +} + +/** + * gst_aggregator_finish_buffer_list: + * @aggregator: The #GstAggregator + * @bufferlist: (transfer full): the #GstBufferList to push. + * + * This method will push the provided output buffer list downstream. If needed, + * mandatory events such as stream-start, caps, and segment events will be + * sent before pushing the buffer. 
+ * + * Since: 1.18 + */ +GstFlowReturn +gst_aggregator_finish_buffer_list (GstAggregator * aggregator, + GstBufferList * bufferlist) +{ + GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (aggregator); + + g_assert (klass->finish_buffer_list != NULL); + + return klass->finish_buffer_list (aggregator, bufferlist); +} + static void gst_aggregator_push_eos (GstAggregator * self) { @@ -828,8 +920,6 @@ gst_aggregator_do_events_and_queries (GstElement * self, GstPad * epad, PAD_LOCK (pad); if (GST_EVENT_TYPE (event) == GST_EVENT_CAPS) { pad->priv->negotiated = ret; - if (!ret) - pad->priv->flow_return = data->flow_ret = GST_FLOW_NOT_NEGOTIATED; } if (g_queue_peek_tail (&pad->priv->data) == event) gst_event_unref (g_queue_pop_tail (&pad->priv->data)); @@ -871,21 +961,22 @@ gst_aggregator_pad_skip_buffers (GstElement * self, GstPad * epad, PAD_LOCK (aggpad); - item = g_queue_peek_head_link (&aggpad->priv->data); + item = g_queue_peek_tail_link (&aggpad->priv->data); while (item) { - GList *next = item->next; + GList *prev = item->prev; if (GST_IS_BUFFER (item->data) && klass->skip_buffer (aggpad, agg, item->data)) { GST_LOG_OBJECT (aggpad, "Skipping %" GST_PTR_FORMAT, item->data); - gst_aggregator_pad_buffer_consumed (aggpad, GST_BUFFER (item->data)); + gst_aggregator_pad_buffer_consumed (aggpad, GST_BUFFER (item->data), + TRUE); gst_buffer_unref (item->data); g_queue_delete_link (&aggpad->priv->data, item); } else { break; } - item = next; + item = prev; } PAD_UNLOCK (aggpad); @@ -893,6 +984,22 @@ gst_aggregator_pad_skip_buffers (GstElement * self, GstPad * epad, return TRUE; } +static gboolean +gst_aggregator_pad_reset_peeked_buffer (GstElement * self, GstPad * epad, + gpointer user_data) +{ + GstAggregatorPad *aggpad = (GstAggregatorPad *) epad; + + PAD_LOCK (aggpad); + + gst_buffer_replace (&aggpad->priv->peeked_buffer, NULL); + + PAD_UNLOCK (aggpad); + + return TRUE; +} + + static void gst_aggregator_pad_set_flushing (GstAggregatorPad * aggpad, GstFlowReturn flow_return, gboolean full) @@ -957,7 +1064,7 @@ gst_aggregator_default_negotiated_src_caps (GstAggregator * agg, GstCaps * caps) static gboolean gst_aggregator_set_allocation (GstAggregator * self, GstBufferPool * pool, GstAllocator * allocator, - GstAllocationParams * params, GstQuery * query) + const GstAllocationParams * params, GstQuery * query) { GstAllocator *oldalloc; GstBufferPool *oldpool; @@ -1175,7 +1282,7 @@ gst_aggregator_negotiate_unlocked (GstAggregator * self) * * Negotiates src pad caps with downstream elements. * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case. But marks it again - * if #GstAggregatorClass.negotiate() fails. + * if #GstAggregatorClass::negotiate fails. * * Returns: %TRUE if the negotiation succeeded, else %FALSE. 
* @@ -1227,8 +1334,11 @@ gst_aggregator_aggregate_func (GstAggregator * self) /* Ensure we have buffers ready (either in clipped_buffer or at the head of * the queue */ - if (!gst_aggregator_wait_and_check (self, &timeout)) + if (!gst_aggregator_wait_and_check (self, &timeout)) { + gst_element_foreach_sink_pad (GST_ELEMENT_CAST (self), + gst_aggregator_pad_reset_peeked_buffer, NULL); continue; + } if (gst_pad_check_reconfigure (GST_AGGREGATOR_SRC_PAD (self))) { if (!gst_aggregator_negotiate_unlocked (self)) { @@ -1246,6 +1356,16 @@ gst_aggregator_aggregate_func (GstAggregator * self) flow_return = klass->aggregate (self, timeout); } + gst_element_foreach_sink_pad (GST_ELEMENT_CAST (self), + gst_aggregator_pad_reset_peeked_buffer, NULL); + + if (!priv->selected_samples_called_or_warned) { + GST_FIXME_OBJECT (self, + "Subclass should call gst_aggregator_selected_samples() from its " + "aggregate implementation."); + priv->selected_samples_called_or_warned = TRUE; + } + if (flow_return == GST_AGGREGATOR_FLOW_NEED_DATA) continue; @@ -1300,6 +1420,10 @@ gst_aggregator_start (GstAggregator * self) self->priv->send_eos = TRUE; self->priv->srccaps = NULL; + self->priv->has_peer_latency = FALSE; + self->priv->peer_latency_live = FALSE; + self->priv->peer_latency_min = self->priv->peer_latency_max = 0; + gst_aggregator_set_allocation (self, NULL, NULL, NULL, NULL); klass = GST_AGGREGATOR_GET_CLASS (self); @@ -1754,6 +1878,13 @@ gst_aggregator_change_state (GstElement * element, GstStateChange transition) if (!gst_aggregator_start (self)) goto error_start; break; + case GST_STATE_CHANGE_PAUSED_TO_PLAYING: + /* Wake up any waiting as now we have a clock and can do + * proper waiting on the clock if necessary */ + SRC_LOCK (self); + SRC_BROADCAST (self); + SRC_UNLOCK (self); + break; default: break; } @@ -1771,6 +1902,13 @@ gst_aggregator_change_state (GstElement * element, GstStateChange transition) GST_ERROR_OBJECT (self, "Subclass failed to stop."); } break; + case GST_STATE_CHANGE_PLAYING_TO_PAUSED: + /* Wake up any waiting as now clock might be gone and we might + * need to wait on the condition variable again */ + SRC_LOCK (self); + SRC_BROADCAST (self); + SRC_UNLOCK (self); + break; default: break; } @@ -1800,6 +1938,7 @@ gst_aggregator_release_pad (GstElement * element, GstPad * pad) SRC_LOCK (self); gst_aggregator_pad_set_flushing (aggpad, GST_FLOW_FLUSHING, TRUE); + gst_buffer_replace (&aggpad->priv->peeked_buffer, NULL); gst_element_remove_pad (element, pad); self->priv->has_peer_latency = FALSE; @@ -1896,7 +2035,7 @@ gst_aggregator_request_new_pad (GstElement * element, return GST_PAD (agg_pad); } -/* Must be called with SRC_LOCK held */ +/* Must be called with SRC_LOCK held, temporarily releases it! */ static gboolean gst_aggregator_query_latency_unlocked (GstAggregator * self, GstQuery * query) @@ -1904,7 +2043,10 @@ gst_aggregator_query_latency_unlocked (GstAggregator * self, GstQuery * query) gboolean query_ret, live; GstClockTime our_latency, min, max; + /* Temporarily release the lock to do the query. 
*/ + SRC_UNLOCK (self); query_ret = gst_pad_query_default (self->srcpad, GST_OBJECT (self), query); + SRC_LOCK (self); if (!query_ret) { GST_WARNING_OBJECT (self, "Latency query failed"); @@ -1930,10 +2072,12 @@ gst_aggregator_query_latency_unlocked (GstAggregator * self, GstQuery * query) } if (min > max && GST_CLOCK_TIME_IS_VALID (max)) { + SRC_UNLOCK (self); GST_ELEMENT_WARNING (self, CORE, CLOCK, (NULL), ("Impossible to configure latency: max %" GST_TIME_FORMAT " < min %" GST_TIME_FORMAT ". Add queues or other buffering elements.", GST_TIME_ARGS (max), GST_TIME_ARGS (min))); + SRC_LOCK (self); return FALSE; } @@ -1964,7 +2108,8 @@ gst_aggregator_query_latency_unlocked (GstAggregator * self, GstQuery * query) } /* - * MUST be called with the src_lock held. + * MUST be called with the src_lock held. Temporarily releases the lock inside + * gst_aggregator_query_latency_unlocked() to do the actual query! * * See gst_aggregator_get_latency() for doc */ @@ -2515,6 +2660,9 @@ gst_aggregator_set_property (GObject * object, guint prop_id, case PROP_START_TIME: agg->priv->start_time = g_value_get_uint64 (value); break; + case PROP_EMIT_SIGNALS: + agg->priv->emit_signals = g_value_get_boolean (value); + break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; @@ -2542,6 +2690,9 @@ gst_aggregator_get_property (GObject * object, guint prop_id, case PROP_START_TIME: g_value_set_uint64 (value, agg->priv->start_time); break; + case PROP_EMIT_SIGNALS: + g_value_set_boolean (value, agg->priv->emit_signals); + break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; @@ -2564,6 +2715,7 @@ gst_aggregator_class_init (GstAggregatorClass * klass) g_type_class_adjust_private_offset (klass, &aggregator_private_offset); klass->finish_buffer = gst_aggregator_default_finish_buffer; + klass->finish_buffer_list = gst_aggregator_default_finish_buffer_list; klass->sink_event = gst_aggregator_default_sink_event; klass->sink_query = gst_aggregator_default_sink_query; @@ -2633,6 +2785,40 @@ gst_aggregator_class_init (GstAggregatorClass * klass) "Start time to use if start-time-selection=set", 0, G_MAXUINT64, DEFAULT_START_TIME, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + + /** + * GstAggregator:emit-signals: + * + * Enables the emission of signals such as #GstAggregator::samples-selected + * + * Since: 1.18 + */ + g_object_class_install_property (gobject_class, PROP_EMIT_SIGNALS, + g_param_spec_boolean ("emit-signals", "Emit signals", + "Send signals", DEFAULT_EMIT_SIGNALS, + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + + /** + * GstAggregator::samples-selected: + * @aggregator: The #GstAggregator that emitted the signal + * @segment: The #GstSegment the next output buffer is part of + * @pts: The presentation timestamp of the next output buffer + * @dts: The decoding timestamp of the next output buffer + * @duration: The duration of the next output buffer + * @info: (nullable): a #GstStructure containing additional information + * + * Signals that the #GstAggregator subclass has selected the next set + * of input samples it will aggregate. Handlers may call + * gst_aggregator_peek_next_sample() at that point. 
+ * + * Since: 1.18 + */ + gst_aggregator_signals[SIGNAL_SAMPLES_SELECTED] = + g_signal_new ("samples-selected", G_TYPE_FROM_CLASS (klass), + G_SIGNAL_RUN_FIRST, 0, NULL, NULL, NULL, G_TYPE_NONE, 5, + GST_TYPE_SEGMENT | G_SIGNAL_TYPE_STATIC_SCOPE, GST_TYPE_CLOCK_TIME, + GST_TYPE_CLOCK_TIME, GST_TYPE_CLOCK_TIME, + GST_TYPE_STRUCTURE | G_SIGNAL_TYPE_STATIC_SCOPE); } static inline gpointer @@ -2717,7 +2903,7 @@ gst_aggregator_get_type (void) }; _type = g_type_register_static (GST_TYPE_ELEMENT, - "GstAggregatorFallback", &info, G_TYPE_FLAG_ABSTRACT); + "GstAggregator", &info, G_TYPE_FLAG_ABSTRACT); aggregator_private_offset = g_type_add_instance_private (_type, sizeof (GstAggregatorPrivate)); @@ -2731,6 +2917,8 @@ gst_aggregator_get_type (void) static gboolean gst_aggregator_pad_has_space (GstAggregator * self, GstAggregatorPad * aggpad) { + guint64 max_time_level; + /* Empty queue always has space */ if (aggpad->priv->num_buffers == 0 && aggpad->priv->clipped_buffer == NULL) return TRUE; @@ -2744,8 +2932,13 @@ gst_aggregator_pad_has_space (GstAggregator * self, GstAggregatorPad * aggpad) if (self->priv->latency == 0) return FALSE; + /* On top of our latency, we also want to allow buffering up to the + * minimum upstream latency to allow queue free sources with lower then + * upstream latency. */ + max_time_level = self->priv->latency + self->priv->upstream_latency_min; + /* Allow no more buffers than the latency */ - return (aggpad->priv->time_level <= self->priv->latency); + return (aggpad->priv->time_level <= max_time_level); } /* Must be called with the PAD_LOCK held */ @@ -3007,6 +3200,7 @@ gst_aggregator_pad_finalize (GObject * object) { GstAggregatorPad *pad = (GstAggregatorPad *) object; + gst_buffer_replace (&pad->priv->peeked_buffer, NULL); g_cond_clear (&pad->priv->event_cond); g_mutex_clear (&pad->priv->flush_lock); g_mutex_clear (&pad->priv->lock); @@ -3069,6 +3263,7 @@ gst_aggregator_pad_class_init (GstAggregatorPadClass * klass) /** * GstAggregatorPad:buffer-consumed: + * @aggregator: The #GstAggregator that emitted the signal * @buffer: The buffer that was consumed * * Signals that a buffer was consumed. As aggregator pads store buffers @@ -3113,10 +3308,12 @@ gst_aggregator_pad_init (GstAggregatorPad * pad) /* Must be called with the PAD_LOCK held */ static void -gst_aggregator_pad_buffer_consumed (GstAggregatorPad * pad, GstBuffer * buffer) +gst_aggregator_pad_buffer_consumed (GstAggregatorPad * pad, GstBuffer * buffer, + gboolean dequeued) { - pad->priv->num_buffers--; - GST_TRACE_OBJECT (pad, "Consuming buffer %" GST_PTR_FORMAT, buffer); + if (dequeued) + pad->priv->num_buffers--; + if (buffer && pad->priv->emit_signals) { g_signal_emit (pad, gst_aggregator_pad_signals[PAD_SIGNAL_BUFFER_CONSUMED], 0, buffer); @@ -3157,7 +3354,7 @@ gst_aggregator_pad_clip_buffer_unlocked (GstAggregatorPad * pad) buffer = aggclass->clip (self, pad, buffer); if (buffer == NULL) { - gst_aggregator_pad_buffer_consumed (pad, buffer); + gst_aggregator_pad_buffer_consumed (pad, buffer, TRUE); GST_TRACE_OBJECT (pad, "Clipping consumed the buffer"); } } @@ -3175,28 +3372,50 @@ gst_aggregator_pad_clip_buffer_unlocked (GstAggregatorPad * pad) * * Steal the ref to the buffer currently queued in @pad. * - * Returns: (transfer full): The buffer in @pad or NULL if no buffer was + * Returns: (nullable) (transfer full): The buffer in @pad or NULL if no buffer was * queued. You should unref the buffer after usage. 
*/ GstBuffer * gst_aggregator_pad_pop_buffer (GstAggregatorPad * pad) { - GstBuffer *buffer; + GstBuffer *buffer = NULL; PAD_LOCK (pad); - if (pad->priv->flow_return != GST_FLOW_OK) { - PAD_UNLOCK (pad); - return NULL; + /* If the subclass has already peeked a buffer, we guarantee + * that it receives the same buffer, no matter if the pad has + * errored out / been flushed in the meantime. + */ + if (pad->priv->peeked_buffer) { + buffer = pad->priv->peeked_buffer; + goto done; } - gst_aggregator_pad_clip_buffer_unlocked (pad); + if (pad->priv->flow_return != GST_FLOW_OK) + goto done; + gst_aggregator_pad_clip_buffer_unlocked (pad); buffer = pad->priv->clipped_buffer; +done: if (buffer) { - pad->priv->clipped_buffer = NULL; - gst_aggregator_pad_buffer_consumed (pad, buffer); + if (pad->priv->clipped_buffer != NULL) { + /* Here we still hold a reference to both the clipped buffer + * and possibly the peeked buffer, we transfer the first and + * potentially release the second + */ + gst_aggregator_pad_buffer_consumed (pad, buffer, TRUE); + pad->priv->clipped_buffer = NULL; + gst_buffer_replace (&pad->priv->peeked_buffer, NULL); + } else { + /* Here our clipped buffer has already been released, for + * example because of a flush. We thus transfer the reference + * to the peeked buffer to the caller, and we don't decrement + * pad.num_buffers as it has already been done elsewhere + */ + gst_aggregator_pad_buffer_consumed (pad, buffer, FALSE); + pad->priv->peeked_buffer = NULL; + } GST_DEBUG_OBJECT (pad, "Consumed: %" GST_PTR_FORMAT, buffer); } @@ -3231,31 +3450,36 @@ gst_aggregator_pad_drop_buffer (GstAggregatorPad * pad) * gst_aggregator_pad_peek_buffer: * @pad: the pad to get buffer from * - * Returns: (transfer full): A reference to the buffer in @pad or + * Returns: (nullable) (transfer full): A reference to the buffer in @pad or * NULL if no buffer was queued. You should unref the buffer after * usage. 
*/ GstBuffer * gst_aggregator_pad_peek_buffer (GstAggregatorPad * pad) { - GstBuffer *buffer; + GstBuffer *buffer = NULL; PAD_LOCK (pad); - if (pad->priv->flow_return != GST_FLOW_OK) { - PAD_UNLOCK (pad); - return NULL; + if (pad->priv->peeked_buffer) { + buffer = gst_buffer_ref (pad->priv->peeked_buffer); + goto done; } + if (pad->priv->flow_return != GST_FLOW_OK) + goto done; + gst_aggregator_pad_clip_buffer_unlocked (pad); if (pad->priv->clipped_buffer) { buffer = gst_buffer_ref (pad->priv->clipped_buffer); + pad->priv->peeked_buffer = gst_buffer_ref (buffer); } else { buffer = NULL; } - PAD_UNLOCK (pad); +done: + PAD_UNLOCK (pad); return buffer; } @@ -3277,8 +3501,15 @@ gst_aggregator_pad_has_buffer (GstAggregatorPad * pad) gboolean has_buffer; PAD_LOCK (pad); - gst_aggregator_pad_clip_buffer_unlocked (pad); - has_buffer = (pad->priv->clipped_buffer != NULL); + + if (pad->priv->peeked_buffer) { + has_buffer = TRUE; + } else { + gst_aggregator_pad_clip_buffer_unlocked (pad); + has_buffer = (pad->priv->clipped_buffer != NULL); + if (has_buffer) + pad->priv->peeked_buffer = gst_buffer_ref (pad->priv->clipped_buffer); + } PAD_UNLOCK (pad); return has_buffer; @@ -3383,7 +3614,7 @@ gst_aggregator_set_latency (GstAggregator * self, * gst_aggregator_get_buffer_pool: * @self: a #GstAggregator * - * Returns: (transfer full): the instance of the #GstBufferPool used + * Returns: (transfer full) (nullable): the instance of the #GstBufferPool used * by @trans; free it after use it */ GstBufferPool * @@ -3405,9 +3636,9 @@ gst_aggregator_get_buffer_pool (GstAggregator * self) /** * gst_aggregator_get_allocator: * @self: a #GstAggregator - * @allocator: (out) (allow-none) (transfer full): the #GstAllocator + * @allocator: (out) (optional) (nullable) (transfer full): the #GstAllocator * used - * @params: (out) (allow-none) (transfer full): the + * @params: (out caller-allocates) (optional): the * #GstAllocationParams of @allocator * * Lets #GstAggregator sub-classes get the memory @allocator @@ -3433,7 +3664,7 @@ gst_aggregator_get_allocator (GstAggregator * self, * gst_aggregator_simple_get_next_time: * @self: A #GstAggregator * - * This is a simple #GstAggregatorClass.get_next_time() implementation that + * This is a simple #GstAggregatorClass::get_next_time implementation that * just looks at the #GstSegment on the srcpad of the aggregator and bases * the next time on the running time there. * @@ -3473,10 +3704,13 @@ gst_aggregator_simple_get_next_time (GstAggregator * self) * source pad, instead of directly pushing new segment events * downstream. * + * Subclasses MUST call this before gst_aggregator_selected_samples(), + * if it is used at all. 
+ * * Since: 1.18 */ void -gst_aggregator_update_segment (GstAggregator * self, GstSegment * segment) +gst_aggregator_update_segment (GstAggregator * self, const GstSegment * segment) { g_return_if_fail (GST_IS_AGGREGATOR (self)); g_return_if_fail (segment != NULL); @@ -3487,5 +3721,44 @@ gst_aggregator_update_segment (GstAggregator * self, GstSegment * segment) GST_OBJECT_LOCK (self); GST_AGGREGATOR_PAD (self->srcpad)->segment = *segment; self->priv->send_segment = TRUE; + /* we have a segment from the subclass now and really shouldn't override + * anything in that segment anymore, like the segment.position */ + self->priv->first_buffer = FALSE; GST_OBJECT_UNLOCK (self); } + +/** + * gst_aggregator_selected_samples: + * @pts: The presentation timestamp of the next output buffer + * @dts: The decoding timestamp of the next output buffer + * @duration: The duration of the next output buffer + * @info: (nullable): a #GstStructure containing additional information + * + * Subclasses should call this when they have prepared the + * buffers they will aggregate for each of their sink pads, but + * before using any of the properties of the pads that govern + * *how* aggregation should be performed, for example z-index + * for video aggregators. + * + * If gst_aggregator_update_segment() is used by the subclass, + * it MUST be called before gst_aggregator_selected_samples(). + * + * This function MUST only be called from the #GstAggregatorClass::aggregate() + * function. + * + * Since: 1.18 + */ +void +gst_aggregator_selected_samples (GstAggregator * self, + GstClockTime pts, GstClockTime dts, GstClockTime duration, + GstStructure * info) +{ + g_return_if_fail (GST_IS_AGGREGATOR (self)); + + if (self->priv->emit_signals) { + g_signal_emit (self, gst_aggregator_signals[SIGNAL_SAMPLES_SELECTED], 0, + &GST_AGGREGATOR_PAD (self->srcpad)->segment, pts, dts, duration, info); + } + + self->priv->selected_samples_called_or_warned = TRUE; +} diff --git a/src/base/gstaggregator.h b/src/base/gstaggregator.h index 100f291..86fc70f 100644 --- a/src/base/gstaggregator.h +++ b/src/base/gstaggregator.h @@ -22,6 +22,7 @@ #define __GST_AGGREGATOR_H__ #include +#include G_BEGIN_DECLS @@ -329,8 +330,28 @@ struct _GstAggregatorClass { GstAggregatorPad * aggregator_pad, GstQuery * query); + /** + * GstAggregatorClass::finish_buffer_list: + * + * Optional. Equivalent of #GstAggregatorClass::finish_buffer for + * buffer lists. + * + * Since: 1.18 + */ + GstFlowReturn (*finish_buffer_list) (GstAggregator * aggregator, + GstBufferList * bufferlist); + /** + * GstAggregatorClass::peek_next_sample: + * + * See gst_aggregator_peek_next_sample(). 
+ * + * Since: 1.18 + */ + GstSample * (*peek_next_sample) (GstAggregator *aggregator, + GstAggregatorPad * aggregator_pad); + /*< private >*/ - gpointer _gst_reserved[GST_PADDING_LARGE-3]; + gpointer _gst_reserved[GST_PADDING_LARGE-5]; }; /************************************ @@ -355,6 +376,10 @@ GST_BASE_API GstFlowReturn gst_aggregator_finish_buffer (GstAggregator * aggregator, GstBuffer * buffer); +GST_BASE_API +GstFlowReturn gst_aggregator_finish_buffer_list (GstAggregator * aggregator, + GstBufferList * bufferlist); + GST_BASE_API void gst_aggregator_set_src_caps (GstAggregator * self, GstCaps * caps); @@ -386,7 +411,38 @@ GstClockTime gst_aggregator_simple_get_next_time (GstAggregator GST_BASE_API void gst_aggregator_update_segment (GstAggregator * self, - GstSegment * segment); + const GstSegment * segment); + +GST_BASE_API +GstSample * gst_aggregator_peek_next_sample (GstAggregator *self, + GstAggregatorPad * pad); + +GST_BASE_API +void gst_aggregator_selected_samples (GstAggregator * self, + GstClockTime pts, + GstClockTime dts, + GstClockTime duration, + GstStructure * info); + +/** + * GstAggregatorStartTimeSelection: + * @GST_AGGREGATOR_START_TIME_SELECTION_ZERO: Start at running time 0. + * @GST_AGGREGATOR_START_TIME_SELECTION_FIRST: Start at the running time of + * the first buffer that is received. + * @GST_AGGREGATOR_START_TIME_SELECTION_SET: Start at the running time + * selected by the `start-time` property. + * + * Since: 1.18 + */ +typedef enum +{ + GST_AGGREGATOR_START_TIME_SELECTION_ZERO, + GST_AGGREGATOR_START_TIME_SELECTION_FIRST, + GST_AGGREGATOR_START_TIME_SELECTION_SET +} GstAggregatorStartTimeSelection; + +GST_BASE_API +GType gst_aggregator_start_time_selection_get_type (void); G_END_DECLS diff --git a/src/base/subclass/aggregator.rs b/src/base/subclass/aggregator.rs index af54d40..e007c1e 100644 --- a/src/base/subclass/aggregator.rs +++ b/src/base/subclass/aggregator.rs @@ -2,9 +2,9 @@ use super::super::ffi; +use glib::prelude::*; use glib::translate::*; -use gst::glib; -use gst::prelude::*; + use gst::subclass::prelude::*; use std::ptr; @@ -26,6 +26,14 @@ pub trait AggregatorImpl: AggregatorImplExt + ElementImpl { self.parent_clip(aggregator, aggregator_pad, buffer) } + fn finish_buffer_list( + &self, + aggregator: &Self::Type, + buffer_list: gst::BufferList, + ) -> Result { + self.parent_finish_buffer_list(aggregator, buffer_list) + } + fn finish_buffer( &self, aggregator: &Self::Type, @@ -140,6 +148,14 @@ pub trait AggregatorImpl: AggregatorImplExt + ElementImpl { fn negotiate(&self, aggregator: &Self::Type) -> bool { self.parent_negotiate(aggregator) } + + fn peek_next_sample( + &self, + aggregator: &Self::Type, + pad: &AggregatorPad, + ) -> Option { + self.parent_peek_next_sample(aggregator, pad) + } } pub trait AggregatorImplExt: ObjectSubclass { @@ -158,6 +174,12 @@ pub trait AggregatorImplExt: ObjectSubclass { buffer: gst::Buffer, ) -> Result; + fn parent_finish_buffer_list( + &self, + aggregator: &Self::Type, + buffer_list: gst::BufferList, + ) -> Result; + fn parent_sink_event( &self, aggregator: &Self::Type, @@ -232,6 +254,12 @@ pub trait AggregatorImplExt: ObjectSubclass { ) -> Result<(), gst::LoggableError>; fn parent_negotiate(&self, aggregator: &Self::Type) -> bool; + + fn parent_peek_next_sample( + &self, + aggregator: &Self::Type, + pad: &AggregatorPad, + ) -> Option; } impl AggregatorImplExt for T { @@ -289,6 +317,24 @@ impl AggregatorImplExt for T { } } + fn parent_finish_buffer_list( + &self, + aggregator: &Self::Type, + buffer_list: 
gst::BufferList, + ) -> Result { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + let f = (*parent_class) + .finish_buffer_list + .expect("Missing parent function `finish_buffer_list`"); + try_from_glib(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + buffer_list.into_ptr(), + )) + } + } + fn parent_sink_event( &self, aggregator: &Self::Type, @@ -336,7 +382,7 @@ impl AggregatorImplExt for T { query: &mut gst::QueryRef, ) -> bool { unsafe { - let data = T::type_data(); + let data = Self::type_data(); let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; let f = (*parent_class) .sink_query @@ -601,6 +647,26 @@ impl AggregatorImplExt for T { .unwrap_or(true) } } + + fn parent_peek_next_sample( + &self, + aggregator: &Self::Type, + pad: &AggregatorPad, + ) -> Option { + unsafe { + let data = Self::type_data(); + let parent_class = data.as_ref().parent_class() as *mut ffi::GstAggregatorClass; + (*parent_class) + .peek_next_sample + .map(|f| { + from_glib_full(f( + aggregator.unsafe_cast_ref::().to_glib_none().0, + pad.to_glib_none().0, + )) + }) + .unwrap_or(None) + } + } } unsafe impl IsSubclassable for Aggregator { @@ -627,6 +693,8 @@ unsafe impl IsSubclassable for Aggregator { klass.sink_event_pre_queue = Some(aggregator_sink_event_pre_queue::); klass.sink_query_pre_queue = Some(aggregator_sink_query_pre_queue::); klass.negotiate = Some(aggregator_negotiate::); + klass.peek_next_sample = Some(aggregator_peek_next_sample::); + klass.finish_buffer_list = Some(aggregator_finish_buffer_list::); } } @@ -642,7 +710,7 @@ unsafe extern "C" fn aggregator_flush( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), gst::FlowReturn::Error, { + gst::panic_to_error!(&wrap, imp.panicked(), gst::FlowReturn::Error, { imp.flush(wrap.unsafe_cast_ref()).into() }) .into_glib() @@ -657,7 +725,7 @@ unsafe extern "C" fn aggregator_clip( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - let ret = gst::panic_to_error!(&wrap, &imp.panicked(), None, { + let ret = gst::panic_to_error!(&wrap, imp.panicked(), None, { imp.clip( wrap.unsafe_cast_ref(), &from_glib_borrow(aggregator_pad), @@ -676,13 +744,28 @@ unsafe extern "C" fn aggregator_finish_buffer( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), gst::FlowReturn::Error, { + gst::panic_to_error!(&wrap, imp.panicked(), gst::FlowReturn::Error, { imp.finish_buffer(wrap.unsafe_cast_ref(), from_glib_full(buffer)) .into() }) .into_glib() } +unsafe extern "C" fn aggregator_finish_buffer_list( + ptr: *mut ffi::GstAggregator, + buffer_list: *mut gst::ffi::GstBufferList, +) -> gst::ffi::GstFlowReturn { + let instance = &*(ptr as *mut T::Instance); + let imp = instance.impl_(); + let wrap: Borrowed = from_glib_borrow(ptr); + + gst::panic_to_error!(&wrap, imp.panicked(), gst::FlowReturn::Error, { + imp.finish_buffer_list(wrap.unsafe_cast_ref(), from_glib_full(buffer_list)) + .into() + }) + .into_glib() +} + unsafe extern "C" fn aggregator_sink_event( ptr: *mut ffi::GstAggregator, aggregator_pad: *mut ffi::GstAggregatorPad, @@ -692,7 +775,7 @@ unsafe extern "C" fn aggregator_sink_event( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(wrap, &imp.panicked(), false, { + gst::panic_to_error!(wrap, imp.panicked(), false, { imp.sink_event( wrap.unsafe_cast_ref(), 
&from_glib_borrow(aggregator_pad), @@ -711,7 +794,7 @@ unsafe extern "C" fn aggregator_sink_event_pre_queue( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), gst::FlowReturn::Error, { + gst::panic_to_error!(&wrap, imp.panicked(), gst::FlowReturn::Error, { imp.sink_event_pre_queue( wrap.unsafe_cast_ref(), &from_glib_borrow(aggregator_pad), @@ -731,7 +814,7 @@ unsafe extern "C" fn aggregator_sink_query( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), false, { + gst::panic_to_error!(&wrap, imp.panicked(), false, { imp.sink_query( wrap.unsafe_cast_ref(), &from_glib_borrow(aggregator_pad), @@ -750,7 +833,7 @@ unsafe extern "C" fn aggregator_sink_query_pre_queue( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), false, { + gst::panic_to_error!(&wrap, imp.panicked(), false, { imp.sink_query_pre_queue( wrap.unsafe_cast_ref(), &from_glib_borrow(aggregator_pad), @@ -768,7 +851,7 @@ unsafe extern "C" fn aggregator_src_event( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), false, { + gst::panic_to_error!(&wrap, imp.panicked(), false, { imp.src_event(wrap.unsafe_cast_ref(), from_glib_full(event)) }) .into_glib() @@ -782,7 +865,7 @@ unsafe extern "C" fn aggregator_src_query( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), false, { + gst::panic_to_error!(&wrap, imp.panicked(), false, { imp.src_query(wrap.unsafe_cast_ref(), gst::QueryRef::from_mut_ptr(query)) }) .into_glib() @@ -797,7 +880,7 @@ unsafe extern "C" fn aggregator_src_activate( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), false, { + gst::panic_to_error!(&wrap, imp.panicked(), false, { match imp.src_activate(wrap.unsafe_cast_ref(), from_glib(mode), from_glib(active)) { Ok(()) => true, Err(err) => { @@ -817,7 +900,7 @@ unsafe extern "C" fn aggregator_aggregate( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), gst::FlowReturn::Error, { + gst::panic_to_error!(&wrap, imp.panicked(), gst::FlowReturn::Error, { imp.aggregate(wrap.unsafe_cast_ref(), from_glib(timeout)) .into() }) @@ -831,7 +914,7 @@ unsafe extern "C" fn aggregator_start( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), false, { + gst::panic_to_error!(&wrap, imp.panicked(), false, { match imp.start(wrap.unsafe_cast_ref()) { Ok(()) => true, Err(err) => { @@ -850,7 +933,7 @@ unsafe extern "C" fn aggregator_stop( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), false, { + gst::panic_to_error!(&wrap, imp.panicked(), false, { match imp.stop(wrap.unsafe_cast_ref()) { Ok(()) => true, Err(err) => { @@ -869,7 +952,7 @@ unsafe extern "C" fn aggregator_get_next_time( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - gst::panic_to_error!(&wrap, &imp.panicked(), gst::ClockTime::NONE, { + gst::panic_to_error!(&wrap, imp.panicked(), gst::ClockTime::NONE, { imp.next_time(wrap.unsafe_cast_ref()) }) .into_glib() @@ -885,7 +968,7 @@ unsafe extern "C" fn aggregator_create_new_pad( let imp = instance.impl_(); let wrap: Borrowed = from_glib_borrow(ptr); - 
+    gst::panic_to_error!(&wrap, imp.panicked(), None, {
         let req_name: Borrowed<Option<glib::GString>> = from_glib_borrow(req_name);
 
         imp.create_new_pad(
@@ -911,7 +994,7 @@ unsafe extern "C" fn aggregator_update_src_caps<T: AggregatorImpl>(
 
     *res = ptr::null_mut();
 
-    gst::panic_to_error!(&wrap, &imp.panicked(), gst::FlowReturn::Error, {
+    gst::panic_to_error!(&wrap, imp.panicked(), gst::FlowReturn::Error, {
         match imp.update_src_caps(wrap.unsafe_cast_ref(), &from_glib_borrow(caps)) {
             Ok(res_caps) => {
                 *res = res_caps.into_ptr();
@@ -931,7 +1014,7 @@ unsafe extern "C" fn aggregator_fixate_src_caps<T: AggregatorImpl>(
     let imp = instance.impl_();
     let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
 
-    gst::panic_to_error!(&wrap, &imp.panicked(), gst::Caps::new_empty(), {
+    gst::panic_to_error!(&wrap, imp.panicked(), gst::Caps::new_empty(), {
         imp.fixate_src_caps(wrap.unsafe_cast_ref(), from_glib_full(caps))
     })
     .into_ptr()
@@ -945,7 +1028,7 @@ unsafe extern "C" fn aggregator_negotiated_src_caps<T: AggregatorImpl>(
     let imp = instance.impl_();
     let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
 
-    gst::panic_to_error!(&wrap, &imp.panicked(), false, {
+    gst::panic_to_error!(&wrap, imp.panicked(), false, {
         match imp.negotiated_src_caps(wrap.unsafe_cast_ref(), &from_glib_borrow(caps)) {
             Ok(()) => true,
             Err(err) => {
@@ -964,8 +1047,22 @@ unsafe extern "C" fn aggregator_negotiate<T: AggregatorImpl>(
     let imp = instance.impl_();
     let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
 
-    gst::panic_to_error!(&wrap, &imp.panicked(), false, {
+    gst::panic_to_error!(&wrap, imp.panicked(), false, {
         imp.negotiate(wrap.unsafe_cast_ref())
     })
     .into_glib()
 }
+
+unsafe extern "C" fn aggregator_peek_next_sample<T: AggregatorImpl>(
+    ptr: *mut ffi::GstAggregator,
+    pad: *mut ffi::GstAggregatorPad,
+) -> *mut gst::ffi::GstSample {
+    let instance = &*(ptr as *mut T::Instance);
+    let imp = instance.impl_();
+    let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+    gst::panic_to_error!(&wrap, imp.panicked(), None, {
+        imp.peek_next_sample(wrap.unsafe_cast_ref(), &from_glib_borrow(pad))
+    })
+    .to_glib_full()
+}
diff --git a/src/base/subclass/aggregator_pad.rs b/src/base/subclass/aggregator_pad.rs
index 1a24f30..d996150 100644
--- a/src/base/subclass/aggregator_pad.rs
+++ b/src/base/subclass/aggregator_pad.rs
@@ -2,9 +2,9 @@
 
 use super::super::ffi;
 
+use glib::prelude::*;
 use glib::translate::*;
-use gst::glib;
-use gst::prelude::*;
+
 use gst::subclass::prelude::*;
 
 use super::super::Aggregator;
diff --git a/src/base/subclass/mod.rs b/src/base/subclass/mod.rs
index 319dc13..31b9283 100644
--- a/src/base/subclass/mod.rs
+++ b/src/base/subclass/mod.rs
@@ -1,17 +1,15 @@
-// Copyright (C) 2016-2018 Sebastian Dröge <sebastian@centricular.com>
-//               2016 Luis de Bethencourt <luisbg@osg.samsung.com>
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
+// Take a look at the license at the top of the repository in the LICENSE file.
+
 #![allow(clippy::cast_ptr_alignment)]
 
-pub mod aggregator;
-pub mod aggregator_pad;
+
+mod aggregator;
+mod aggregator_pad;
 
 pub mod prelude {
+    #[doc(hidden)]
+    pub use gst::subclass::prelude::*;
+
     pub use super::aggregator::{AggregatorImpl, AggregatorImplExt};
     pub use super::aggregator_pad::{AggregatorPadImpl, AggregatorPadImplExt};
 }
diff --git a/src/base/utils.rs b/src/base/utils.rs
index 07e7687..444c834 100644
--- a/src/base/utils.rs
+++ b/src/base/utils.rs
@@ -1,9 +1,9 @@
 // Take a look at the license at the top of the repository in the LICENSE file.
 
 use glib::translate::mut_override;
-use gst::glib;
 
 #[must_use = "if unused the Mutex will immediately unlock"]
+#[doc(alias = "GMutex")]
 pub struct MutexGuard<'a>(&'a glib::ffi::GMutex);
 
 impl<'a> MutexGuard<'a> {

From eb2bb282f85ee18a7016938fbcf0d81281618a56 Mon Sep 17 00:00:00 2001
From: Michael Gruner
Date: Wed, 27 Oct 2021 21:58:42 -0600
Subject: [PATCH 07/10] Add version tracking file for aggregator bindings

---
 src/base/versions.txt | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 src/base/versions.txt

diff --git a/src/base/versions.txt b/src/base/versions.txt
new file mode 100644
index 0000000..001d2ac
--- /dev/null
+++ b/src/base/versions.txt
@@ -0,0 +1,4 @@
+Backported from:
+gstreamer (https://gitlab.freedesktop.org/gstreamer/gstreamer @ 1.19.2)
+gstreamer-rs (https://gitlab.freedesktop.org/gstreamer/gstreamer-rs @ 0.17.4)
+

From 10e711dbad3bd5492d4636ba8ed3570b314b7fc6 Mon Sep 17 00:00:00 2001
From: Michael Gruner
Date: Wed, 27 Oct 2021 22:12:52 -0600
Subject: [PATCH 08/10] Remove pub from top level mod declarations

---
 src/lib.rs | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/src/lib.rs b/src/lib.rs
index 17216de..4b5925d 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,21 +1,23 @@
+#![allow(dead_code)]
+
 mod device_provider;
-pub mod ndi;
+mod ndi;
 
 #[cfg(feature = "sink-v1_14")]
 #[path = "base/mod.rs"]
-pub mod gst_base_compat;
+mod gst_base_compat;
 
 #[cfg(any(feature = "sink", feature = "sink-v1_14"))]
 mod ndisink;
 #[cfg(any(feature = "sink", feature = "sink-v1_14"))]
 mod ndisinkcombiner;
 #[cfg(any(feature = "sink", feature = "sink-v1_14"))]
-pub mod ndisinkmeta;
+mod ndisinkmeta;
 mod ndisrc;
 mod ndisrcdemux;
-pub mod ndisrcmeta;
-pub mod ndisys;
-pub mod receiver;
+mod ndisrcmeta;
+mod ndisys;
+mod receiver;
 
 use crate::ndi::*;
 use crate::ndisys::*;

From 1f71823f462195bc1b1599635e954e8156f12c46 Mon Sep 17 00:00:00 2001
From: Michael Gruner
Date: Wed, 3 Nov 2021 23:58:04 -0600
Subject: [PATCH 09/10] Register aggregator backport under NDI namespace to avoid collisions

---
 src/base/gstaggregator.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/base/gstaggregator.c b/src/base/gstaggregator.c
index 5a6e30f..d927b68 100644
--- a/src/base/gstaggregator.c
+++ b/src/base/gstaggregator.c
@@ -126,7 +126,7 @@ gst_aggregator_start_time_selection_get_type (void)
       {0, NULL, NULL}
     };
     GType new_type =
-        g_enum_register_static ("GstAggregatorStartTimeSelection", values);
+        g_enum_register_static ("GstAggregatorNdiStartTimeSelection", values);
 
     g_once_init_leave (&gtype, new_type);
   }
@@ -2903,7 +2903,7 @@ gst_aggregator_get_type (void)
     };
 
     _type = g_type_register_static (GST_TYPE_ELEMENT,
-        "GstAggregator", &info, G_TYPE_FLAG_ABSTRACT);
+        "GstAggregatorNdi", &info, G_TYPE_FLAG_ABSTRACT);
 
     aggregator_private_offset =
         g_type_add_instance_private (_type, sizeof (GstAggregatorPrivate));

From aaeed98882aa091c1848387c43f55f92e2ddc026 Mon Sep 17 00:00:00 2001
From: Alexandre Boukhlif
Date: Thu, 4 Nov 2021 13:25:20 -0300
Subject: [PATCH 10/10] Updated type from i8 to u8 (#1)

---
 src/ndi.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/ndi.rs b/src/ndi.rs
index c54176b..4ab692f 100644
--- a/src/ndi.rs
+++ b/src/ndi.rs
@@ -804,7 +804,7 @@ impl<'a> VideoFrame<'a> {
             picture_aspect_ratio,
             frame_format_type,
             timecode,
-            p_data: frame.plane_data(0).unwrap().as_ptr() as *const i8,
+            p_data: frame.plane_data(0).unwrap().as_ptr() as *const u8,
             line_stride_or_data_size_in_bytes: frame.plane_stride()[0],
             p_metadata: ptr::null(),
             timestamp: 0,
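
Note: the subclass/aggregator.rs hunks earlier in this series route the `finish_buffer_list` and `peek_next_sample` class vfuncs through the backported `AggregatorImpl`/`AggregatorImplExt` traits. The fragment below is only an illustrative sketch of how an element built on the backport could override them; the type name `NdiSinkCombiner` and the surrounding ObjectSubclass/ObjectImpl/ElementImpl boilerplate are assumptions and are not part of these patches.

    // Illustrative sketch only; `NdiSinkCombiner` and its ObjectSubclass,
    // ObjectImpl and ElementImpl implementations are assumed to exist
    // elsewhere in the crate, with the backported Aggregator as parent type.
    use crate::gst_base_compat::subclass::prelude::*;
    use crate::gst_base_compat::AggregatorPad;

    impl AggregatorImpl for NdiSinkCombiner {
        // Hand a whole buffer list downstream; deferring to the parent class
        // keeps the stock GstAggregator behaviour of pushing it on the src pad.
        fn finish_buffer_list(
            &self,
            aggregator: &Self::Type,
            buffer_list: gst::BufferList,
        ) -> Result<gst::FlowSuccess, gst::FlowError> {
            self.parent_finish_buffer_list(aggregator, buffer_list)
        }

        // Report the sample that would be consumed next from `pad`, again
        // falling back to the base class implementation.
        fn peek_next_sample(
            &self,
            aggregator: &Self::Type,
            pad: &AggregatorPad,
        ) -> Option<gst::Sample> {
            self.parent_peek_next_sample(aggregator, pad)
        }
    }

Both overrides simply defer to the parent class through the `parent_*` helpers added in this series; a real combiner would typically assemble its own buffer list or sample before calling them.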