diff --git a/av/filter/context.pyx b/av/filter/context.pyx
index 30481eb21788239dfe2f818210977e125783eec4..9c9a1fa20448bd4fa8abc5793c5080ef6c5c0d69 100644
--- a/av/filter/context.pyx
+++ b/av/filter/context.pyx
@@ -77,7 +77,10 @@ cdef class FilterContext(object):
 
     def push(self, Frame frame):
-        if self.filter.name in ('abuffer', 'buffer'):
+        if frame is None:
+            err_check(lib.av_buffersrc_write_frame(self.ptr, NULL))
+            return
+        elif self.filter.name in ('abuffer', 'buffer'):
             err_check(lib.av_buffersrc_write_frame(self.ptr, frame.ptr))
             return
 
 
diff --git a/av/filter/graph.pyx b/av/filter/graph.pyx
index 20c76c7de1f24c91f3d822160de58845b7e170eb..bcb49f788e178af4deff15da529041c1b787b808 100644
--- a/av/filter/graph.pyx
+++ b/av/filter/graph.pyx
@@ -196,12 +196,14 @@ cdef class Graph(object):
 
     def push(self, frame):
-        if isinstance(frame, VideoFrame):
+        if frame is None:
+            contexts = self._context_by_type.get('buffer', []) + self._context_by_type.get('abuffer', [])
+        elif isinstance(frame, VideoFrame):
             contexts = self._context_by_type.get('buffer', [])
         elif isinstance(frame, AudioFrame):
             contexts = self._context_by_type.get('abuffer', [])
         else:
-            raise ValueError('can only push VideoFrame or AudioFrame', type(frame))
+            raise ValueError('can only push AudioFrame, VideoFrame or None; got %s' % type(frame))
 
         if len(contexts) != 1:
             raise ValueError('can only auto-push with single buffer; found %s' % len(contexts))
 
diff --git a/tests/test_filters.py b/tests/test_filters.py
index 2f3d6985fa1b7296f75001415ad181106e37ed0e..f73bf4cc862a2490edd9d343c95505e9bf67c7bb 100644
--- a/tests/test_filters.py
+++ b/tests/test_filters.py
@@ -260,3 +260,27 @@ class TestFilters(TestCase):
 
         self.assertEqual(filtered_frames[1].pts, (frame.pts - 1) * 2 + 1)
         self.assertEqual(filtered_frames[1].time_base, Fraction(1, 60))
+
+    def test_EOF(self):
+        input_container = av.open(format="lavfi", file="color=c=pink:duration=1:r=30")
+        video_stream = input_container.streams.video[0]
+
+        graph = av.filter.Graph()
+        video_in = graph.add_buffer(template=video_stream)
+        palette_gen_filter = graph.add("palettegen")
+        video_out = graph.add("buffersink")
+        video_in.link_to(palette_gen_filter)
+        palette_gen_filter.link_to(video_out)
+        graph.configure()
+
+        for frame in input_container.decode(video=0):
+            graph.push(frame)
+
+        graph.push(None)
+
+        # if we do not push None, we get a BlockingIOError
+        palette_frame = graph.pull()
+
+        self.assertIsInstance(palette_frame, av.VideoFrame)
+        self.assertEqual(palette_frame.width, 16)
+        self.assertEqual(palette_frame.height, 16)