Skip to content
GitLab
    • Explore Projects Groups Snippets
Projects Groups Snippets
  • /
  • Help
    • Help
    • Support
    • Community forum
    • Submit feedback
    • Contribute to GitLab
  • Sign in / Register
  • P PyAV
  • Project information
    • Project information
    • Activity
    • Labels
    • Members
  • Repository
    • Repository
    • Files
    • Commits
    • Branches
    • Tags
    • Contributors
    • Graph
    • Compare
  • Issues 37
    • Issues 37
    • List
    • Boards
    • Service Desk
    • Milestones
  • Merge requests 26
    • Merge requests 26
  • CI/CD
    • CI/CD
    • Pipelines
    • Jobs
    • Schedules
  • Deployments
    • Deployments
    • Environments
    • Releases
  • Packages and registries
    • Packages and registries
    • Package Registry
    • Infrastructure Registry
  • Monitor
    • Monitor
    • Incidents
  • Analytics
    • Analytics
    • Value stream
    • CI/CD
    • Repository
  • Wiki
    • Wiki
  • Snippets
    • Snippets
  • Activity
  • Graph
  • Create a new issue
  • Jobs
  • Commits
  • Issue Boards
Collapse sidebar
  • PyAV
  • PyAV
  • Merge requests
  • !765

Better time_base support with filters

  • Review changes

  • Download
  • Email patches
  • Plain diff
Merged Administrator requested to merge github/fork/fvollmer/time_base_filtered into main 4 years ago
  • Overview 2
  • Commits 1
  • Pipelines 0
  • Changes 3

Created by: fvollmer

PyAV frames didn't have a time_base after they were passed through a filter, and add_buffer also ignored the time_base.

The following example deinterlaces a 30fps video.

import errno

import av.filter

# Build a simple deinterlacing pipeline: buffer -> bwdif -> buffersink.
# The lavfi "color" source produces a 5 s, 30 fps pink test clip.
source = av.open(format='lavfi', file='color=c=pink:duration=5:r=30')
template_stream = source.streams.video[0]

graph = av.filter.Graph()
src_ctx = graph.add_buffer(template=template_stream)
deinterlacer = graph.add("bwdif", "send_field:tff:all")
sink = graph.add("buffersink")
src_ctx.link_to(deinterlacer)
deinterlacer.link_to(sink)
graph.configure()


def drain(g):
    """Yield every frame the graph can currently produce.

    Stops (without raising) as soon as the sink reports EAGAIN, i.e. it
    needs more input before it can emit another frame.
    """
    while True:
        try:
            yield g.pull()
        except av.utils.AVError as e:
            if e.errno != errno.EAGAIN:
                raise
            return


for frame in source.decode():
    print(f"old time_base: {frame.time_base}, old pts: {frame.pts}")
    graph.push(frame)
    # bwdif in send_field mode emits two frames per input frame, so drain
    # everything that became available after this push.
    for filtered_frame in drain(graph):
        print(f"filtered time_base: {filtered_frame.time_base}, filtered pts: {filtered_frame.pts}")

With these patches, the time_base is handled correctly:

old time_base: 1/30, old pts: 0
old time_base: 1/30, old pts: 1
filtered time_base: 1/60, filtered pts: 0
filtered time_base: 1/60, filtered pts: 1
old time_base: 1/30, old pts: 2
filtered time_base: 1/60, filtered pts: 2
filtered time_base: 1/60, filtered pts: 3
old time_base: 1/30, old pts: 3
filtered time_base: 1/60, filtered pts: 4
filtered time_base: 1/60, filtered pts: 5
...

instead of

old time_base: 1/30, old pts: 0
old time_base: 1/30, old pts: 1
filtered time_base: None, filtered pts: 0
filtered time_base: None, filtered pts: 1
old time_base: 1/30, old pts: 2
filtered time_base: None, filtered pts: 2
filtered time_base: None, filtered pts: 3
old time_base: 1/30, old pts: 3
filtered time_base: None, filtered pts: 4
filtered time_base: None, filtered pts: 5
Compare
  • main (base)

and
  • latest version
    ca43bc80
    1 commit, 2 years ago

3 files
+ 54
- 2

    Preferences

    File browser
    Compare changes
av/f‎ilter‎
conte‎xt.pyx‎ +2 -0
grap‎h.pyx‎ +10 -2
te‎sts‎
test_fi‎lters.py‎ +42 -0
av/filter/context.pyx
+ 2
- 0
  • View file @ ca43bc80

  • Edit in single-file editor

  • Open in Web IDE


@@ -6,6 +6,7 @@ from av.dictionary import Dictionary
from av.error cimport err_check
from av.filter.pad cimport alloc_filter_pads
from av.frame cimport Frame
from av.utils cimport avrational_to_fraction
from av.video.frame cimport VideoFrame, alloc_video_frame
@@ -106,4 +107,5 @@ cdef class FilterContext(object):
err_check(lib.av_buffersink_get_frame(self.ptr, frame.ptr))
frame._init_user_attributes()
frame.time_base = avrational_to_fraction(&self.ptr.inputs[0].time_base)
return frame
av/filter/graph.pyx
+ 10
- 2
  • View file @ ca43bc80

  • Edit in single-file editor

  • Open in Web IDE


from fractions import Fraction
import warnings
from av.audio.format cimport AudioFormat
from av.audio.frame cimport AudioFrame
@@ -122,7 +123,7 @@ cdef class Graph(object):
self._register_context(py_ctx)
self._nb_filters_seen = self.ptr.nb_filters
def add_buffer(self, template=None, width=None, height=None, format=None, name=None):
def add_buffer(self, template=None, width=None, height=None, format=None, name=None, time_base=None):
if template is not None:
if width is None:
@@ -131,6 +132,8 @@ cdef class Graph(object):
height = template.height
if format is None:
format = template.format
if time_base is None:
time_base = template.time_base
if width is None:
raise ValueError('missing width')
@@ -138,13 +141,18 @@ cdef class Graph(object):
raise ValueError('missing height')
if format is None:
raise ValueError('missing format')
if time_base is None:
warnings.warn('missing time_base. Guessing 1/1000 time base. '
'This is deprecated and may be removed in future releases.',
DeprecationWarning)
time_base = Fraction(1, 1000)
return self.add(
'buffer',
name=name,
video_size=f'{width}x{height}',
pix_fmt=str(int(VideoFormat(format))),
time_base='1/1000',
time_base=str(time_base),
pixel_aspect='1/1',
)
tests/test_filters.py
+ 42
- 0
  • View file @ ca43bc80

  • Edit in single-file editor

  • Open in Web IDE


@@ -7,6 +7,7 @@ import numpy as np
from av import AudioFrame, VideoFrame
from av.audio.frame import format_dtypes
from av.filter import Filter, Graph
import av
from .common import Image, TestCase, fate_suite
@@ -34,6 +35,17 @@ def generate_audio_frame(frame_num, input_format='s16', layout='stereo', sample_
return frame
def pull_until_blocked(graph):
    """Pull frames from *graph* until it blocks, returning what was collected.

    A filter graph signals that it needs more input by raising an AVError
    with errno EAGAIN; that is the normal termination condition here. Any
    other error is propagated to the caller.
    """
    collected = []
    while True:
        try:
            collected.append(graph.pull())
        except av.utils.AVError as err:
            if err.errno == errno.EAGAIN:
                return collected
            raise
class TestFilters(TestCase):
def test_filter_descriptor(self):
@@ -213,3 +225,33 @@ class TestFilters(TestCase):
output_data = out_frame.to_ndarray()
self.assertTrue(np.allclose(input_data * 0.5, output_data), "Check that volume is reduced")
def test_video_buffer(self):
    """End-to-end check that time_base survives a trip through a filter graph.

    A 30 fps synthetic clip is pushed through bwdif (field mode), which
    doubles the frame rate, so output frames must carry a 1/60 time_base
    and doubled pts values.
    """
    input_container = av.open(format="lavfi", file="color=c=pink:duration=1:r=30")
    input_video_stream = input_container.streams.video[0]

    # buffer -> bwdif -> buffersink
    graph = av.filter.Graph()
    buffer = graph.add_buffer(template=input_video_stream)
    bwdif = graph.add("bwdif", "send_field:tff:all")
    buffersink = graph.add("buffersink")
    buffer.link_to(bwdif)
    bwdif.link_to(buffersink)
    graph.configure()

    for frame in input_container.decode():
        self.assertEqual(frame.time_base, Fraction(1, 30))
        graph.push(frame)

        filtered_frames = pull_until_blocked(graph)

        if frame.pts == 0:
            # bwdif needs a second field before it can emit anything.
            self.assertEqual(len(filtered_frames), 0)
        else:
            # Two output frames per input frame, at double the rate.
            self.assertEqual(len(filtered_frames), 2)

            self.assertEqual(filtered_frames[0].pts, (frame.pts - 1) * 2)
            self.assertEqual(filtered_frames[0].time_base, Fraction(1, 60))

            self.assertEqual(filtered_frames[1].pts, (frame.pts - 1) * 2 + 1)
            self.assertEqual(filtered_frames[1].time_base, Fraction(1, 60))
0 Assignees
None
Assign to
Reviewer
Jeremy Lainé's avatar
Jeremy Lainé
Request review from
Labels
1
changes requested
1
changes requested
    Assign labels
  • Manage project labels

Milestone
No milestone
None
None
Time tracking
No estimate or time spent
Lock merge request
Unlocked
2
2 participants
Jeremy Lainé
Administrator
Reference: PyAV-Org/PyAV!765
Source branch: github/fork/fvollmer/time_base_filtered

Menu

Explore Projects Groups Snippets