How to use the vapoursynth.core.std.BlankClip function in VapourSynth

To help you get started, we've selected a few VapourSynth examples based on popular ways vapoursynth.core.std.BlankClip is used in public projects.
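
As a quick orientation before the real-world examples, here is a minimal sketch of a typical BlankClip call; the parameter values are illustrative and not taken from any of the projects below.

import vapoursynth as vs

core = vs.core

# 100 frames of 160x120 black in 8-bit YUV 4:2:0.
# `color` takes one value per plane; [0, 128, 128] is black in YUV.
clip = core.std.BlankClip(format=vs.YUV420P8, width=160, height=120,
                          color=[0, 128, 128], length=100)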


Example from Irrational-Encoding-Wizardry/kagefunc (tests.py):
"""
Many of these don’t actually test the logic and just make some 
basic assertions as well as a call to check if frames are produced.
"""
import unittest
import vapoursynth as vs
import kagefunc as kgf


class KagefuncTests(unittest.TestCase):
    BLACK_SAMPLE_CLIP = vs.core.std.BlankClip(format=vs.YUV420P8, width=160, height=120, color=[0, 128, 128],
                                              length=100)
    WHITE_SAMPLE_CLIP = vs.core.std.BlankClip(format=vs.YUV420P8, width=160, height=120, color=[255, 128, 128],
                                              length=100)
    GREYSCALE_SAMPLE_CLIP = vs.core.std.BlankClip(format=vs.GRAY8, width=160, height=120, color=[255])

    def test_retinex_edgemask(self):
        mask = kgf.retinex_edgemask(self.BLACK_SAMPLE_CLIP)
        self.assert_same_bitdepth(mask, self.BLACK_SAMPLE_CLIP)
        self.assert_same_length(mask, self.BLACK_SAMPLE_CLIP)
        self.assertEqual(mask.format.color_family, vs.GRAY)
        # request a frame to see if that errors
        mask.get_frame(0)

    def test_inverse_scale(self):
        src = self.BLACK_SAMPLE_CLIP
        resized = kgf.inverse_scale(self.GREYSCALE_SAMPLE_CLIP, height=90)
        self.assertEqual(resized.format.id, vs.GRAYS)
        self.assertEqual(resized.height, 90)
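
The pattern worth copying here: BlankClip builds small, deterministic sample clips once as class-level constants, and get_frame(0) forces the lazy filter graph to actually evaluate. A standalone sketch of the same idea (the names are illustrative, not from kagefunc):

import vapoursynth as vs

core = vs.core

sample = core.std.BlankClip(format=vs.GRAY8, width=160, height=120,
                            color=[255], length=100)
assert sample.num_frames == 100
frame = sample.get_frame(0)  # raises if the filter chain cannot produce frames
assert frame.format.color_family == vs.GRAY
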
Example from Irrational-Encoding-Wizardry/vsutil (tests/test_vsutil.py):
import unittest

import vapoursynth as vs

import vsutil


class VsUtilTests(unittest.TestCase):
    YUV420P8_CLIP = vs.core.std.BlankClip(format=vs.YUV420P8, width=160, height=120, color=[0, 128, 128], length=100)
    YUV420P10_CLIP = vs.core.std.BlankClip(format=vs.YUV420P10, width=160, height=120, color=[0, 128, 128], length=100)
    YUV444P8_CLIP = vs.core.std.BlankClip(format=vs.YUV444P8, width=160, height=120, color=[0, 128, 128], length=100)
    YUV422P8_CLIP = vs.core.std.BlankClip(format=vs.YUV422P8, width=160, height=120, color=[0, 128, 128], length=100)
    YUV410P8_CLIP = vs.core.std.BlankClip(format=vs.YUV410P8, width=160, height=120, color=[0, 128, 128], length=100)
    YUV411P8_CLIP = vs.core.std.BlankClip(format=vs.YUV411P8, width=160, height=120, color=[0, 128, 128], length=100)
    YUV440P8_CLIP = vs.core.std.BlankClip(format=vs.YUV440P8, width=160, height=120, color=[0, 128, 128], length=100)
    RGB24_CLIP = vs.core.std.BlankClip(format=vs.RGB24)

    SMALLER_SAMPLE_CLIP = vs.core.std.BlankClip(format=vs.YUV420P8, width=10, height=10)

    BLACK_SAMPLE_CLIP = vs.core.std.BlankClip(format=vs.YUV420P8, width=160, height=120, color=[0, 128, 128],
                                              length=100)
    WHITE_SAMPLE_CLIP = vs.core.std.BlankClip(format=vs.YUV420P8, width=160, height=120, color=[255, 128, 128],
                                              length=100)

    VARIABLE_FORMAT_CLIP = vs.core.std.Interleave([YUV420P8_CLIP, YUV444P8_CLIP], mismatch=True)

    def assert_same_dimensions(self, clip_a: vs.VideoNode, clip_b: vs.VideoNode):
        """
        Assert that two clips have the same width and height.
        """
        self.assertEqual(clip_a.height, clip_b.height, f'Same height expected, was {clip_a.height} and {clip_b.height}.')
        self.assertEqual(clip_a.width, clip_b.width, f'Same width expected, was {clip_a.width} and {clip_b.width}.')

    def assert_same_format(self, clip_a: vs.VideoNode, clip_b: vs.VideoNode):
        """
        Assert that two clips have the same format (but not necessarily size).
        """
        self.assertEqual(clip_a.format.id, clip_b.format.id, 'Same format expected.')
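
One line above deserves a note: VARIABLE_FORMAT_CLIP interleaves two BlankClips of different formats with mismatch=True, which yields a clip with no single fixed format. That is useful for testing code paths that must detect or reject variable-format input. A minimal sketch, assuming current VapourSynth semantics where such clips report their format as None:

import vapoursynth as vs

core = vs.core

a = core.std.BlankClip(format=vs.YUV420P8, width=160, height=120)
b = core.std.BlankClip(format=vs.YUV444P8, width=160, height=120)
variable = core.std.Interleave([a, b], mismatch=True)
assert variable.format is None  # variable-format clips expose no single format
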
Example from rlaPHOENiX/VSGAN (vsgan/__init__.py):
    # Excerpt from the VSGAN class; relies on module-level imports of
    # functools and mvsfunc and on a VapourSynth `core` instance.
    def run(self, clip, chunk=False):
        # convert clip to RGB24 as it cannot read any other color space
        buffer = mvsfunc.ToRGB(clip, depth=8, kernel="spline36")  # expecting RGB24 (RGB 8bpp)
        # send the clip array to execute()
        results = []
        for c in self.chunk_clip(buffer) if chunk else [buffer]:
            results.append(core.std.FrameEval(
                core.std.BlankClip(
                    clip=c,
                    width=c.width * self.model_scale,
                    height=c.height * self.model_scale
                ),
                functools.partial(
                    self.execute,
                    clip=c
                )
            ))
        # if chunked, rejoin the chunked clips otherwise return the result
        buffer = core.std.StackHorizontal([
            core.std.StackVertical([results[0], results[1]]),
            core.std.StackVertical([results[2], results[3]])
        ]) if chunk else results[0]
        
        # VSGAN used to convert back to the original color space which resulted
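
The BlankClip call in this last example never reaches the output; it acts as a template. std.FrameEval takes a clip whose properties (format, frame rate, dimensions, length) define the output node, plus a per-frame function that returns the clip to pull each frame from. Passing clip=c copies c's properties, while width and height override the dimensions to the upscaled size. A stripped-down sketch of the pattern, with a hypothetical point resize standing in for VSGAN's model:

import functools

import vapoursynth as vs

core = vs.core


def _double(n, clip):
    # Hypothetical stand-in for the model: return the clip that FrameEval
    # should take frame `n` from; here just a 2x nearest-neighbour upscale.
    return core.resize.Point(clip, clip.width * 2, clip.height * 2)


src = core.std.BlankClip(format=vs.RGB24, width=320, height=240, length=10)

# Template with src's format and frame rate but the upscaled dimensions.
template = core.std.BlankClip(clip=src, width=src.width * 2, height=src.height * 2)

out = core.std.FrameEval(template, functools.partial(_double, clip=src))
out.get_frame(0)  # evaluates one frame to confirm the graph works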