How to use VapourSynth - 9 common examples

To help you get started, we’ve selected a few VapourSynth examples based on popular ways it is used in public projects.
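
All of the snippets below follow the same basic pattern, so for orientation, here is a minimal, self-contained script that uses only the built-in std plugin:

import vapoursynth as vs

core = vs.core  # current releases expose a singleton core; older scripts call vs.get_core()

# Every filter takes a VideoNode and returns a new one; clips are immutable.
clip = core.std.BlankClip(format=vs.YUV420P8, width=640, height=480, length=100)
clip = core.std.CropRel(clip, left=8, right=8)

clip.set_output()  # vspipe and editors read the clip registered as output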


github darealshinji / vapoursynth-plugins / scripts / vsTAAmbk.py
def soothe(clip, src, keep=24):
    core = vs.get_core()
    clip_bits = clip.format.bits_per_sample
    src_bits = src.format.bits_per_sample
    if clip_bits != src_bits:
        raise ValueError(MODULE_NAME + ': soothe: bit depth of clip and src mismatch.')

    neutral = 1 << (clip_bits - 1)
    ceil = (1 << clip_bits) - 1
    multiple = ceil // 255
    const = 100 * multiple
    kp = keep * multiple

    diff = core.std.MakeDiff(src, clip)
    try:
        diff_soften = core.misc.AverageFrames(diff, weights=[1, 1, 1], scenechange=32)
    except AttributeError:
        diff_soften = core.focus.TemporalSoften(diff, radius=1, luma_threshold=255,
                                                chroma_threshold=255, scenechange=32, mode=2)
        # (assumed completion of a call the listing truncates mid-line)
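
soothe() takes the filtered clip and its unfiltered source and stabilizes the sharpening difference over time. A hedged usage sketch (the import path and filter choice are assumptions; the misc or focus plugin must be installed):

import vapoursynth as vs
import vsTAAmbk  # assumption: the script is importable as a module

core = vs.core
source = core.std.BlankClip(format=vs.YUV420P8, width=640, height=480, length=24)
# A simple 3x3 convolution stands in for whatever sharpener you actually use.
sharpened = core.std.Convolution(source, matrix=[0, -1, 0, -1, 9, -1, 0, -1, 0])
stabilized = vsTAAmbk.soothe(sharpened, source, keep=24)
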
github darealshinji / vapoursynth-plugins / scripts / scoll.py
def ezydegrain(src, tr=3, thsad=250, blksize=None, overlap=None, pel=None, limit=None, recalc=False, plane=4):
    core = vs.get_core()

    # Vars

    if blksize is None:
        if src.width < 1280 or src.height < 720:
            blksize = 8
        elif src.width >= 3840 or src.height >= 2160:
            blksize = 32
        else:
            blksize = 16

    if overlap is None:
        overlap = blksize // 2

    if pel is None:
        if src.width < 1280 or src.height < 720:
            pel = 2  # assumed completion; the listing truncates the snippet here
        else:
            pel = 1
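
ezydegrain() is a convenience wrapper around MVTools motion-compensated denoising; blksize, overlap, and pel fall back to resolution-based defaults as shown above. A hedged call (requires the mvtools plugin; the import path is an assumption):

import vapoursynth as vs
import scoll  # assumption: scoll.py from this repo is on the module path

core = vs.core
src = core.std.BlankClip(format=vs.YUV420P8, width=1920, height=1080, length=24)
denoised = scoll.ezydegrain(src, tr=3, thsad=250)  # 1080p input, so blksize=16 is auto-picked
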
github Irrational-Encoding-Wizardry / kagefunc / kagefunc.py
It's kinda experimental, but I wanted to try something like this.
    It works by finding the edge of the subtitle (where the black border and the white fill color touch),
    and it grows these areas into a regular brightness + difference mask via hysteresis.
    This should (in theory) reliably find all hardsubs in the image with barely any false positives (or none at all).
    Output depth and processing precision are the same as the input's.
    It is not necessary for 'clip' and 'ref' to have the same bit depth, as 'ref' will be dithered to match 'clip'.
    Most of this code was written by Zastin (https://github.com/Z4ST1N)
    Clean code soon(tm)
    """
    clp_f = clip.format
    bits = clp_f.bits_per_sample
    stype = clp_f.sample_type

    expand_n = fallback(expand_n, clip.width // 200)

    yuv_fmt = core.register_format(clp_f.color_family, vs.INTEGER, 8, clp_f.subsampling_w, clp_f.subsampling_h)

    y_range = 219 << (bits - 8) if stype == vs.INTEGER else 1
    uv_range = 224 << (bits - 8) if stype == vs.INTEGER else 1
    offset = 16 << (bits - 8) if stype == vs.INTEGER else 0

    uv_abs = ' abs ' if stype == vs.FLOAT else ' {} - abs '.format((1 << bits) // 2)
    yexpr = 'x y - abs {thr} > 255 0 ?'.format(thr=y_range * 0.7)
    uvexpr = 'x {uv_abs} {thr} < y {uv_abs} {thr} < and 255 0 ?'.format(uv_abs=uv_abs, thr=uv_range * 0.1)

    difexpr = 'x {upper} > x {lower} < or x y - abs {mindiff} > and 255 0 ?'.format(upper=y_range * 0.8 + offset,
                                                                                    lower=y_range * 0.2 + offset,
                                                                                    mindiff=y_range * 0.1)

    # right shift by 4 pixels.
    # fmtc uses at least 16 bit internally, so it's slower for 8 bit,
    # but its behaviour when shifting/replicating edge pixels makes it faster otherwise
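
The hysteresis growing described in the docstring is a stock VapourSynth primitive. A minimal sketch of the idea, assuming the misc plugin is available: seed pixels from a strict mask spread only into regions the loose mask permits.

import vapoursynth as vs

core = vs.core
gray = core.std.BlankClip(format=vs.GRAY8, width=640, height=480, length=1)
loose = core.std.Binarize(gray, threshold=80)    # permissive mask: anything that might belong
strict = core.std.Binarize(gray, threshold=200)  # high-confidence seed pixels
mask = core.misc.Hysteresis(strict, loose)       # grow the seeds, but only inside the loose mask
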
github darealshinji / vapoursynth-plugins / scripts / vsTAAmbk.py
    def __init__(self, clip):
        super(MaskParent, self).__init__(clip)
        if clip.format.color_family is not vs.GRAY:
            self.clip = self.core.std.ShufflePlanes(self.clip, 0, vs.GRAY)
        self.clip = mvf.Depth(self.clip, 8)  # Mask will always be processed in 8bit scale
        self.mask = None
        self.multi = ((1 << self.clip_bits) - 1) // 255
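
The multi factor rescales constants written for the 8-bit range to the clip's actual bit depth; the same idiom is useful anywhere a threshold is specified in 8-bit terms:

import vapoursynth as vs

core = vs.core
clip = core.std.BlankClip(format=vs.GRAY16, width=640, height=480)
multi = ((1 << clip.format.bits_per_sample) - 1) // 255  # 257 for 16-bit, 1 for 8-bit
mask = core.std.Binarize(clip, threshold=128 * multi)    # '128' keeps its 8-bit meaning
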
github Irrational-Encoding-Wizardry / fvsfunc / fvsfunc.py
    ow = src.width
    oh = src.height

    bits = src.format.bits_per_sample
    sample_type = src.format.sample_type
    
    if sample_type == vs.INTEGER:
        maxvalue = (1 << bits) - 1
        thr = thr * maxvalue // 0xFF
    else:
        maxvalue = 1
        thr /= (235 - 16)

    # Fix lineart
    src_y = core.std.ShufflePlanes(src, planes=0, colorfamily=vs.GRAY)
    deb = Resize(src_y, w, h, kernel=kernel, a1=b, a2=c, taps=taps, invks=True)
    sharp = nnp2.nnedi3_rpow2(deb, 2, ow, oh)
    thrlow = 4 * maxvalue // 0xFF if sample_type == vs.INTEGER else 4 / 0xFF
    thrhigh = 24 * maxvalue // 0xFF if sample_type == vs.INTEGER else 24 / 0xFF
    edgemask = core.std.Prewitt(sharp, planes=0)
    edgemask = core.std.Expr(edgemask, "x {thrhigh} >= {maxvalue} x {thrlow} <= 0 x ? ?"
                                       .format(thrhigh=thrhigh, maxvalue=maxvalue, thrlow=thrlow))
    if kernel == "bicubic" and c >= 0.7:
        edgemask = core.std.Maximum(edgemask, planes=0)
    sharp = core.resize.Point(sharp, format=src.format.id)

    # Restore true 1080p
    deb_upscale = Resize(deb, ow, oh, kernel=kernel, a1=b, a2=c, taps=taps)
    diffmask = core.std.Expr([src_y, deb_upscale], 'x y - abs')
    for _ in range(expand):
        diffmask = core.std.Maximum(diffmask, planes=0)
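
This body matches fvsfunc's DescaleAA helper, which reverses a presumed upscale, anti-aliases the lineart, and masks the result back over the source. A hedged sketch of a call, assuming the current fvsfunc signature (the 720p bicubic scenario is hypothetical, and the descale and nnedi3 plugins are needed at runtime):

import vapoursynth as vs
import fvsfunc as fvf

core = vs.core
src = core.std.BlankClip(format=vs.YUV420P8, width=1920, height=1080, length=24)
# Hypothetical scenario: content natively 720p, upscaled with Mitchell bicubic.
fixed = fvf.DescaleAA(src, w=1280, h=720, kernel='bicubic', b=1/3, c=1/3)
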
github darealshinji / vapoursynth-plugins / scripts / mvsfunc.py
        # Change chroma sub-sampling if needed
        if dHSubS != sHSubS or dVSubS != sVSubS:
            # Apply depth conversion for processed clip
            clip = Depth(clip, pbitPS, pSType, fulls, fulls, dither, useZ, ampo, ampn, dyn, staticnoise)
            clip = core.fmtc.resample(clip, kernel=kernel, taps=taps, a1=a1, a2=a2, css=css, planes=[2,3,3], fulls=fulls, fulld=fulls, cplace=cplace)
        # Apply depth conversion for output clip
        clip = Depth(clip, dbitPS, dSType, fulls, fulld, dither, useZ, ampo, ampn, dyn, staticnoise)
    elif sIsGRAY:
        # Apply depth conversion for output clip
        clip = Depth(clip, dbitPS, dSType, fulls, fulld, dither, useZ, ampo, ampn, dyn, staticnoise)
        # Shuffle planes for Gray input
        widthc = input.width // dHSubS
        heightc = input.height // dVSubS
        UV = core.std.BlankClip(clip, width=widthc, height=heightc, \
        color=1 << (dbitPS - 1) if dSType == vs.INTEGER else 0)
        clip = core.std.ShufflePlanes([clip,UV,UV], [0,0,0], vs.YUV)
    else:
        # Apply depth conversion for processed clip
        clip = Depth(clip, pbitPS, pSType, fulls, fulls, dither, useZ, ampo, ampn, dyn, staticnoise)
        # Apply matrix conversion for RGB input
        if matrix == "OPP":
            clip = core.fmtc.matrix(clip, fulls=fulls, fulld=fulld, coef=[1/3,1/3,1/3,0, 1/2,0,-1/2,0, 1/4,-1/2,1/4,0], col_fam=vs.YUV)
            clip = SetColorSpace(clip, Matrix=2)
        elif matrix == "2020cl":
            clip = core.fmtc.matrix2020cl(clip, full=fulld)
        else:
            clip = core.fmtc.matrix(clip, mat=matrix, fulls=fulls, fulld=fulld, col_fam=vs.YCOCG if matrix == "YCgCo" else vs.YUV)
        # Change chroma sub-sampling if needed
        if dHSubS != sHSubS or dVSubS != sVSubS:
            clip = core.fmtc.resample(clip, kernel=kernel, taps=taps, a1=a1, a2=a2, css=css, planes=[2,3,3], fulls=fulld, fulld=fulld, cplace=cplace)
        # Apply depth conversion for output clip
        clip = Depth(clip, dbitPS, dSType, fulld, fulld, dither, useZ, ampo, ampn, dyn, staticnoise)
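
This block is the tail of mvsfunc's ToYUV, which bundles the depth conversion, matrix conversion, and chroma resampling shown above behind one call. A hedged sketch (argument names assumed from current mvsfunc; the fmtconv plugin is required):

import vapoursynth as vs
import mvsfunc as mvf

core = vs.core
rgb = core.std.BlankClip(format=vs.RGB24, width=640, height=480, length=24)
yuv = mvf.ToYUV(rgb, matrix='709', css='420', depth=10)  # BT.709 matrix, 4:2:0, 10-bit output
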
github Irrational-Encoding-Wizardry / vsutil / tests / test_vsutil.py
    def test_subsampling(self):
        self.assertEqual('444', vsutil.get_subsampling(self.YUV444P8_CLIP))
        self.assertEqual('440', vsutil.get_subsampling(self.YUV440P8_CLIP))
        self.assertEqual('420', vsutil.get_subsampling(self.YUV420P8_CLIP))
        self.assertEqual('422', vsutil.get_subsampling(self.YUV422P8_CLIP))
        self.assertEqual('411', vsutil.get_subsampling(self.YUV411P8_CLIP))
        self.assertEqual('410', vsutil.get_subsampling(self.YUV410P8_CLIP))
        self.assertEqual(None, vsutil.get_subsampling(self.RGB24_CLIP))
        # let’s create a custom format with higher subsampling than any of the legal ones to test that branch as well:
        with self.assertRaisesRegex(ValueError, 'Unknown subsampling.'):
            vsutil.get_subsampling(
                vs.core.std.BlankClip(format=self.YUV444P8_CLIP.format.replace(subsampling_w=4))
            )
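
Outside the test suite, get_subsampling is handy for branching on chroma layout; per the assertions above, it returns the subsampling as a string and None for formats without subsampling:

import vapoursynth as vs
import vsutil

clip = vs.core.std.BlankClip(format=vs.YUV420P8, width=160, height=120)
assert vsutil.get_subsampling(clip) == '420'
assert vsutil.get_subsampling(vs.core.std.BlankClip(format=vs.RGB24)) is None
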
github Irrational-Encoding-Wizardry / kagefunc / tests.py
"""
Many of these don’t actually test the logic and just make some 
basic assertions as well as a call to check if frames are produced.
"""
import unittest
import vapoursynth as vs
import kagefunc as kgf


class KagefuncTests(unittest.TestCase):
    BLACK_SAMPLE_CLIP = vs.core.std.BlankClip(_format=vs.YUV420P8, width=160, height=120, color=[0, 128, 128],
                                              length=100)
    WHITE_SAMPLE_CLIP = vs.core.std.BlankClip(_format=vs.YUV420P8, width=160, height=120, color=[255, 128, 128],
                                              length=100)
    GREYSCALE_SAMPLE_CLIP = vs.core.std.BlankClip(_format=vs.GRAY8, width=160, height=120, color=[255])

    def test_retinex_edgemask(self):
        mask = kgf.retinex_edgemask(self.BLACK_SAMPLE_CLIP)
        self.assert_same_bitdepth(mask, self.BLACK_SAMPLE_CLIP)
        self.assert_same_length(mask, self.BLACK_SAMPLE_CLIP)
        self.assertEqual(mask.format.color_family, vs.GRAY)
        # request a frame to see if that errors
        mask.get_frame(0)

    def test_inverse_scale(self):
        src = self.BLACK_SAMPLE_CLIP
        resized = kgf.inverse_scale(self.GREYSCALE_SAMPLE_CLIP, height=90)
        self.assertEqual(resized.format.id, vs.GRAYS)
        self.assertEqual(resized.height, 90)
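
The file is a plain unittest module, so the standard entry point runs it directly:

if __name__ == '__main__':
    unittest.main()
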
github Irrational-Encoding-Wizardry / vsutil / tests / test_vsutil.py
import unittest

import vapoursynth as vs
import vsutil


class VsUtilTests(unittest.TestCase):
    YUV420P8_CLIP = vs.core.std.BlankClip(format=vs.YUV420P8, width=160, height=120, color=[0, 128, 128], length=100)
    YUV420P10_CLIP = vs.core.std.BlankClip(format=vs.YUV420P10, width=160, height=120, color=[0, 128, 128], length=100)
    YUV444P8_CLIP = vs.core.std.BlankClip(format=vs.YUV444P8, width=160, height=120, color=[0, 128, 128], length=100)
    YUV422P8_CLIP = vs.core.std.BlankClip(format=vs.YUV422P8, width=160, height=120, color=[0, 128, 128], length=100)
    YUV410P8_CLIP = vs.core.std.BlankClip(format=vs.YUV410P8, width=160, height=120, color=[0, 128, 128], length=100)
    YUV411P8_CLIP = vs.core.std.BlankClip(format=vs.YUV411P8, width=160, height=120, color=[0, 128, 128], length=100)
    YUV440P8_CLIP = vs.core.std.BlankClip(format=vs.YUV440P8, width=160, height=120, color=[0, 128, 128], length=100)
    RGB24_CLIP = vs.core.std.BlankClip(format=vs.RGB24)

    SMALLER_SAMPLE_CLIP = vs.core.std.BlankClip(format=vs.YUV420P8, width=10, height=10)

    BLACK_SAMPLE_CLIP = vs.core.std.BlankClip(format=vs.YUV420P8, width=160, height=120, color=[0, 128, 128],
                                              length=100)
    WHITE_SAMPLE_CLIP = vs.core.std.BlankClip(format=vs.YUV420P8, width=160, height=120, color=[255, 128, 128],
                                              length=100)

    VARIABLE_FORMAT_CLIP = vs.core.std.Interleave([YUV420P8_CLIP, YUV444P8_CLIP], mismatch=True)

    def assert_same_dimensions(self, clip_a: vs.VideoNode, clip_b: vs.VideoNode):
        """
        Assert that two clips have the same width and height.
        """
        self.assertEqual(clip_a.height, clip_b.height, f'Same height expected, was {clip_a.height} and {clip_b.height}.')
        self.assertEqual(clip_a.width, clip_b.width, f'Same width expected, was {clip_a.width} and {clip_b.width}.')

    def assert_same_format(self, clip_a: vs.VideoNode, clip_b: vs.VideoNode):
        """
        Assert that two clips have the same format (but not necessarily size).
        """
        # assumed completion; the listing truncates the method here
        self.assertEqual(clip_a.format.id, clip_b.format.id, 'Same format expected.')