How to use the vapoursynth.GRAY8 format constant in VapourSynth

To help you get started, we’ve selected a few VapourSynth examples based on popular ways vapoursynth.GRAY8 is used in public projects.
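Before the project excerpts, here is a minimal standalone sketch of the most common uses of vs.GRAY8: as the format argument of std.BlankClip, as the result of extracting the luma plane of an 8-bit YUV clip, and as an explicit output format for std.Expr. The clip dimensions and values below are illustrative only and are not taken from the projects listed further down.

import vapoursynth as vs

core = vs.core

# vs.GRAY8 is the preset 8-bit, single-plane grayscale format.
gray = core.std.BlankClip(format=vs.GRAY8, width=640, height=480, length=100, color=[128])

# Extracting the luma plane of an 8-bit YUV clip yields a GRAY8 clip.
yuv = core.std.BlankClip(format=vs.YUV420P8, width=640, height=480, length=100)
luma = core.std.ShufflePlanes(yuv, planes=0, colorfamily=vs.GRAY)

# Force an 8-bit grayscale output format from std.Expr.
mask = core.std.Expr([gray, luma], 'x y max', vs.GRAY8)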


github Irrational-Encoding-Wizardry / vsutil / tests / test_vsutil.py
def test_plane(self):
        y = vs.core.std.BlankClip(format=vs.GRAY8)
        # This should be a no-op, and even the clip reference shouldn’t change
        self.assertEqual(y, vsutil.plane(y, 0))
github Irrational-Encoding-Wizardry / kagefunc / tests.py
"""
Many of these don’t actually test the logic and just make some 
basic assertions as well as a call to check if frames are produced.
"""
import unittest
import vapoursynth as vs
import kagefunc as kgf


class KagefuncTests(unittest.TestCase):
    BLACK_SAMPLE_CLIP = vs.core.std.BlankClip(_format=vs.YUV420P8, width=160, height=120, color=[0, 128, 128],
                                              length=100)
    WHITE_SAMPLE_CLIP = vs.core.std.BlankClip(_format=vs.YUV420P8, width=160, height=120, color=[255, 128, 128],
                                              length=100)
    GREYSCALE_SAMPLE_CLIP = vs.core.std.BlankClip(_format=vs.GRAY8, width=160, height=120, color=[255])

    def test_retinex_edgemask(self):
        mask = kgf.retinex_edgemask(self.BLACK_SAMPLE_CLIP)
        self.assert_same_bitdepth(mask, self.BLACK_SAMPLE_CLIP)
        self.assert_same_length(mask, self.BLACK_SAMPLE_CLIP)
        self.assertEqual(mask.format.color_family, vs.GRAY)
        # request a frame to see if that errors
        mask.get_frame(0)

    def test_inverse_scale(self):
        src = self.BLACK_SAMPLE_CLIP
        resized = kgf.inverse_scale(self.GREYSCALE_SAMPLE_CLIP, height=90)
        self.assertEqual(resized.format.id, vs.GRAYS)
        self.assertEqual(resized.height, 90)
        self.assertEqual(resized.width, 120)
        resized = kgf.inverse_scale(src, height=90)
github Irrational-Encoding-Wizardry / kagefunc / kagefunc.py
    # right shift by 4 pixels.
    # fmtc uses at least 16 bit internally, so it's slower for 8 bit,
    # but its behaviour when shifting/replicating edge pixels makes it faster otherwise
    if bits < 16:
        right = core.resize.Point(clip, src_left=4)
    else:
        right = core.fmtc.resample(clip, sx=4, flt=False)
    subedge = core.std.Expr([clip, right], [yexpr, uvexpr], yuv_fmt.id)
    c444 = split(subedge.resize.Bicubic(format=vs.YUV444P8, filter_param_a=0, filter_param_b=0.5))
    subedge = core.std.Expr(c444, 'x y z min min')

    clip, ref = get_y(clip), get_y(ref)
    ref = ref if clip.format == ref.format else depth(ref, bits)

    clips = [clip.std.Convolution([1] * 9), ref.std.Convolution([1] * 9)]
    diff = core.std.Expr(clips, difexpr, vs.GRAY8).std.Maximum().std.Maximum()

    mask = core.misc.Hysteresis(subedge, diff)
    mask = iterate(mask, core.std.Maximum, expand_n)
    mask = mask.std.Inflate().std.Inflate().std.Convolution([1] * 9)
    return depth(mask, bits, range=1, range_in=1)
github Endilll / vapoursynth-preview / vspreview / core / types.py
        if vs_output.format == vs.COMPATBGR32:  # type: ignore
            return vs_output

        is_subsampled = (vs_output.format.subsampling_w != 0
                         or vs_output.format.subsampling_h != 0)
        if not is_subsampled:
            resizer = self.Resizer.Point

        if vs_output.format.color_family == vs.RGB:
            del resizer_kwargs['matrix_in_s']

        if alpha:
            if vs_output.format == vs.GRAY8:  # type: ignore
                return vs_output
            resizer_kwargs['format'] = vs.GRAY8

        vs_output = resizer(vs_output, **resizer_kwargs,
                            **self.main.VS_OUTPUT_RESIZER_KWARGS)

        return vs_output
github Irrational-Encoding-Wizardry / fvsfunc / fvsfunc.py
    before = clip[:start] if start != 0 else None
    middle = clip[start:end]
    after = clip[end:] if end != clip.num_frames else None

    matrix_s = None
    matrix_in_s = None
    if clip_cf == vs.YUV and overlay_cf == vs.RGB:
        matrix_s = matrix
    if overlay_cf == vs.YUV and clip_cf == vs.RGB:
        matrix_in_s = matrix
    sign = core.resize.Spline36(overlay[0], clip.width, clip.height, format=clip.format.id,
                                matrix_s=matrix_s, matrix_in_s=matrix_in_s,
                                dither_type='error_diffusion')

    if overlay[1] is None:
        overlay[1] = core.std.BlankClip(sign, format=vs.GRAY8, color=255)
    mask = core.resize.Bicubic(overlay[1], clip.width, clip.height)
    mask = Depth(mask, bits=clip.format.bits_per_sample, range='full', range_in='full')

    middle = core.std.MaskedMerge(middle, sign, mask)

    out = middle
    if before is not None:
        out = before + out
    if after is not None:
        out = out + after
    return out
github Endilll / vapoursynth-preview / vspreview / core / media.py
        if vs_output.format == COMPATBGR32:  # type: ignore
            return vs_output

        is_subsampled = (vs_output.format.subsampling_w != 0
                         or vs_output.format.subsampling_h != 0)
        if not is_subsampled:
            resizer = self.Resizer.Point

        if vs_output.format.color_family == RGB:
            del resizer_kwargs['matrix_in_s']

        if alpha:
            if vs_output.format == GRAY8:  # type: ignore
                return vs_output
            resizer_kwargs['format'] = GRAY8

        vs_output = resizer(vs_output, **resizer_kwargs,
                            **settings.VS_OUTPUT_RESIZER_KWARGS)

        return vs_output
github darealshinji / vapoursynth-plugins / scripts / vsTAAmbk.py
def __add__(self, mask_b):
        if not isinstance(mask_b, MaskParent):
            raise TypeError(MODULE_NAME + ': Incorrect mask_b type.')
        self.mask = self.core.std.Expr([self.mask, mask_b.mask], "x y max", vs.GRAY8)
        return self
github darealshinji / vapoursynth-plugins / temporalsoften2 / temporalsoften2.py
def set_scenechange(self, clip, threshold=15, log=None):
        sc = clip
        cf = clip.format.color_family
        if cf == vs.RGB:
            # convert the RGB clip to 8-bit grayscale before scene-change detection
            sc = self.resize(clip, format=vs.GRAY8)
        sc = self.detect(sc, threshold)
        if cf == vs.RGB:
            sc = self.modframe([clip, sc], self.set_props)
        return sc