From 61723f1f72064914dc8591631816fb4e9f43555e Mon Sep 17 00:00:00 2001 From: Georg Pelz Date: Sat, 11 Jul 2020 15:34:23 +0200 Subject: [PATCH] updated scripts mainly small changes --- README.md | 2 - fvsfunc.py | 2 +- havsfunc.py | 1569 +++++++++++++++++++++++++---------------- nnedi3_rpow2.py | 92 +++ psharpen.py | 86 --- vsTAAmbk.py | 715 +++++++++++++++++++++ 6 files changed, 1642 insertions(+), 824 deletions(-) delete mode 100644 README.md create mode 100644 nnedi3_rpow2.py delete mode 100644 psharpen.py create mode 100644 vsTAAmbk.py diff --git a/README.md b/README.md deleted file mode 100644 index 9924c1e..0000000 --- a/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# VapoursynthScriptsInHybrid -All the Vapoursynth scripts that Hybrid uses diff --git a/fvsfunc.py b/fvsfunc.py index 4cf8931..da66f01 100644 --- a/fvsfunc.py +++ b/fvsfunc.py @@ -146,7 +146,7 @@ def GradFun3(src, thr=None, radius=None, elast=None, mask=None, mode=None, ampo=None, ampn=None, pat=None, dyn=None, staticnoise=None, smode=None, thr_det=None, debug=None, thrc=None, radiusc=None, elastc=None, planes=None, ref=None, - yuv444=None, w=None, h=None, resizer=None, b=None, c=None, bits=None): + yuv444=None, w=None, h=None, resizer=None, b=None, c=None, bits=None, opencl=False, device=None): def smooth_mod(src_16, ref_16, smode, radius, thr, elast, planes): if smode == 0: diff --git a/havsfunc.py b/havsfunc.py index 06f0184..3b5c090 100644 --- a/havsfunc.py +++ b/havsfunc.py @@ -8,7 +8,7 @@ """ Holy's ported AviSynth functions for VapourSynth. -(replaced ff3dfilter with neo_fft3d dependencies) +(replaced fft3dfilter with neo_fft3d) Main functions: daa @@ -19,7 +19,7 @@ Deblock_QED DeHalo_alpha EdgeCleaner - FineDehalo + FineDehalo, FineDehalo2 YAHR HQDeringmod QTGMC @@ -66,12 +66,12 @@ Weave ContraSharpening MinBlur - sbr + sbr, sbrV DitherLumaRebuild - mt_expand_multi - mt_inpand_multi - mt_inflate_multi - mt_deflate_multi + mt_expand_multi, mt_inpand_multi + + mt_inflate_multi, mt_deflate_multi + """ @@ -98,7 +98,7 @@ def daa(c, nsize=None, nns=None, qual=None, pscrn=None, int16_prescreener=None, nn = nnedi3(c, field=3) dbl = core.std.Merge(nn[::2], nn[1::2]) dblD = core.std.MakeDiff(c, dbl) - shrpD = core.std.MakeDiff(dbl, core.std.Convolution(dbl, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1] if c.width > 1100 else [1, 2, 1, 2, 4, 2, 1, 2, 1])) + shrpD = core.std.MakeDiff(dbl, dbl.std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1] if c.width > 1100 else [1, 2, 1, 2, 4, 2, 1, 2, 1])) DD = core.rgvs.Repair(shrpD, dblD, mode=[13]) return core.std.MergeDiff(dbl, DD) @@ -107,7 +107,7 @@ def daa3mod(c1, nsize=None, nns=None, qual=None, pscrn=None, int16_prescreener=N if not isinstance(c1, vs.VideoNode): raise vs.Error('daa3mod: This is not a clip') - c = core.resize.Spline36(c1, c1.width, c1.height * 3 // 2) + c = c1.resize.Spline36(c1.width, c1.height * 3 // 2) return daa(c, nsize, nns, qual, pscrn, int16_prescreener, int16_predictor, exp, opencl, device).resize.Spline36(c1.width, c1.height) @@ -115,12 +115,12 @@ def mcdaa3(input, nsize=None, nns=None, qual=None, pscrn=None, int16_prescreener if not isinstance(input, vs.VideoNode): raise vs.Error('mcdaa3: This is not a clip') - sup = core.hqdn3d.Hqdn3d(input).fft3dfilter.FFT3DFilter().mv.Super(sharp=1) - fv1 = core.mv.Analyse(sup, isb=False, delta=1, truemotion=False, dct=2) - fv2 = core.mv.Analyse(sup, isb=True, delta=1, truemotion=True, dct=2) + sup = input.hqdn3d.Hqdn3d().neo_fft3d.FFT3D().mv.Super(sharp=1) + fv1 = sup.mv.Analyse(isb=False, delta=1, truemotion=False, dct=2) +
fv2 = sup.mv.Analyse(isb=True, delta=1, truemotion=True, dct=2) csaa = daa3mod(input, nsize, nns, qual, pscrn, int16_prescreener, int16_predictor, exp, opencl, device) - momask1 = core.mv.Mask(input, fv1, ml=2, kind=1) - momask2 = core.mv.Mask(input, fv2, ml=3, kind=1) + momask1 = input.mv.Mask(fv1, ml=2, kind=1) + momask2 = input.mv.Mask(fv2, ml=3, kind=1) momask = core.std.Merge(momask1, momask2) return core.std.MaskedMerge(input, csaa, momask) @@ -178,7 +178,7 @@ def santiag_stronger(c, strength, type): if c.format.color_family != vs.GRAY: cshift = [cshift, cshift * (1 << c.format.subsampling_h)] c = Resize(c, w, h // 2, sy=cshift, kernel='point', dmode=1) - return core.eedi2.EEDI2(c, field=field) + return c.eedi2.EEDI2(field=field) elif type == 'eedi3': sclip = nnedi3(c, field=field, dh=dh) return eedi3(c, field=field, dh=dh, sclip=sclip) @@ -188,7 +188,7 @@ def santiag_stronger(c, strength, type): if c.format.color_family != vs.GRAY: cshift = [cshift, cshift * (1 << c.format.subsampling_h)] c = Resize(c, w, h * 2, sy=cshift, dmode=1) - return core.sangnom.SangNom(c, order=field, aa=aa) + return c.sangnom.SangNom(order=field, aa=aa) else: raise vs.Error('santiag: unexpected value for type') @@ -196,14 +196,14 @@ def santiag_stronger(c, strength, type): raise vs.Error('santiag: This is not a clip') type = type.lower() - if typeh is None: - typeh = type - else: - typeh = typeh.lower() - if typev is None: - typev = type - else: - typev = typev.lower() + typeh = type if typeh is None else typeh.lower() + typev = type if typev is None else typev.lower() + + + + + + w = c.width h = c.height @@ -213,16 +213,16 @@ def santiag_stronger(c, strength, type): if strh >= 0: c = santiag_dir(c, strh, typeh, fwh, fhh) if strv >= 0: - c = santiag_dir(core.std.Transpose(c), strv, typev, fh, fw).std.Transpose() + c = santiag_dir(c.std.Transpose(), strv, typev, fh, fw).std.Transpose() if fw is None: fw = w if fh is None: fh = h if strh < 0 and strv < 0: - return core.resize.Spline36(c, fw, fh) - else: - return c + c = c.resize.Spline36(fw, fh) + + return c # FixChromaBleedingMod v1.35 @@ -236,8 +236,9 @@ def santiag_stronger(c, strength, type): def FixChromaBleedingMod(input, cx=4, cy=4, thr=4.0, strength=0.8, blur=False): if not isinstance(input, vs.VideoNode): raise vs.Error('FixChromaBleedingMod: This is not a clip') + if input.format.color_family in [vs.GRAY, vs.RGB]: - raise vs.Error('FixChromaBleedingMod: Gray and RGB color families are not supported') + raise vs.Error('FixChromaBleedingMod: Gray and RGB formats are not supported') neutral = 1 << (input.format.bits_per_sample - 1) peak = (1 << input.format.bits_per_sample) - 1 @@ -260,16 +261,17 @@ def get_lut2(x): q = cround((x - neutral) * (output_high - output_low) / divisor + neutral) return min(max(q, tvLow), tvHigh[1]) if coring else min(max(q, 0), peak) - last = core.std.Lut(clip, planes=[0], function=get_lut1) - if clip.format.color_family == vs.GRAY: - return last - else: - return core.std.Lut(last, planes=[1, 2], function=get_lut2) + last = clip.std.Lut(planes=[0], function=get_lut1) + if clip.format.color_family != vs.GRAY: + last = last.std.Lut(planes=[1, 2], function=get_lut2) + return last + + # prepare to work on the V channel and filter noise vch = mvf.GetPlane(adjust.Tweak(input, sat=thr), 2) if blur: - area = core.std.Convolution(vch, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]) + area = vch.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]) else: area = vch @@ -280,17 +282,17 @@ def get_lut2(x): # merge both masks mask = 
core.std.Merge(red, blue) if not blur: - mask = core.std.Convolution(mask, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]) + mask = mask.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]) mask = Levels(mask, scale(250, peak), 1.0, scale(250, peak), scale(255, peak), 0) # expand to cover beyond the bleeding areas and shift to compensate the resizing - mask = core.std.Convolution(mask, matrix=[0, 0, 0, 1, 0, 0, 0, 0, 0], divisor=1, saturate=False).std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 0, 0, 0], divisor=8, saturate=False) + mask = mask.std.Convolution(matrix=[0, 0, 0, 1, 0, 0, 0, 0, 0], divisor=1, saturate=False).std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 0, 0, 0], divisor=8, saturate=False) # binarize (also a trick to expand) mask = Levels(mask, scale(10, peak), 1.0, scale(10, peak), 0, scale(255, peak)).std.Inflate() # prepare a version of the image that has its chroma shifted and less saturated - input_c = adjust.Tweak(core.resize.Spline36(input, src_left=cx, src_top=cy), sat=strength) + input_c = adjust.Tweak(input.resize.Spline36(src_left=cx, src_top=cy), sat=strength) # combine both images using the mask fu = core.std.MaskedMerge(mvf.GetPlane(input, 1), mvf.GetPlane(input_c, 1), mask) @@ -310,34 +312,41 @@ def Deblock_QED(clp, quant1=24, quant2=26, aOff1=1, bOff1=2, aOff2=1, bOff2=2, u if not isinstance(clp, vs.VideoNode): raise vs.Error('Deblock_QED: This is not a clip') - neutral = 1 << (clp.format.bits_per_sample - 1) - peak = (1 << clp.format.bits_per_sample) - 1 + + isGray = (clp.format.color_family == vs.GRAY) planes = [0, 1, 2] if uv >= 3 and not isGray else [0] + if clp.format.sample_type == vs.INTEGER: + neutral = 1 << (clp.format.bits_per_sample - 1) + peak = (1 << clp.format.bits_per_sample) - 1 + else: + neutral = 0.0 + peak = 1.0 + # add borders if clp is not mod 8 w = clp.width h = clp.height padX = 8 - w % 8 if w & 7 else 0 padY = 8 - h % 8 if h & 7 else 0 if padX or padY: - clp = core.resize.Point(clp, w + padX, h + padY, src_width=w + padX, src_height=h + padY) + clp = clp.resize.Point(w + padX, h + padY, src_width=w + padX, src_height=h + padY) # block - block = core.std.BlankClip(clp, width=6, height=6, format=vs.GRAY8, length=1, color=[0]) - block = core.std.AddBorders(block, 1, 1, 1, 1, color=[255]) + block = clp.std.BlankClip(width=6, height=6, format=clp.format.replace(color_family=vs.GRAY, subsampling_w=0, subsampling_h=0), length=1, color=[0]) + block = block.std.AddBorders(1, 1, 1, 1, color=[peak]) block = core.std.StackHorizontal([block for i in range(clp.width // 8)]) block = core.std.StackVertical([block for i in range(clp.height // 8)]) if not isGray: - blockc = core.std.CropAbs(block, width=clp.width >> clp.format.subsampling_w, height=clp.height >> clp.format.subsampling_h) + blockc = block.std.CropAbs(width=clp.width >> clp.format.subsampling_w, height=clp.height >> clp.format.subsampling_h) block = core.std.ShufflePlanes([block, blockc], planes=[0, 0, 0], colorfamily=clp.format.color_family) - if block.format.bits_per_sample != clp.format.bits_per_sample: - block = core.fmtc.bitdepth(block, bits=clp.format.bits_per_sample, fulls=False, fulld=True) - block = core.std.Loop(block, clp.num_frames) + + + block = block.std.Loop(times=clp.num_frames) # create normal deblocking (for block borders) and strong deblocking (for block interiour) - normal = core.deblock.Deblock(clp, quant=quant1, aoffset=aOff1, boffset=bOff1, planes=[0, 1, 2] if uv != 2 and not isGray else [0]) - strong = core.deblock.Deblock(clp, quant=quant2, aoffset=aOff2, boffset=bOff2, planes=[0, 1, 2] 
if uv != 2 and not isGray else [0]) + normal = clp.deblock.Deblock(quant=quant1, aoffset=aOff1, boffset=bOff1, planes=[0, 1, 2] if uv != 2 and not isGray else [0]) + strong = clp.deblock.Deblock(quant=quant2, aoffset=aOff2, boffset=bOff2, planes=[0, 1, 2] if uv != 2 and not isGray else [0]) # build difference maps of both normalD = core.std.MakeDiff(clp, normal, planes=planes) @@ -356,9 +365,9 @@ def Deblock_QED(clp, quant1=24, quant2=26, aOff1=1, bOff1=2, aOff2=1, bOff2=2, u remX = 16 - sw % 16 if sw & 15 else 0 remY = 16 - sh % 16 if sh & 15 else 0 if remX or remY: - strongD2 = core.resize.Point(strongD2, sw + remX, sh + remY, src_width=sw + remX, src_height=sh + remY) + strongD2 = strongD2.resize.Point(sw + remX, sh + remY, src_width=sw + remX, src_height=sh + remY) expr = f'x {neutral} - 1.01 * {neutral} +' - strongD3 = core.std.Expr([strongD2], expr=[expr] if uv >= 3 or isGray else [expr, '']).dctf.DCTFilter(factors=[1, 1, 0, 0, 0, 0, 0, 0], planes=planes).std.Crop(right=remX, bottom=remY) + strongD3 = strongD2.std.Expr(expr=[expr] if uv >= 3 or isGray else [expr, '']).dctf.DCTFilter(factors=[1, 1, 0, 0, 0, 0, 0, 0], planes=planes).std.Crop(right=remX, bottom=remY) # apply compensation from "normal" deblocking to the borders of the full-block-compensations calculated from "strong" deblocking ... expr = f'y {neutral} = x y ?' @@ -375,7 +384,7 @@ def Deblock_QED(clp, quant1=24, quant2=26, aOff1=1, bOff1=2, aOff2=1, bOff2=2, u deblocked = core.std.ShufflePlanes([deblocked, normal], planes=[0, 1, 2], colorfamily=clp.format.color_family) # remove mod 8 borders - return core.std.Crop(deblocked, right=padX, bottom=padY) + return deblocked.std.Crop(right=padX, bottom=padY) # rx, ry [float, 1.0 ... 2.0 ... ~3.0] @@ -400,7 +409,7 @@ def DeHalo_alpha(clp, rx=2.0, ry=2.0, darkstr=1.0, brightstr=1.0, lowsens=50, hi if clp.format.color_family == vs.RGB: raise vs.Error('DeHalo_alpha: RGB format is not supported') - peak = (1 << clp.format.bits_per_sample) - 1 + peak = (1 << clp.format.bits_per_sample) - 1 if clp.format.sample_type == vs.INTEGER else 1.0 if clp.format.color_family != vs.GRAY: clp_orig = clp @@ -499,7 +508,9 @@ def FineDehalo(src, rx=2.0, ry=None, thmi=80, thma=128, thlimi=50, thlima=100, d if src.format.color_family == vs.RGB: raise vs.Error('FineDehalo: RGB format is not supported') - peak = (1 << src.format.bits_per_sample) - 1 + isInteger = (src.format.sample_type == vs.INTEGER) + + peak = (1 << src.format.bits_per_sample) - 1 if isInteger else 1.0 if src.format.color_family != vs.GRAY: src_orig = src @@ -527,7 +538,7 @@ def FineDehalo(src, rx=2.0, ry=None, thmi=80, thma=128, thlimi=50, thlima=100, d edges = AvsPrewitt(src) # Keeps only the sharpest edges (line edges) - strong = edges.std.Expr(expr=[f'x {scale(thmi, peak)} - {thma - thmi} / 255 *']) + strong = edges.std.Expr(expr=[f'x {scale(thmi, peak)} - {thma - thmi} / 255 *' if isInteger else f'x {scale(thmi, peak)} - {thma - thmi} / 255 * 0 max 1 min']) # Extends them to include the potential halos large = mt_expand_multi(strong, sw=rx_i, sh=ry_i) @@ -538,7 +549,7 @@ def FineDehalo(src, rx=2.0, ry=None, thmi=80, thma=128, thlimi=50, thlima=100, d # producing annoying artifacts. 
Therefore we have to produce a mask to exclude these zones from the halo removal # Includes more edges than previously, but ignores simple details - light = edges.std.Expr(expr=[f'x {scale(thlimi, peak)} - {thlima - thlimi} / 255 *']) + light = edges.std.Expr(expr=[f'x {scale(thlimi, peak)} - {thlima - thlimi} / 255 *' if isInteger else f'x {scale(thlimi, peak)} - {thlima - thlimi} / 255 * 0 max 1 min']) # To build the exclusion zone, we make grow the edge mask, then shrink it to its original shape # During the growing stage, close adjacent edge masks will join and merge, forming a solid area, which will remain solid even after the shrinking stage @@ -548,7 +559,7 @@ def FineDehalo(src, rx=2.0, ry=None, thmi=80, thma=128, thlimi=50, thlima=100, d # At this point, because the mask was made of a shades of grey, we may end up with large areas of dark grey after shrinking # To avoid this, we amplify and saturate the mask here (actually we could even binarize it) - shrink = shrink.std.Expr(expr=['x 4 *']) + shrink = shrink.std.Expr(expr=['x 4 *' if isInteger else 'x 4 * 1 min']) # Mask shrinking shrink = mt_inpand_multi(shrink, mode='ellipse', sw=rx_i, sh=ry_i) @@ -565,14 +576,14 @@ def FineDehalo(src, rx=2.0, ry=None, thmi=80, thma=128, thlimi=50, thlima=100, d shr_med = strong # Substracts masks and amplifies the difference to be sure we get 255 on the areas to be processed - outside = core.std.Expr([large, shr_med], expr=['x y - 2 *']) + outside = core.std.Expr([large, shr_med], expr=['x y - 2 *' if isInteger else 'x y - 2 * 0 max 1 min']) # If edge processing is required, adds the edgemask if edgeproc > 0: - outside = core.std.Expr([outside, strong], expr=[f'x y {edgeproc * 0.66} * +']) + outside = core.std.Expr([outside, strong], expr=[f'x y {edgeproc * 0.66} * +' if isInteger else f'x y {edgeproc * 0.66} * + 1 min']) # Smooth again and amplify to grow the mask a bit, otherwise the halo parts sticking to the edges could be missed - outside = outside.std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]).std.Expr(expr=['x 2 *']) + outside = outside.std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]).std.Expr(expr=['x 2 *' if isInteger else 'x 2 * 1 min']) ### Masking ### @@ -611,9 +622,9 @@ def FineDehalo_contrasharp(dehaloed, src, level): raise vs.Error('FineDehalo_contrasharp: RGB format is not supported') if dehaloed.format.id != src.format.id: - raise vs.Error('FineDehalo_contrasharp: Both clips must have the same format') + raise vs.Error('FineDehalo_contrasharp: Clips must be the same format') - neutral = 1 << (dehaloed.format.bits_per_sample - 1) + neutral = 1 << (dehaloed.format.bits_per_sample - 1) if dehaloed.format.sample_type == vs.INTEGER else 0.0 if dehaloed.format.color_family != vs.GRAY: dehaloed_orig = dehaloed @@ -686,8 +697,9 @@ def grow_mask(mask, coordinates): def YAHR(clp, blur=2, depth=32): if not isinstance(clp, vs.VideoNode): raise vs.Error('YAHR: This is not a clip') + if clp.format.color_family == vs.RGB: - raise vs.Error('YAHR: RGB color family is not supported') + raise vs.Error('YAHR: RGB format is not supported') if clp.format.color_family != vs.GRAY: clp_orig = clp @@ -695,10 +707,10 @@ def YAHR(clp, blur=2, depth=32): else: clp_orig = None - b1 = MinBlur(clp, 2).std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]) + b1 = MinBlur(clp, r=2).std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]) b1D = core.std.MakeDiff(clp, b1) w1 = Padding(clp, 6, 6, 6, 6).warp.AWarpSharp2(blur=blur, depth=depth).std.Crop(6, 6, 6, 6) - w1b1 = MinBlur(w1, 2).std.Convolution(matrix=[1, 
2, 1, 2, 4, 2, 1, 2, 1]) + w1b1 = MinBlur(w1, r=2).std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]) w1b1D = core.std.MakeDiff(w1, w1b1) DD = core.rgvs.Repair(b1D, w1b1D, mode=[13]) DD2 = core.std.MakeDiff(b1D, DD) @@ -751,9 +763,11 @@ def HQDeringmod(input, p=None, ringmask=None, mrad=1, msmooth=1, incedge=False, if ringmask is not None and not isinstance(ringmask, vs.VideoNode): raise vs.Error("HQDeringmod: 'ringmask' is not a clip") + isGray = (input.format.color_family == vs.GRAY) + neutral = 1 << (input.format.bits_per_sample - 1) peak = (1 << input.format.bits_per_sample) - 1 - isGray = (input.format.color_family == vs.GRAY) + if isinstance(planes, int): planes = [planes] @@ -764,7 +778,7 @@ def HQDeringmod(input, p=None, ringmask=None, mrad=1, msmooth=1, incedge=False, # Kernel: Smoothing if p is None: - p = MinBlur(input, nrmode, planes=planes) + p = MinBlur(input, r=nrmode, planes=planes) # Post-Process: Contra-Sharpening matrix1 = [1, 2, 1, 2, 4, 2, 1, 2, 1] @@ -773,13 +787,13 @@ def HQDeringmod(input, p=None, ringmask=None, mrad=1, msmooth=1, incedge=False, if sharp <= 0: sclp = p else: - pre = core.std.Median(p, planes=planes) + pre = p.std.Median(planes=planes) if sharp == 1: - method = core.std.Convolution(pre, matrix=matrix1, planes=planes) + method = pre.std.Convolution(matrix=matrix1, planes=planes) elif sharp == 2: - method = core.std.Convolution(pre, matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes) + method = pre.std.Convolution(matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes) else: - method = core.std.Convolution(pre, matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes).std.Convolution(matrix=matrix2, planes=planes) + method = pre.std.Convolution(matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes).std.Convolution(matrix=matrix2, planes=planes) sharpdiff = core.std.MakeDiff(pre, method, planes=planes) allD = core.std.MakeDiff(input, p, planes=planes) ssDD = core.rgvs.Repair(sharpdiff, allD, mode=[1 if i in planes else 0 for i in range(input.format.num_planes)]) @@ -831,7 +845,7 @@ def HQDeringmod(input, p=None, ringmask=None, mrad=1, msmooth=1, incedge=False, if isGray: return ringmask else: - return core.std.Expr([ringmask], expr=['', repr(neutral)]) + return ringmask.std.Expr(expr=['', repr(neutral)]) else: return core.std.MaskedMerge(input, limitclp, ringmask, planes=planes, first_plane=True) @@ -1022,14 +1036,18 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= if not isinstance(Input, vs.VideoNode): raise vs.Error('QTGMC: This is not a clip') + if EdiExt is not None and (not isinstance(EdiExt, vs.VideoNode) or EdiExt.format.id != Input.format.id): raise vs.Error("QTGMC: 'EdiExt' must be the same format as input") + if InputType != 1 and not isinstance(TFF, bool): - raise vs.Error("QTGMC: 'TFF' must be set when InputType is not 1. Setting TFF to true means top field first and false means bottom field first") + raise vs.Error("QTGMC: 'TFF' must be set when InputType!=1. 
Setting TFF to true means top field first and false means bottom field first") + + isGray = (Input.format.color_family == vs.GRAY) neutral = 1 << (Input.format.bits_per_sample - 1) peak = (1 << Input.format.bits_per_sample) - 1 - isGray = (Input.format.color_family == vs.GRAY) + SOvs = scale(SOvs, peak) @@ -1106,7 +1124,7 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= if totalRestore <= 0: StabilizeNoise = False noiseTD = [1, 3, 5][NoiseTR] - noiseCentre = 128.5 * 2 ** (Input.format.bits_per_sample - 8) if Denoiser in ['fft3df', 'fft3dfilter'] else neutral + noiseCentre = 128.5 * 2 ** (Input.format.bits_per_sample - 8) if Denoiser in ['fft3df', 'fft3dfilter', 'FFT3D'] else neutral # MVTools settings if Lambda is None: @@ -1147,7 +1165,7 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= # Pad vertically during processing (to prevent artefacts at top & bottom edges) if Border: h += 8 - clip = core.resize.Point(Input, w, h, src_top=-4, src_height=h) + clip = Input.resize.Point(w, h, src_top=-4, src_height=h) else: clip = Input @@ -1163,7 +1181,7 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= elif InputType == 1: bobbed = clip else: - bobbed = core.std.Convolution(clip, matrix=[1, 2, 1], mode='v') + bobbed = clip.std.Convolution(matrix=[1, 2, 1], mode='v') CMplanes = [0, 1, 2] if ChromaMotion and not isGray else [0] @@ -1191,9 +1209,9 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= # Blur image and soften edges to assist in motion matching of edge blocks. Blocks are matched by SAD (sum of absolute differences between blocks), but even # a slight change in an edge from frame to frame will give a high SAD due to the higher contrast of edges if SrchClipPP == 1: - spatialBlur = core.resize.Bilinear(repair0, w // 2, h // 2).std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=CMplanes).resize.Bilinear(w, h) + spatialBlur = repair0.resize.Bilinear(w // 2, h // 2).std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=CMplanes).resize.Bilinear(w, h) elif SrchClipPP >= 2: - spatialBlur = Resize(core.std.Convolution(repair0, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=CMplanes), w, h, sw=w + epsilon, sh=h + epsilon, kernel='gauss', a1=2, dmode=1) + spatialBlur = Resize(repair0.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=CMplanes), w, h, sw=w + epsilon, sh=h + epsilon, kernel='gauss', a1=2, dmode=1) if SrchClipPP > 1: spatialBlur = core.std.Merge(spatialBlur, repair0, weight=[0.1] if ChromaMotion or isGray else [0.1, 0]) if SrchClipPP <= 0: @@ -1211,14 +1229,14 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= analyse_args = dict(blksize=BlockSize, overlap=Overlap, search=Search, searchparam=SearchParam, pelsearch=PelSearch, truemotion=TrueMotion, _lambda=Lambda, lsad=LSAD, pnew=PNew, plevel=PLevel, _global=GlobalMotion, dct=DCT, chroma=ChromaMotion) srchSuper = DitherLumaRebuild(srchClip, s0=1, chroma=ChromaMotion).mv.Super(pel=SubPel, sharp=SubPelInterp, hpad=hpad, vpad=vpad, chroma=ChromaMotion) - bVec1 = core.mv.Analyse(srchSuper, isb=True, delta=1, **analyse_args) - fVec1 = core.mv.Analyse(srchSuper, isb=False, delta=1, **analyse_args) + bVec1 = srchSuper.mv.Analyse(isb=True, delta=1, **analyse_args) + fVec1 = srchSuper.mv.Analyse(isb=False, delta=1, **analyse_args) if maxTR > 1: - bVec2 = core.mv.Analyse(srchSuper, isb=True, delta=2, **analyse_args) - fVec2 = core.mv.Analyse(srchSuper, isb=False, 
delta=2, **analyse_args) + bVec2 = srchSuper.mv.Analyse(isb=True, delta=2, **analyse_args) + fVec2 = srchSuper.mv.Analyse(isb=False, delta=2, **analyse_args) if maxTR > 2: - bVec3 = core.mv.Analyse(srchSuper, isb=True, delta=3, **analyse_args) - fVec3 = core.mv.Analyse(srchSuper, isb=False, delta=3, **analyse_args) + bVec3 = srchSuper.mv.Analyse(isb=True, delta=3, **analyse_args) + fVec3 = srchSuper.mv.Analyse(isb=False, delta=3, **analyse_args) #--------------------------------------- # Noise Processing @@ -1230,7 +1248,7 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= else: fullClip = Bob(clip, 0, 1, TFF) if NoiseTR > 0: - fullSuper = core.mv.Super(fullClip, pel=SubPel, levels=1, hpad=hpad, vpad=vpad, chroma=ChromaNoise) #TEST chroma OK? + fullSuper = fullClip.mv.Super(pel=SubPel, levels=1, hpad=hpad, vpad=vpad, chroma=ChromaNoise) #TEST chroma OK? CNplanes = [0, 1, 2] if ChromaNoise and not isGray else [0] @@ -1249,14 +1267,14 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= core.mv.Compensate(fullClip, fullSuper, bVec1, thscd1=ThSCD1, thscd2=ThSCD2), core.mv.Compensate(fullClip, fullSuper, bVec2, thscd1=ThSCD1, thscd2=ThSCD2)]) if Denoiser == 'dfttest': - dnWindow = core.dfttest.DFTTest(noiseWindow, sigma=Sigma * 4, tbsize=noiseTD, planes=CNplanes) + dnWindow = noiseWindow.dfttest.DFTTest(sigma=Sigma * 4, tbsize=noiseTD, planes=CNplanes) elif Denoiser == 'knlmeanscl': if ChromaNoise and not isGray: dnWindow = KNLMeansCL(noiseWindow, d=NoiseTR, h=Sigma) else: - dnWindow = core.knlm.KNLMeansCL(noiseWindow, d=NoiseTR, h=Sigma) + dnWindow = noiseWindow.knlm.KNLMeansCL(d=NoiseTR, h=Sigma) else: - dnWindow = core.neo_fft3d.FFT3D(noiseWindow, sigma=Sigma, planes=CNplanes, bt=noiseTD) + dnWindow = noiseWindow.neo_fft3d.FFT3D(sigma=Sigma, planes=CNplanes, bt=noiseTD) # Rework denoised clip to match source format - various code paths here: discard the motion compensation window, discard doubled lines (from point resize) # Also reweave to get interlaced noise if source was interlaced (could keep the full frame of noise, but it will be poor quality from the point resize) @@ -1264,14 +1282,14 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= if InputType > 0: denoised = dnWindow else: - denoised = Weave(core.std.SeparateFields(dnWindow, TFF).std.SelectEvery(4, [0, 3]), TFF) + denoised = Weave(dnWindow.std.SeparateFields(tff=TFF).std.SelectEvery(cycle=4, offsets=[0, 3]), tff=TFF) elif InputType > 0: if NoiseTR <= 0: denoised = dnWindow else: - denoised = core.std.SelectEvery(dnWindow, noiseTD, [NoiseTR]) + denoised = dnWindow.std.SelectEvery(cycle=noiseTD, offsets=[NoiseTR]) else: - denoised = Weave(core.std.SeparateFields(dnWindow, TFF).std.SelectEvery(noiseTD * 4, [NoiseTR * 2, NoiseTR * 6 + 3]), TFF) + denoised = Weave(dnWindow.std.SeparateFields(tff=TFF).std.SelectEvery(cycle=noiseTD * 4, offsets=[NoiseTR * 2, NoiseTR * 6 + 3]), tff=TFF) # Get actual noise from difference. 
Then 'deinterlace' where we have weaved noise - create the missing lines of noise in various ways if NoiseProcess > 0 and totalRestore > 0: @@ -1283,11 +1301,11 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= elif NoiseDeint == 'generate': deintNoise = QTGMC_Generate2ndFieldNoise(noise, denoised, ChromaNoise, TFF) else: - deintNoise = core.std.SeparateFields(noise, TFF).std.DoubleWeave(TFF) + deintNoise = noise.std.SeparateFields(tff=TFF).std.DoubleWeave(tff=TFF) # Motion-compensated stabilization of generated noise if StabilizeNoise: - noiseSuper = core.mv.Super(deintNoise, pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad, chroma=ChromaNoise) + noiseSuper = deintNoise.mv.Super(pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad, chroma=ChromaNoise) mcNoise = core.mv.Compensate(deintNoise, noiseSuper, bVec1, thscd1=ThSCD1, thscd2=ThSCD2) expr = f'x {neutral} - abs y {neutral} - abs > x y ? 0.6 * x y + 0.2 * +' finalNoise = core.std.Expr([deintNoise, mcNoise], expr=[expr] if ChromaNoise or isGray else [expr, '']) @@ -1302,13 +1320,13 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= # Support badly deinterlaced progressive content - drop half the fields and reweave to get 1/2fps interlaced stream appropriate for QTGMC processing if InputType > 1: - ediInput = Weave(core.std.SeparateFields(innerClip, TFF).std.SelectEvery(4, [0, 3]), TFF) + ediInput = Weave(innerClip.std.SeparateFields(tff=TFF).std.SelectEvery(cycle=4, offsets=[0, 3]), tff=TFF) else: ediInput = innerClip # Create interpolated image as starting point for output if EdiExt is not None: - edi1 = core.resize.Point(EdiExt, w, h, src_top=(EdiExt.height - h) // 2, src_height=h) + edi1 = EdiExt.resize.Point(w, h, src_top=(EdiExt.height - h) // 2, src_height=h) else: edi1 = QTGMC_Interpolate(ediInput, InputType, EdiMode, NNSize, NNeurons, EdiQual, EdiMaxD, pscrn, int16_prescreener, int16_predictor, exp, alpha, beta, gamma, nrad, vcheck, bobbed, ChromaEdi, TFF, opencl, device) @@ -1327,7 +1345,7 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= # Get the max/min value for each pixel over neighboring motion-compensated frames - used for temporal sharpness limiting if TR1 > 0 or temporalSL: - ediSuper = core.mv.Super(edi, pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) + ediSuper = edi.mv.Super(pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) if temporalSL: bComp1 = core.mv.Compensate(edi, ediSuper, bVec1, thscd1=ThSCD1, thscd2=ThSCD2) fComp1 = core.mv.Compensate(edi, ediSuper, fVec1, thscd1=ThSCD1, thscd2=ThSCD2) @@ -1387,23 +1405,23 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= if SMode <= 0: resharp = lossed1 elif SMode == 1: - resharp = core.std.Expr([lossed1, core.std.Convolution(lossed1, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1])], expr=[f'x x y - {sharpAdj} * +']) + resharp = core.std.Expr([lossed1, lossed1.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1])], expr=[f'x x y - {sharpAdj} * +']) else: - vresharp1 = core.std.Merge(core.std.Maximum(lossed1, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]), core.std.Minimum(lossed1, coordinates=[0, 1, 0, 0, 0, 0, 1, 0])) + vresharp1 = core.std.Merge(lossed1.std.Maximum(coordinates=[0, 1, 0, 0, 0, 0, 1, 0]), lossed1.std.Minimum(coordinates=[0, 1, 0, 0, 0, 0, 1, 0])) if Precise: vresharp = core.std.Expr([vresharp1, lossed1], expr=['x y < x {i} + x y > x {i} - x ? 
?'.format(i=scale(1, peak))]) # Precise mode: reduce tiny overshoot else: vresharp = vresharp1 - resharp = core.std.Expr([lossed1, core.std.Convolution(vresharp, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1])], expr=[f'x x y - {sharpAdj} * +']) + resharp = core.std.Expr([lossed1, vresharp.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1])], expr=[f'x x y - {sharpAdj} * +']) # Slightly thin down 1-pixel high horizontal edges that have been widened into neigboring field lines by the interpolator SVThinSc = SVThin * 6.0 if SVThin > 0: expr = f'y x - {SVThinSc} * {neutral} +' - vertMedD = core.std.Expr([lossed1, core.rgvs.VerticalCleaner(lossed1, mode=[1] if isGray else [1, 0])], expr=[expr] if isGray else [expr, '']) - vertMedD = core.std.Convolution(vertMedD, matrix=[1, 2, 1], planes=[0], mode='h') + vertMedD = core.std.Expr([lossed1, lossed1.rgvs.VerticalCleaner(mode=[1] if isGray else [1, 0])], expr=[expr] if isGray else [expr, '']) + vertMedD = vertMedD.std.Convolution(matrix=[1, 2, 1], planes=[0], mode='h') expr = f'y {neutral} - abs x {neutral} - abs > y {neutral} ?' - neighborD = core.std.Expr([vertMedD, core.std.Convolution(vertMedD, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=[0])], expr=[expr] if isGray else [expr, '']) + neighborD = core.std.Expr([vertMedD, vertMedD.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=[0])], expr=[expr] if isGray else [expr, '']) thin = core.std.MergeDiff(resharp, neighborD, planes=[0]) else: thin = resharp @@ -1444,11 +1462,11 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= addNoise1 = backBlend2 else: expr = f'x {noiseCentre} - {GrainRestore} * {neutral} +' - addNoise1 = core.std.MergeDiff(backBlend2, core.std.Expr([finalNoise], expr=[expr] if ChromaNoise or isGray else [expr, '']), planes=CNplanes) + addNoise1 = core.std.MergeDiff(backBlend2, finalNoise.std.Expr(expr=[expr] if ChromaNoise or isGray else [expr, '']), planes=CNplanes) # Final light linear temporal smooth for denoising if TR2 > 0: - stableSuper = core.mv.Super(addNoise1, pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) + stableSuper = addNoise1.mv.Super(pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) if TR2 <= 0: stable = addNoise1 elif TR2 == 1: @@ -1489,7 +1507,7 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= addNoise2 = lossed2 else: expr = f'x {noiseCentre} - {NoiseRestore} * {neutral} +' - addNoise2 = core.std.MergeDiff(lossed2, core.std.Expr([finalNoise], expr=[expr] if ChromaNoise or isGray else [expr, '']), planes=CNplanes) + addNoise2 = core.std.MergeDiff(lossed2, finalNoise.std.Expr(expr=[expr] if ChromaNoise or isGray else [expr, '']), planes=CNplanes) #--------------------------------------- # Post-Processing @@ -1520,7 +1538,7 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= # Shutter motion blur - use MFlowBlur to blur along motion vectors if ShutterBlur > 0: - sblurSuper = core.mv.Super(addNoise2, pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) + sblurSuper = addNoise2.mv.Super(pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) sblur = core.mv.FlowBlur(addNoise2, sblurSuper, sbBVec1, sbFVec1, blur=blurLevel, thscd1=ThSCD1, thscd2=ThSCD2) # Shutter motion blur - use motion mask to reduce blurring in areas of low motion - also helps reduce blur "bleeding" into static areas, then select blur type @@ -1534,13 +1552,13 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= # Reduce 
frame rate if FPSDivisor > 1: - decimated = core.std.SelectEvery(sblurred, FPSDivisor, [0]) + decimated = sblurred.std.SelectEvery(cycle=FPSDivisor, offsets=[0]) else: decimated = sblurred # Crop off temporary vertical padding if Border: - cropped = core.std.Crop(decimated, top=4, bottom=4) + cropped = decimated.std.Crop(top=4, bottom=4) else: cropped = decimated @@ -1549,8 +1567,8 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= output = cropped else: expr = f'x {neutral} - {ShowNoise} * {neutral} +' - output = core.std.Expr([finalNoise], expr=[expr] if ChromaNoise or isGray else [expr, repr(neutral)]) - output = core.std.SetFieldBased(output, value=0) + output = finalNoise.std.Expr(expr=[expr] if ChromaNoise or isGray else [expr, repr(neutral)]) + output = output.std.SetFieldBased(value=0) if not ShowSettings: return output else: @@ -1564,7 +1582,7 @@ def QTGMC(Input, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1= f"GrainRestore={GrainRestore} | NoiseRestore={NoiseRestore} | NoiseDeint='{NoiseDeint}' | StabilizeNoise={StabilizeNoise} | InputType={InputType} | ProgSADMask={ProgSADMask} | " + \ f"FPSDivisor={FPSDivisor} | ShutterBlur={ShutterBlur} | ShutterAngleSrc={ShutterAngleSrc} | ShutterAngleOut={ShutterAngleOut} | SBlurLimit={SBlurLimit} | Border={Border} | " + \ f"Precise={Precise} | Preset='{Preset}' | Tuning='{Tuning}' | ForceTR={ForceTR}" - return core.text.Text(output, text) + return output.text.Text(text=text) #--------------------------------------- # Helpers @@ -1615,11 +1633,14 @@ def QTGMC_Interpolate(Input, InputType, EdiMode, NNSize, NNeurons, EdiQual, EdiM # Rough algorithm: Get difference, deflate vertically by a couple of pixels or so, then inflate again. Thin regions will be removed # by this process. Restore remaining areas of difference back to as they were in reference clip def QTGMC_KeepOnlyBobShimmerFixes(Input, Ref, Rep=1, Chroma=True): - neutral = 1 << (Input.format.bits_per_sample - 1) - peak = (1 << Input.format.bits_per_sample) - 1 + + isGray = (Input.format.color_family == vs.GRAY) planes = [0, 1, 2] if Chroma and not isGray else [0] + neutral = 1 << (Input.format.bits_per_sample - 1) + peak = (1 << Input.format.bits_per_sample) - 1 + # ed is the erosion distance - how much to deflate then reflate to remove thin areas of interest: 0 = minimum to 6 = maximum # od is over-dilation level - extra inflation to ensure areas to restore back are fully caught: 0 = none to 3 = one full pixel # If Rep < 10, then ed = Rep and od = 0, otherwise ed = 10s digit and od = 1s digit (nasty method, but kept for compatibility with original TGMC) @@ -1628,40 +1649,40 @@ def QTGMC_KeepOnlyBobShimmerFixes(Input, Ref, Rep=1, Chroma=True): diff = core.std.MakeDiff(Ref, Input) - # Areas of positive difference # ed = 0 1 2 3 4 5 6 7 - choke1 = core.std.Minimum(diff, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # x x x x x x x x 1 pixel \ - if ed > 2: choke1 = core.std.Minimum(choke1, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . . x x x x x 1 pixel | Deflate to remove thin areas - if ed > 5: choke1 = core.std.Minimum(choke1, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . . . . . x x 1 pixel / - if ed % 3 != 0: choke1 = core.std.Deflate(choke1, planes=planes) # . x x . x x . x A bit more deflate & some horizonal effect - if ed in [2, 5]: choke1 = core.std.Median(choke1, planes=planes) # . . x . . x . . 
Local median - choke1 = core.std.Maximum(choke1, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # x x x x x x x x 1 pixel \ - if ed > 1: choke1 = core.std.Maximum(choke1, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . x x x x x x 1 pixel | Reflate again - if ed > 4: choke1 = core.std.Maximum(choke1, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . . . . x x x 1 pixel / + # Areas of positive difference # ed = 0 1 2 3 4 5 6 7 + choke1 = diff.std.Minimum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # x x x x x x x x 1 pixel \ + if ed > 2: choke1 = choke1.std.Minimum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . . x x x x x 1 pixel | Deflate to remove thin areas + if ed > 5: choke1 = choke1.std.Minimum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . . . . . x x 1 pixel / + if ed % 3 != 0: choke1 = choke1.std.Deflate(planes=planes) # . x x . x x . x A bit more deflate & some horizonal effect + if ed in [2, 5]: choke1 = choke1.std.Median(planes=planes) # . . x . . x . . Local median + choke1 = choke1.std.Maximum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # x x x x x x x x 1 pixel \ + if ed > 1: choke1 = choke1.std.Maximum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . x x x x x x 1 pixel | Reflate again + if ed > 4: choke1 = choke1.std.Maximum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . . . . x x x 1 pixel / # Over-dilation - extra reflation up to about 1 pixel if od == 1: - choke1 = core.std.Inflate(choke1, planes=planes) + choke1 = choke1.std.Inflate(planes=planes) elif od == 2: - choke1 = core.std.Inflate(choke1, planes=planes).std.Inflate(planes=planes) + choke1 = choke1.std.Inflate(planes=planes).std.Inflate(planes=planes) elif od >= 3: - choke1 = core.std.Maximum(choke1, planes=planes) + choke1 = choke1.std.Maximum(planes=planes) # Areas of negative difference (similar to above) - choke2 = core.std.Maximum(diff, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) - if ed > 2: choke2 = core.std.Maximum(choke2, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) - if ed > 5: choke2 = core.std.Maximum(choke2, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) - if ed % 3 != 0: choke2 = core.std.Inflate(choke2, planes=planes) - if ed in [2, 5]: choke2 = core.std.Median(choke2, planes=planes) - choke2 = core.std.Minimum(choke2, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) - if ed > 1: choke2 = core.std.Minimum(choke2, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) - if ed > 4: choke2 = core.std.Minimum(choke2, planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) + choke2 = diff.std.Maximum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) + if ed > 2: choke2 = choke2.std.Maximum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) + if ed > 5: choke2 = choke2.std.Maximum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) + if ed % 3 != 0: choke2 = choke2.std.Inflate(planes=planes) + if ed in [2, 5]: choke2 = choke2.std.Median(planes=planes) + choke2 = choke2.std.Minimum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) + if ed > 1: choke2 = choke2.std.Minimum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) + if ed > 4: choke2 = choke2.std.Minimum(planes=planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) if od == 1: - choke2 = core.std.Deflate(choke2, planes=planes) + choke2 = choke2.std.Deflate(planes=planes) elif od == 2: - choke2 = core.std.Deflate(choke2, planes=planes).std.Deflate(planes=planes) + choke2 = choke2.std.Deflate(planes=planes).std.Deflate(planes=planes) 
elif od >= 3: - choke2 = core.std.Minimum(choke2, planes=planes) + choke2 = choke2.std.Minimum(planes=planes) # Combine above areas to find those areas of difference to restore expr1 = f'x {scale(129, peak)} < x y {neutral} < {neutral} y ? ?' @@ -1673,20 +1694,23 @@ def QTGMC_KeepOnlyBobShimmerFixes(Input, Ref, Rep=1, Chroma=True): # Given noise extracted from an interlaced source (i.e. the noise is interlaced), generate "progressive" noise with a new "field" of noise injected. The new # noise is centered on a weighted local average and uses the difference between local min & max as an estimate of local variance def QTGMC_Generate2ndFieldNoise(Input, InterleavedClip, ChromaNoise=False, TFF=None): - neutral = 1 << (Input.format.bits_per_sample - 1) - peak = (1 << Input.format.bits_per_sample) - 1 + + isGray = (Input.format.color_family == vs.GRAY) planes = [0, 1, 2] if ChromaNoise and not isGray else [0] - origNoise = core.std.SeparateFields(Input, TFF) - noiseMax = core.std.Maximum(origNoise, planes=planes).std.Maximum(planes=planes, coordinates=[0, 0, 0, 1, 1, 0, 0, 0]) - noiseMin = core.std.Minimum(origNoise, planes=planes).std.Minimum(planes=planes, coordinates=[0, 0, 0, 1, 1, 0, 0, 0]) - random = core.std.SeparateFields(InterleavedClip, TFF).std.BlankClip(color=[neutral] * Input.format.num_planes).grain.Add(var=1800, uvar=1800 if ChromaNoise else 0) + neutral = 1 << (Input.format.bits_per_sample - 1) + peak = (1 << Input.format.bits_per_sample) - 1 + + origNoise = Input.std.SeparateFields(tff=TFF) + noiseMax = origNoise.std.Maximum(planes=planes).std.Maximum(planes=planes, coordinates=[0, 0, 0, 1, 1, 0, 0, 0]) + noiseMin = origNoise.std.Minimum(planes=planes).std.Minimum(planes=planes, coordinates=[0, 0, 0, 1, 1, 0, 0, 0]) + random = InterleavedClip.std.SeparateFields(tff=TFF).std.BlankClip(color=[neutral] * Input.format.num_planes).grain.Add(var=1800, uvar=1800 if ChromaNoise else 0) expr = f'x {neutral} - y * {scale(256, peak)} / {neutral} +' varRandom = core.std.Expr([core.std.MakeDiff(noiseMax, noiseMin, planes=planes), random], expr=[expr] if ChromaNoise or isGray else [expr, '']) newNoise = core.std.MergeDiff(noiseMin, varRandom, planes=planes) - return Weave(core.std.Interleave([origNoise, newNoise]), TFF) + return Weave(core.std.Interleave([origNoise, newNoise]), tff=TFF) # Insert the source lines into the result to create a true lossless output. However, the other lines in the result have had considerable processing and won't # exactly match source lines. There will be some slight residual combing. 
Use vertical medians to clean a little of this away @@ -1698,22 +1722,22 @@ def QTGMC_MakeLossless(Input, Source, InputType, TFF): # Weave the source fields and the "new" fields that have generated in the input if InputType <= 0: - srcFields = core.std.SeparateFields(Source, TFF) + srcFields = Source.std.SeparateFields(tff=TFF) else: - srcFields = core.std.SeparateFields(Source, TFF).std.SelectEvery(4, [0, 3]) - newFields = core.std.SeparateFields(Input, TFF).std.SelectEvery(4, [1, 2]) - processed = Weave(core.std.Interleave([srcFields, newFields]).std.SelectEvery(4, [0, 1, 3, 2]), TFF) + srcFields = Source.std.SeparateFields(tff=TFF).std.SelectEvery(cycle=4, offsets=[0, 3]) + newFields = Input.std.SeparateFields(tff=TFF).std.SelectEvery(cycle=4, offsets=[1, 2]) + processed = Weave(core.std.Interleave([srcFields, newFields]).std.SelectEvery(cycle=4, offsets=[0, 1, 3, 2]), tff=TFF) # Clean some of the artefacts caused by the above - creating a second version of the "new" fields - vertMedian = core.rgvs.VerticalCleaner(processed, mode=[1]) + vertMedian = processed.rgvs.VerticalCleaner(mode=[1]) vertMedDiff = core.std.MakeDiff(processed, vertMedian) - vmNewDiff1 = core.std.SeparateFields(vertMedDiff, TFF).std.SelectEvery(4, [1, 2]) + vmNewDiff1 = vertMedDiff.std.SeparateFields(tff=TFF).std.SelectEvery(cycle=4, offsets=[1, 2]) expr = f'x {neutral} - y {neutral} - * 0 < {neutral} x {neutral} - abs y {neutral} - abs < x y ? ?' - vmNewDiff2 = core.std.Expr([core.rgvs.VerticalCleaner(vmNewDiff1, mode=[1]), vmNewDiff1], expr=[expr]) - vmNewDiff3 = core.rgvs.Repair(vmNewDiff2, core.rgvs.RemoveGrain(vmNewDiff2, mode=[2]), mode=[1]) + vmNewDiff2 = core.std.Expr([vmNewDiff1.rgvs.VerticalCleaner(mode=[1]), vmNewDiff1], expr=[expr]) + vmNewDiff3 = core.rgvs.Repair(vmNewDiff2, vmNewDiff2.rgvs.RemoveGrain(mode=[2]), mode=[1]) # Reweave final result - return Weave(core.std.Interleave([srcFields, core.std.MakeDiff(newFields, vmNewDiff3)]).std.SelectEvery(4, [0, 1, 3, 2]), TFF) + return Weave(core.std.Interleave([srcFields, core.std.MakeDiff(newFields, vmNewDiff3)]).std.SelectEvery(cycle=4, offsets=[0, 1, 3, 2]), tff=TFF) # Source-match, a three stage process that takes the difference between deinterlaced input and the original interlaced source, to shift the input more towards # the source without introducing shimmer. 
All other arguments defined in main script @@ -1733,7 +1757,7 @@ def QTGMC_ApplySourceMatch(Deinterlace, InputType, Source, bVec1, fVec1, bVec2, if SourceMatch < 1 or InputType == 1: match1Clip = Deinterlace else: - match1Clip = Weave(core.std.SeparateFields(Deinterlace, TFF).std.SelectEvery(4, [0, 3]), TFF) + match1Clip = Weave(Deinterlace.std.SeparateFields(tff=TFF).std.SelectEvery(cycle=4, offsets=[0, 3]), tff=TFF) if SourceMatch < 1 or MatchTR1 <= 0: match1Update = Source else: @@ -1742,7 +1766,7 @@ def QTGMC_ApplySourceMatch(Deinterlace, InputType, Source, bVec1, fVec1, bVec2, match1Edi = QTGMC_Interpolate(match1Update, InputType, MatchEdi, MatchNNSize, MatchNNeurons, MatchEdiQual, MatchEdiMaxD, pscrn, int16_prescreener, int16_predictor, exp, alpha, beta, gamma, nrad, vcheck, TFF=TFF, opencl=opencl, device=device) if MatchTR1 > 0: - match1Super = core.mv.Super(match1Edi, pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) + match1Super = match1Edi.mv.Super(pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) match1Degrain1 = core.mv.Degrain1(match1Edi, match1Super, bVec1, fVec1, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2) if MatchTR1 > 1: match1Degrain2 = core.mv.Degrain1(match1Edi, match1Super, bVec2, fVec2, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2) @@ -1760,7 +1784,7 @@ def QTGMC_ApplySourceMatch(Deinterlace, InputType, Source, bVec1, fVec1, bVec2, # Enhance effect of source-match stages 2 & 3 by sharpening clip prior to refinement (source-match tends to underestimate so this will leave result sharper) if SourceMatch > 1 and MatchEnhance > 0: - match1Shp = core.std.Expr([match1, core.std.Convolution(match1, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1])], expr=[f'x x y - {MatchEnhance} * +']) + match1Shp = core.std.Expr([match1, match1.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1])], expr=[f'x x y - {MatchEnhance} * +']) else: match1Shp = match1 @@ -1771,13 +1795,13 @@ def QTGMC_ApplySourceMatch(Deinterlace, InputType, Source, bVec1, fVec1, bVec2, if SourceMatch < 2 or InputType == 1: match2Clip = match1Shp else: - match2Clip = Weave(core.std.SeparateFields(match1Shp, TFF).std.SelectEvery(4, [0, 3]), TFF) + match2Clip = Weave(match1Shp.std.SeparateFields(tff=TFF).std.SelectEvery(cycle=4, offsets=[0, 3]), tff=TFF) if SourceMatch > 1: match2Diff = core.std.MakeDiff(Source, match2Clip) match2Edi = QTGMC_Interpolate(match2Diff, InputType, MatchEdi2, MatchNNSize2, MatchNNeurons2, MatchEdiQual2, MatchEdiMaxD2, pscrn, int16_prescreener, int16_predictor, exp, alpha, beta, gamma, nrad, vcheck, TFF=TFF, opencl=opencl, device=device) if MatchTR2 > 0: - match2Super = core.mv.Super(match2Edi, pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) + match2Super = match2Edi.mv.Super(pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) match2Degrain1 = core.mv.Degrain1(match2Edi, match2Super, bVec1, fVec1, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2) if MatchTR2 > 1: match2Degrain2 = core.mv.Degrain1(match2Edi, match2Super, bVec2, fVec2, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2) @@ -1798,7 +1822,7 @@ def QTGMC_ApplySourceMatch(Deinterlace, InputType, Source, bVec1, fVec1, bVec2, match3Update = core.std.Expr([match2Edi, match2], expr=[f'x {errorAdjust2 + 1} * y {errorAdjust2} * -']) if SourceMatch > 2: if MatchTR2 > 0: - match3Super = core.mv.Super(match3Update, pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) + match3Super = match3Update.mv.Super(pel=SubPel, sharp=SubPelInterp, levels=1, hpad=hpad, vpad=vpad) match3Degrain1 = 
core.mv.Degrain1(match3Update, match3Super, bVec1, fVec1, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2) if MatchTR2 > 1: match3Degrain2 = core.mv.Degrain1(match3Update, match3Super, bVec2, fVec2, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2) @@ -1826,33 +1850,36 @@ def smartfademod(clip, threshold=0.4, show=False, tff=None): def frame_eval(n, f, orig, defade): diff = abs(f[0].props['PlaneStatsAverage'] - f[1].props['PlaneStatsAverage']) * 255 if show: - return core.text.Text(orig, diff) - else: - return defade if diff > threshold else orig + return orig.text.Text(text=diff) + + return defade if diff > threshold else orig if not isinstance(clip, vs.VideoNode): raise vs.Error('smartfademod: This is not a clip') + if not isinstance(tff, bool): raise vs.Error("smartfademod: 'tff' must be set. Setting tff to true means top field first and false means bottom field first") - sep = core.std.SeparateFields(clip, tff) - even = core.std.PlaneStats(sep[::2]) - odd = core.std.PlaneStats(sep[1::2]) - return core.std.FrameEval(clip, eval=partial(frame_eval, orig=clip, defade=daa(clip)), prop_src=[even, odd]) + sep = clip.std.SeparateFields(tff=tff) + even = sep[::2].std.PlaneStats() + odd = sep[1::2].std.PlaneStats() + return clip.std.FrameEval(eval=partial(frame_eval, orig=clip, defade=daa(clip)), prop_src=[even, odd]) ###### srestore v2.7e ###### def srestore(source, frate=None, omode=6, speed=None, mode=2, thresh=16, dclip=None): if not isinstance(source, vs.VideoNode): raise vs.Error('srestore: This is not a clip') + if source.format.color_family != vs.YUV: - raise vs.Error('srestore: Only YUV color family supported') + raise vs.Error('srestore: Only YUV format is supported') + if dclip is None: dclip = source elif not isinstance(dclip, vs.VideoNode): raise vs.Error("srestore: 'dclip' is not a clip") elif dclip.format.color_family != vs.YUV: - raise vs.Error('srestore: Only YUV color family supported') + raise vs.Error('srestore: Only YUV format is supported') neutral = 1 << (source.format.bits_per_sample - 1) peak = (1 << source.format.bits_per_sample) - 1 @@ -1888,42 +1915,42 @@ def srestore(source, frate=None, omode=6, speed=None, mode=2, thresh=16, dclip=N ###### source preparation & lut ###### if abs(mode) >= 2 and not bom: - mec = core.std.Merge(core.std.Merge(source, core.std.Trim(source, 1), weight=[0, 0.5]), core.std.Trim(source, 1), weight=[0.5, 0]) + mec = core.std.Merge(core.std.Merge(source, source.std.Trim(first=1), weight=[0, 0.5]), source.std.Trim(first=1), weight=[0.5, 0]) if dclip.format.id != vs.YUV420P8: - dclip = core.resize.Bicubic(dclip, format=vs.YUV420P8) - dclip = core.resize.Point(dclip, dclip.width if srad == 4 else int(dclip.width / 2 / srad + 4) * 4, dclip.height if srad == 4 else int(dclip.height / 2 / srad + 4) * 4).std.Trim(2) + dclip = dclip.resize.Bicubic(format=vs.YUV420P8) + dclip = dclip.resize.Point(dclip.width if srad == 4 else int(dclip.width / 2 / srad + 4) * 4, dclip.height if srad == 4 else int(dclip.height / 2 / srad + 4) * 4).std.Trim(first=2) if mode < 0: dclip = core.std.StackVertical([core.std.StackHorizontal([mvf.GetPlane(dclip, 1), mvf.GetPlane(dclip, 2)]), mvf.GetPlane(dclip, 0)]) else: dclip = mvf.GetPlane(dclip, 0) if bom: - dclip = core.std.Expr([dclip], expr=['x 0.5 * 64 +']) + dclip = dclip.std.Expr(expr=['x 0.5 * 64 +']) expr1 = 'x 128 - y 128 - * 0 > x 128 - abs y 128 - abs < x 128 - 128 x - * y 128 - 128 y - * ? x y + 256 - dup * ? 
0.25 * 128 +' expr2 = 'x y - dup * 3 * x y + 256 - dup * - 128 +' - diff = core.std.MakeDiff(dclip, core.std.Trim(dclip, 1)) + diff = core.std.MakeDiff(dclip, dclip.std.Trim(first=1)) if not bom: - bclp = core.std.Expr([diff, core.std.Trim(diff, 1)], expr=[expr1]).resize.Bilinear(bsize, bsize) + bclp = core.std.Expr([diff, diff.std.Trim(first=1)], expr=[expr1]).resize.Bilinear(bsize, bsize) else: - bclp = core.std.Expr([core.std.Trim(diff, 1), core.std.MergeDiff(diff, core.std.Trim(diff, 2))], expr=[expr2]).resize.Bilinear(bsize, bsize) - dclp = core.std.Trim(diff, 1).std.Lut(function=lambda x: max(cround(abs(x - 128) ** 1.1 - 1), 0)).resize.Bilinear(bsize, bsize) + bclp = core.std.Expr([diff.std.Trim(first=1), core.std.MergeDiff(diff, diff.std.Trim(first=2))], expr=[expr2]).resize.Bilinear(bsize, bsize) + dclp = diff.std.Trim(first=1).std.Lut(function=lambda x: max(cround(abs(x - 128) ** 1.1 - 1), 0)).resize.Bilinear(bsize, bsize) ###### postprocessing ###### if bom: - sourceDuplicate = core.std.DuplicateFrames(source, [0]) - sourceTrim1 = core.std.Trim(source, 1) - sourceTrim2 = core.std.Trim(source, 2) + sourceDuplicate = source.std.DuplicateFrames(frames=[0]) + sourceTrim1 = source.std.Trim(first=1) + sourceTrim2 = source.std.Trim(first=2) unblend1 = core.std.Expr([sourceDuplicate, source], expr=['x -1 * y 2 * +']) unblend2 = core.std.Expr([sourceTrim1, sourceTrim2], expr=['x 2 * y -1 * +']) - qmask1 = core.std.MakeDiff(core.std.Convolution(unblend1, matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1], planes=[0]), unblend1, planes=[0]) - qmask2 = core.std.MakeDiff(core.std.Convolution(unblend2, matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1], planes=[0]), unblend2, planes=[0]) + qmask1 = core.std.MakeDiff(unblend1.std.Convolution(matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1], planes=[0]), unblend1, planes=[0]) + qmask2 = core.std.MakeDiff(unblend2.std.Convolution(matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1], planes=[0]), unblend2, planes=[0]) diffm = core.std.MakeDiff(sourceDuplicate, source, planes=[0]).std.Maximum(planes=[0]) bmask = core.std.Expr([qmask1, qmask2], expr=[f'x {neutral} - dup * dup y {neutral} - dup * + / {peak} *', '']) expr = 'x 2 * y < x {i} < and 0 y 2 * x < y {i} < and {peak} x x y + / {j} * {k} + ? 
?'.format(i=scale(4, peak), peak=peak, j=scale(200, peak), k=scale(28, peak)) - dmask = core.std.Expr([diffm, core.std.Trim(diffm, 2)], expr=[expr, '']) + dmask = core.std.Expr([diffm, diffm.std.Trim(first=2)], expr=[expr, '']) pmask = core.std.Expr([dmask, bmask], expr=[f'y 0 > y {peak} < and x 0 = x {peak} = or and x y ?', '']) matrix = [1, 2, 1, 2, 4, 2, 1, 2, 1] @@ -1932,11 +1959,11 @@ def srestore(source, frate=None, omode=6, speed=None, mode=2, thresh=16, dclip=N if omode == 'pp0': fin = core.std.Expr([sourceDuplicate, source, sourceTrim1, sourceTrim2], expr=['x -0.5 * y + z + a -0.5 * +']) elif omode == 'pp1': - fin = core.std.MaskedMerge(unblend1, unblend2, core.std.Convolution(dmask, matrix=matrix, planes=[0]).std.Expr(expr=['', repr(neutral)])) + fin = core.std.MaskedMerge(unblend1, unblend2, dmask.std.Convolution(matrix=matrix, planes=[0]).std.Expr(expr=['', repr(neutral)])) elif omode == 'pp2': - fin = core.std.MaskedMerge(unblend1, unblend2, core.std.Convolution(bmask, matrix=matrix, planes=[0]), first_plane=True) + fin = core.std.MaskedMerge(unblend1, unblend2, bmask.std.Convolution(matrix=matrix, planes=[0]), first_plane=True) elif omode == 'pp3': - fin = core.std.MaskedMerge(unblend1, unblend2, core.std.Convolution(pmask, matrix=matrix, planes=[0]), first_plane=True).std.Convolution(matrix=matrix, planes=[1, 2]) + fin = core.std.MaskedMerge(unblend1, unblend2, pmask.std.Convolution(matrix=matrix, planes=[0]), first_plane=True).std.Convolution(matrix=matrix, planes=[1, 2]) else: raise vs.Error('srestore: unexpected value for omode') @@ -2156,24 +2183,25 @@ def srestore_inside(n, f): oclp = mec if mer and dup == 0 else source opos += dup - (1 if dup == 0 and mer and dbc < dcn else 0) if opos < 0: - return core.std.DuplicateFrames(oclp, [0] * -opos) + return oclp.std.DuplicateFrames(frames=[0] * -opos) else: - return core.std.Trim(oclp, opos) + return oclp.std.Trim(first=opos) ###### evaluation call & output calculation ###### - bclpYStats = core.std.PlaneStats(bclp) - dclpYStats = core.std.PlaneStats(dclp) - dclipYStats = core.std.PlaneStats(dclip, core.std.Trim(dclip, 2)) - last = core.std.FrameEval(source, eval=srestore_inside, prop_src=[bclpYStats, dclpYStats, dclipYStats]) + bclpYStats = bclp.std.PlaneStats() + dclpYStats = dclp.std.PlaneStats() + dclipYStats = core.std.PlaneStats(dclip, dclip.std.Trim(first=2)) + last = source.std.FrameEval(eval=srestore_inside, prop_src=[bclpYStats, dclpYStats, dclipYStats]) ###### final decimation ###### - return ChangeFPS(core.std.Cache(last, make_linear=True), source.fps_num * numr, source.fps_den * denm) + return ChangeFPS(last.std.Cache(make_linear=True), source.fps_num * numr, source.fps_den * denm) # frame_ref = start of AABCD pattern def dec_txt60mc(src, frame_ref, srcbob=False, draft=False, tff=None, opencl=False, device=None): if not isinstance(src, vs.VideoNode): raise vs.Error('dec_txt60mc: This is not a clip') + if not (srcbob or isinstance(tff, bool)): raise vs.Error("dec_txt60mc: 'tff' must be set when srcbob=False. 
Setting tff to true means top field first and false means bottom field first") @@ -2191,27 +2219,28 @@ def dec_txt60mc(src, frame_ref, srcbob=False, draft=False, tff=None, opencl=Fals else: last = QTGMC(src, TR0=1, TR1=1, TR2=1, SourceMatch=3, Lossless=2, TFF=tff, opencl=opencl, device=device) - clean = core.std.SelectEvery(last, 5, [4 - invpos]) + clean = last.std.SelectEvery(cycle=5, offsets=[4 - invpos]) if invpos > 2: - jitter = core.std.AssumeFPS(core.std.Trim(last, 0, 0) * 2 + core.std.SelectEvery(last, 5, [6 - invpos, 7 - invpos]), fpsnum=24000, fpsden=1001) + jitter = core.std.AssumeFPS(last.std.Trim(length=1) * 2 + last.std.SelectEvery(cycle=5, offsets=[6 - invpos, 7 - invpos]), fpsnum=24000, fpsden=1001) elif invpos > 1: - jitter = core.std.AssumeFPS(core.std.Trim(last, 0, 0) + core.std.SelectEvery(last, 5, [2 - invpos, 6 - invpos]), fpsnum=24000, fpsden=1001) + jitter = core.std.AssumeFPS(last.std.Trim(length=1) + last.std.SelectEvery(cycle=5, offsets=[2 - invpos, 6 - invpos]), fpsnum=24000, fpsden=1001) else: - jitter = core.std.SelectEvery(last, 5, [1 - invpos, 2 - invpos]) + jitter = last.std.SelectEvery(cycle=5, offsets=[1 - invpos, 2 - invpos]) jsup_pre = DitherLumaRebuild(jitter, s0=1).mv.Super(pel=pel) - jsup = core.mv.Super(jitter, pel=pel, levels=1) - vect_f = core.mv.Analyse(jsup_pre, blksize=blksize, isb=False, delta=1, overlap=overlap) - vect_b = core.mv.Analyse(jsup_pre, blksize=blksize, isb=True, delta=1, overlap=overlap) + jsup = jitter.mv.Super(pel=pel, levels=1) + vect_f = jsup_pre.mv.Analyse(blksize=blksize, isb=False, delta=1, overlap=overlap) + vect_b = jsup_pre.mv.Analyse(blksize=blksize, isb=True, delta=1, overlap=overlap) comp = core.mv.FlowInter(jitter, jsup, vect_b, vect_f) fixed = comp[::2] last = core.std.Interleave([fixed, clean]) - return core.std.Trim(last, invpos // 3) + return last.std.Trim(first=invpos // 3) # 30pテロ部を24pに変換して返す def ivtc_txt30mc(src, frame_ref, draft=False, tff=None, opencl=False, device=None): if not isinstance(src, vs.VideoNode): raise vs.Error('ivtc_txt30mc: This is not a clip') + if not isinstance(tff, bool): raise vs.Error("ivtc_txt30mc: 'tff' must be set. 
Setting tff to true means top field first and false means bottom field first") @@ -2230,42 +2259,42 @@ def ivtc_txt30mc(src, frame_ref, draft=False, tff=None, opencl=False, device=Non if pattern == 0: if offset == -1: - c1 = core.std.AssumeFPS(core.std.Trim(last, 0, 0) + core.std.SelectEvery(last, 10, [2 + offset, 7 + offset, 5 + offset, 10 + offset]), fpsnum=24000, fpsden=1001) + c1 = core.std.AssumeFPS(last.std.Trim(length=1) + last.std.SelectEvery(cycle=10, offsets=[2 + offset, 7 + offset, 5 + offset, 10 + offset]), fpsnum=24000, fpsden=1001) else: - c1 = core.std.SelectEvery(last, 10, [offset, 2 + offset, 7 + offset, 5 + offset]) + c1 = last.std.SelectEvery(cycle=10, offsets=[offset, 2 + offset, 7 + offset, 5 + offset]) if offset == 1: - part1 = core.std.SelectEvery(last, 10, [4]) - part2 = core.std.SelectEvery(last, 10, [5]) - part3 = core.std.Trim(last, 10).std.SelectEvery(10, [0]) - part4 = core.std.SelectEvery(last, 10, [9]) + part1 = last.std.SelectEvery(cycle=10, offsets=[4]) + part2 = last.std.SelectEvery(cycle=10, offsets=[5]) + part3 = last.std.Trim(first=10).std.SelectEvery(cycle=10, offsets=[0]) + part4 = last.std.SelectEvery(cycle=10, offsets=[9]) c2 = core.std.Interleave([part1, part2, part3, part4]) else: - c2 = core.std.SelectEvery(last, 10, [3 + offset, 4 + offset, 9 + offset, 8 + offset]) + c2 = last.std.SelectEvery(cycle=10, offsets=[3 + offset, 4 + offset, 9 + offset, 8 + offset]) else: if offset == 1: - part1 = core.std.SelectEvery(last, 10, [3]) - part2 = core.std.SelectEvery(last, 10, [5]) - part3 = core.std.Trim(last, 10).std.SelectEvery(10, [0]) - part4 = core.std.SelectEvery(last, 10, [8]) + part1 = last.std.SelectEvery(cycle=10, offsets=[3]) + part2 = last.std.SelectEvery(cycle=10, offsets=[5]) + part3 = last.std.Trim(first=10).std.SelectEvery(cycle=10, offsets=[0]) + part4 = last.std.SelectEvery(cycle=10, offsets=[8]) c1 = core.std.Interleave([part1, part2, part3, part4]) else: - c1 = core.std.SelectEvery(last, 10, [2 + offset, 4 + offset, 9 + offset, 7 + offset]) + c1 = last.std.SelectEvery(cycle=10, offsets=[2 + offset, 4 + offset, 9 + offset, 7 + offset]) if offset == -1: - c2 = core.std.AssumeFPS(core.std.Trim(last, 0, 0) + core.std.SelectEvery(last, 10, [1 + offset, 6 + offset, 5 + offset, 10 + offset]), fpsnum=24000, fpsden=1001) + c2 = core.std.AssumeFPS(last.std.Trim(length=1) + last.std.SelectEvery(cycle=10, offsets=[1 + offset, 6 + offset, 5 + offset, 10 + offset]), fpsnum=24000, fpsden=1001) else: - c2 = core.std.SelectEvery(last, 10, [offset, 1 + offset, 6 + offset, 5 + offset]) + c2 = last.std.SelectEvery(cycle=10, offsets=[offset, 1 + offset, 6 + offset, 5 + offset]) super1_pre = DitherLumaRebuild(c1, s0=1).mv.Super(pel=pel) - super1 = core.mv.Super(c1, pel=pel, levels=1) - vect_f1 = core.mv.Analyse(super1_pre, blksize=blksize, isb=False, delta=1, overlap=overlap) - vect_b1 = core.mv.Analyse(super1_pre, blksize=blksize, isb=True, delta=1, overlap=overlap) - fix1 = core.mv.FlowInter(c1, super1, vect_b1, vect_f1, time=50 + direction * 25).std.SelectEvery(4, [0, 2]) + super1 = c1.mv.Super(pel=pel, levels=1) + vect_f1 = super1_pre.mv.Analyse(blksize=blksize, isb=False, delta=1, overlap=overlap) + vect_b1 = super1_pre.mv.Analyse(blksize=blksize, isb=True, delta=1, overlap=overlap) + fix1 = core.mv.FlowInter(c1, super1, vect_b1, vect_f1, time=50 + direction * 25).std.SelectEvery(cycle=4, offsets=[0, 2]) super2_pre = DitherLumaRebuild(c2, s0=1).mv.Super(pel=pel) - super2 = core.mv.Super(c2, pel=pel, levels=1) - vect_f2 = core.mv.Analyse(super2_pre, 
blksize=blksize, isb=False, delta=1, overlap=overlap) - vect_b2 = core.mv.Analyse(super2_pre, blksize=blksize, isb=True, delta=1, overlap=overlap) - fix2 = core.mv.FlowInter(c2, super2, vect_b2, vect_f2).std.SelectEvery(4, [0, 2]) + super2 = c2.mv.Super(pel=pel, levels=1) + vect_f2 = super2_pre.mv.Analyse(blksize=blksize, isb=False, delta=1, overlap=overlap) + vect_b2 = super2_pre.mv.Analyse(blksize=blksize, isb=True, delta=1, overlap=overlap) + fix2 = core.mv.FlowInter(c2, super2, vect_b2, vect_f2).std.SelectEvery(cycle=4, offsets=[0, 2]) if pattern == 0: return core.std.Interleave([fix1, fix2]) @@ -2278,6 +2307,7 @@ def ivtc_txt30mc(src, frame_ref, draft=False, tff=None, opencl=False, device=Non def ivtc_txt60mc(src, frame_ref, srcbob=False, draft=False, tff=None, opencl=False, device=None): if not isinstance(src, vs.VideoNode): raise vs.Error('ivtc_txt60mc: This is not a clip') + if not (srcbob or isinstance(tff, bool)): raise vs.Error("ivtc_txt60mc: 'tff' must be set when srcbob=False. Setting tff to true means top field first and false means bottom field first") @@ -2296,21 +2326,21 @@ def ivtc_txt60mc(src, frame_ref, srcbob=False, draft=False, tff=None, opencl=Fal last = QTGMC(src, TR0=1, TR1=1, TR2=1, SourceMatch=3, Lossless=2, TFF=tff, opencl=opencl, device=device) if invpos > 1: - clean = core.std.AssumeFPS(core.std.Trim(last, 0, 0) + core.std.SelectEvery(last, 5, [6 - invpos]), fpsnum=12000, fpsden=1001) + clean = core.std.AssumeFPS(last.std.Trim(length=1) + last.std.SelectEvery(cycle=5, offsets=[6 - invpos]), fpsnum=12000, fpsden=1001) else: - clean = core.std.SelectEvery(last, 5, [1 - invpos]) + clean = last.std.SelectEvery(cycle=5, offsets=[1 - invpos]) if invpos > 3: - jitter = core.std.AssumeFPS(core.std.Trim(last, 0, 0) + core.std.SelectEvery(last, 5, [4 - invpos, 8 - invpos]), fpsnum=24000, fpsden=1001) + jitter = core.std.AssumeFPS(last.std.Trim(length=1) + last.std.SelectEvery(cycle=5, offsets=[4 - invpos, 8 - invpos]), fpsnum=24000, fpsden=1001) else: - jitter = core.std.SelectEvery(last, 5, [3 - invpos, 4 - invpos]) + jitter = last.std.SelectEvery(cycle=5, offsets=[3 - invpos, 4 - invpos]) jsup_pre = DitherLumaRebuild(jitter, s0=1).mv.Super(pel=pel) - jsup = core.mv.Super(jitter, pel=pel, levels=1) - vect_f = core.mv.Analyse(jsup_pre, blksize=blksize, isb=False, delta=1, overlap=overlap) - vect_b = core.mv.Analyse(jsup_pre, blksize=blksize, isb=True, delta=1, overlap=overlap) + jsup = jitter.mv.Super(pel=pel, levels=1) + vect_f = jsup_pre.mv.Analyse(blksize=blksize, isb=False, delta=1, overlap=overlap) + vect_b = jsup_pre.mv.Analyse(blksize=blksize, isb=True, delta=1, overlap=overlap) comp = core.mv.FlowInter(jitter, jsup, vect_b, vect_f) fixed = comp[::2] last = core.std.Interleave([clean, fixed]) - return core.std.Trim(last, invpos // 2) + return last.std.Trim(first=invpos // 2) ################################################# @@ -2366,8 +2396,9 @@ def ivtc_txt60mc(src, frame_ref, srcbob=False, draft=False, tff=None, opencl=Fal def logoNR(dlg, src, chroma=True, l=0, t=0, r=0, b=0, d=1, a=2, s=2, h=3): if not (isinstance(dlg, vs.VideoNode) and isinstance(src, vs.VideoNode)): raise vs.Error('logoNR: This is not a clip') + if dlg.format.id != src.format.id: - raise vs.Error('logoNR: clips must have the same format') + raise vs.Error('logoNR: Clips must be the same format') if dlg.format.color_family == vs.GRAY: chroma = False @@ -2381,15 +2412,15 @@ def logoNR(dlg, src, chroma=True, l=0, t=0, r=0, b=0, d=1, a=2, s=2, h=3): b_crop = (l != 0) or (t != 0) or (r != 0) or 
(b != 0) if b_crop: - src = core.std.Crop(src, l, r, t, b) - last = core.std.Crop(dlg, l, r, t, b) + src = src.std.Crop(left=l, right=r, top=t, bottom=b) + last = dlg.std.Crop(left=l, right=r, top=t, bottom=b) else: last = dlg if chroma: clp_nr = KNLMeansCL(last, d=d, a=a, s=s, h=h) else: - clp_nr = core.knlm.KNLMeansCL(last, d=d, a=a, s=s, h=h) + clp_nr = last.knlm.KNLMeansCL(d=d, a=a, s=s, h=h) logoM = mt_expand_multi(core.std.Expr([last, src], expr=['x y - abs 16 *']), mode='losange', sw=3, sh=3).std.Convolution(matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1]).std.Deflate() clp_nr = core.std.MaskedMerge(last, clp_nr, logoM) if b_crop: @@ -2408,8 +2439,12 @@ def Vinverse(clp, sstr=2.7, amnt=255, chroma=True): if not isinstance(clp, vs.VideoNode): raise vs.Error('Vinverse: This is not a clip') - neutral = 1 << (clp.format.bits_per_sample - 1) - peak = (1 << clp.format.bits_per_sample) - 1 + if clp.format.sample_type == vs.INTEGER: + neutral = 1 << (clp.format.bits_per_sample - 1) + peak = (1 << clp.format.bits_per_sample) - 1 + else: + neutral = 0.0 + peak = 1.0 if not chroma and clp.format.color_family != vs.GRAY: clp_orig = clp @@ -2417,9 +2452,9 @@ def Vinverse(clp, sstr=2.7, amnt=255, chroma=True): else: clp_orig = None - vblur = core.std.Convolution(clp, matrix=[50, 99, 50], mode='v') + vblur = clp.std.Convolution(matrix=[50, 99, 50], mode='v') vblurD = core.std.MakeDiff(clp, vblur) - vshrp = core.std.Expr([vblur, core.std.Convolution(vblur, matrix=[1, 4, 6, 4, 1], mode='v')], expr=[f'x x y - {sstr} * +']) + vshrp = core.std.Expr([vblur, vblur.std.Convolution(matrix=[1, 4, 6, 4, 1], mode='v')], expr=[f'x x y - {sstr} * +']) vshrpD = core.std.MakeDiff(vshrp, vblur) expr = f'x {neutral} - y {neutral} - * 0 < x {neutral} - abs y {neutral} - abs < x y ? {neutral} - 0.25 * {neutral} + x {neutral} - abs y {neutral} - abs < x y ? ?' vlimD = core.std.Expr([vshrpD, vblurD], expr=[expr]) @@ -2438,8 +2473,12 @@ def Vinverse2(clp, sstr=2.7, amnt=255, chroma=True): if not isinstance(clp, vs.VideoNode): raise vs.Error('Vinverse2: This is not a clip') - neutral = 1 << (clp.format.bits_per_sample - 1) - peak = (1 << clp.format.bits_per_sample) - 1 + if clp.format.sample_type == vs.INTEGER: + neutral = 1 << (clp.format.bits_per_sample - 1) + peak = (1 << clp.format.bits_per_sample) - 1 + else: + neutral = 0.0 + peak = 1.0 if not chroma and clp.format.color_family != vs.GRAY: clp_orig = clp @@ -2449,7 +2488,7 @@ def Vinverse2(clp, sstr=2.7, amnt=255, chroma=True): vblur = sbrV(clp) vblurD = core.std.MakeDiff(clp, vblur) - vshrp = core.std.Expr([vblur, core.std.Convolution(vblur, matrix=[1, 2, 1], mode='v')], expr=[f'x x y - {sstr} * +']) + vshrp = core.std.Expr([vblur, vblur.std.Convolution(matrix=[1, 2, 1], mode='v')], expr=[f'x x y - {sstr} * +']) vshrpD = core.std.MakeDiff(vshrp, vblur) expr = f'x {neutral} - y {neutral} - * 0 < x {neutral} - abs y {neutral} - abs < x y ? {neutral} - 0.25 * {neutral} + x {neutral} - abs y {neutral} - abs < x y ? ?' 
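# --- Illustrative note (not part of the patch) ------------------------------------
# The integer/float branch added to Vinverse and Vinverse2 above picks the clip's
# mid-grey and full-scale values once instead of assuming integer input. A minimal,
# hedged sketch of that pattern in isolation; `_neutral_peak` is an invented helper
# name and `clp` is assumed to be any VapourSynth clip:
import vapoursynth as vs

def _neutral_peak(clp):
    if clp.format.sample_type == vs.INTEGER:
        # n-bit integer: mid-grey is half the range, peak is the largest code value
        return 1 << (clp.format.bits_per_sample - 1), (1 << clp.format.bits_per_sample) - 1
    # float formats: the hunks above use 0.0 as neutral and 1.0 as peak
    return 0.0, 1.0
# -----------------------------------------------------------------------------------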
vlimD = core.std.Expr([vshrpD, vblurD], expr=[expr]) @@ -2533,8 +2572,8 @@ def YDifferenceToNext(n, f, clips): cthresh = scale(cthresh, peak) maxdiff = scale(maxdiff, peak) - input_minus = core.std.DuplicateFrames(input, [0]) - input_plus = core.std.Trim(input, 1) + core.std.Trim(input, input.num_frames - 1) + input_minus = input.std.DuplicateFrames(frames=[0]) + input_plus = input.std.Trim(first=1) + input.std.Trim(first=input.num_frames - 1) input_y = mvf.GetPlane(input, 0) input_minus_y = mvf.GetPlane(input_minus, 0) @@ -2548,14 +2587,14 @@ def YDifferenceToNext(n, f, clips): average_u = core.std.Expr([input_minus_u, input_plus_u], expr=[f'x y - abs {cthresh} < {peak} 0 ?']) average_v = core.std.Expr([input_minus_v, input_plus_v], expr=[f'x y - abs {cthresh} < {peak} 0 ?']) - ymask = core.std.Binarize(average_y, threshold=1 << shift) + ymask = average_y.std.Binarize(threshold=1 << shift) if usemaxdiff: diffplus_y = core.std.Expr([input_plus_y, input_y], expr=[f'x y - abs {maxdiff} < {peak} 0 ?']) diffminus_y = core.std.Expr([input_minus_y, input_y], expr=[f'x y - abs {maxdiff} < {peak} 0 ?']) diffs_y = core.std.Lut2(diffplus_y, diffminus_y, function=lambda x, y: x & y) ymask = core.std.Lut2(ymask, diffs_y, function=lambda x, y: x & y) - cmask = core.std.Lut2(core.std.Binarize(average_u, threshold=129 << shift), core.std.Binarize(average_v, threshold=129 << shift), function=lambda x, y: x & y) - cmask = core.resize.Point(cmask, input.width, input.height) + cmask = core.std.Lut2(average_u.std.Binarize(threshold=129 << shift), average_v.std.Binarize(threshold=129 << shift), function=lambda x, y: x & y) + cmask = cmask.resize.Point(input.width, input.height) themask = core.std.Lut2(ymask, cmask, function=lambda x, y: x & y) @@ -2563,9 +2602,9 @@ def YDifferenceToNext(n, f, clips): output = core.std.ShufflePlanes([core.std.MaskedMerge(input_y, fixed_y, themask), input], planes=[0, 1, 2], colorfamily=input.format.color_family) - input = SCDetect(input, scnchg / 255) - output = core.std.FrameEval(output, eval=partial(YDifferenceFromPrevious, clips=[input, output]), prop_src=input) - output = core.std.FrameEval(output, eval=partial(YDifferenceToNext, clips=[input, output]), prop_src=input) + input = SCDetect(input, threshold=scnchg / 255) + output = output.std.FrameEval(eval=partial(YDifferenceFromPrevious, clips=[input, output]), prop_src=input) + output = output.std.FrameEval(eval=partial(YDifferenceToNext, clips=[input, output]), prop_src=input) if mask: return themask @@ -2624,8 +2663,8 @@ def LUTDeRainbow(input, cthresh=10, ythresh=10, y=True, linkUV=True, mask=False) cthresh = scale(cthresh, peak) ythresh = scale(ythresh, peak) - input_minus = core.std.DuplicateFrames(input, [0]) - input_plus = core.std.Trim(input, 1) + core.std.Trim(input, input.num_frames - 1) + input_minus = input.std.DuplicateFrames(frames=[0]) + input_plus = input.std.Trim(first=1) + input.std.Trim(first=input.num_frames - 1) input_u = mvf.GetPlane(input, 1) input_v = mvf.GetPlane(input, 2) @@ -2640,8 +2679,8 @@ def LUTDeRainbow(input, cthresh=10, ythresh=10, y=True, linkUV=True, mask=False) average_u = core.std.Expr([input_minus_u, input_plus_u], expr=[f'x y - abs {cthresh} < x y + 2 / 0 ?']) average_v = core.std.Expr([input_minus_v, input_plus_v], expr=[f'x y - abs {cthresh} < x y + 2 / 0 ?']) - umask = core.std.Binarize(average_u, threshold=21 << shift) - vmask = core.std.Binarize(average_v, threshold=21 << shift) + umask = average_u.std.Binarize(threshold=21 << shift) + vmask = average_v.std.Binarize(threshold=21 
<< shift) themask = core.std.Lut2(umask, vmask, function=lambda x, y: x & y) if y: umask = core.std.Lut2(umask, average_y, function=lambda x, y: x & y) @@ -2657,7 +2696,7 @@ def LUTDeRainbow(input, cthresh=10, ythresh=10, y=True, linkUV=True, mask=False) output = core.std.ShufflePlanes([input, output_u, output_v], planes=[0, 0, 0], colorfamily=input.format.color_family) if mask: - return core.resize.Point(themask, input.width, input.height) + return themask.resize.Point(input.width, input.height) else: return output @@ -2672,8 +2711,8 @@ def Stab(clp, dxmax=4, dymax=4, mirror=0): temp = AverageFrames(clp, weights=[1] * 15, scenechange=25 / 255) inter = core.std.Interleave([core.rgvs.Repair(temp, AverageFrames(clp, weights=[1] * 3, scenechange=25 / 255), mode=[1]), clp]) - mdata = core.mv.DepanEstimate(inter, trust=0, dxmax=dxmax, dymax=dymax) - last = core.mv.DepanCompensate(inter, data=mdata, offset=-1, mirror=mirror) + mdata = inter.mv.DepanEstimate(trust=0, dxmax=dxmax, dymax=dymax) + last = inter.mv.DepanCompensate(data=mdata, offset=-1, mirror=mirror) return last[::2] @@ -2691,22 +2730,26 @@ def Stab(clp, dxmax=4, dymax=4, mirror=0): ### radius (int) - Temporal radius of MDegrain for grain stabilize (1-3). Default is 1 ### adapt (int) - Threshold for luma-adaptative mask. -1: off, 0: source, 255: invert. Or define your own luma mask clip "Lmask". Default is -1 ### rep (int) - Mode of repair to avoid artifacts, set 0 to turn off this operation. Default is 13 -### planes (int[]) - Whether to process the corresponding plane. The other planes will be passed through unchanged. Default is [0, 1, 2] +### planes (int[]) - Whether to process the corresponding plane. The other planes will be passed through unchanged. ### ###### -def GSMC(input, p=None, Lmask=None, nrmode=None, radius=1, adapt=-1, rep=13, planes=[0, 1, 2], thSAD=300, thSADC=None, thSCD1=300, thSCD2=100, limit=None, limitc=None): +def GSMC(input, p=None, Lmask=None, nrmode=None, radius=1, adapt=-1, rep=13, planes=None, thSAD=300, thSADC=None, thSCD1=300, thSCD2=100, limit=None, limitc=None): if not isinstance(input, vs.VideoNode): raise vs.Error('GSMC: This is not a clip') + if p is not None and (not isinstance(p, vs.VideoNode) or p.format.id != input.format.id): raise vs.Error("GSMC: 'p' must be the same format as input") + if Lmask is not None and not isinstance(Lmask, vs.VideoNode): raise vs.Error("GSMC: 'Lmask' is not a clip") neutral = 1 << (input.format.bits_per_sample - 1) peak = (1 << input.format.bits_per_sample) - 1 - if input.format.color_family == vs.GRAY: - planes = [0] - if isinstance(planes, int): + + if planes is None: + planes = list(range(input.format.num_planes)) + + elif isinstance(planes, int): planes = [planes] HD = input.width > 1024 or input.height > 576 @@ -2743,24 +2786,24 @@ def GSMC(input, p=None, Lmask=None, nrmode=None, radius=1, adapt=-1, rep=13, pla if p is not None: pre_nr = p elif nrmode <= 0: - pre_nr = core.std.Convolution(input, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1], planes=planes) + pre_nr = input.std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1], planes=planes) else: - pre_nr = sbr(input, nrmode, planes=planes) + pre_nr = sbr(input, r=nrmode, planes=planes) dif_nr = core.std.MakeDiff(input, pre_nr, planes=planes) # Kernel: MC Grain Stabilize psuper = DitherLumaRebuild(pre_nr, s0=1, chroma=chromamv).mv.Super(pel=1, chroma=chromamv) - difsuper = core.mv.Super(dif_nr, pel=1, levels=1, chroma=chromamv) + difsuper = dif_nr.mv.Super(pel=1, levels=1, chroma=chromamv) analyse_args = 
dict(blksize=blksize, chroma=chromamv, truemotion=False, _global=True, overlap=overlap) - fv1 = core.mv.Analyse(psuper, isb=False, delta=1, **analyse_args) - bv1 = core.mv.Analyse(psuper, isb=True, delta=1, **analyse_args) + fv1 = psuper.mv.Analyse(isb=False, delta=1, **analyse_args) + bv1 = psuper.mv.Analyse(isb=True, delta=1, **analyse_args) if radius >= 2: - fv2 = core.mv.Analyse(psuper, isb=False, delta=2, **analyse_args) - bv2 = core.mv.Analyse(psuper, isb=True, delta=2, **analyse_args) + fv2 = psuper.mv.Analyse(isb=False, delta=2, **analyse_args) + bv2 = psuper.mv.Analyse(isb=True, delta=2, **analyse_args) if radius >= 3: - fv3 = core.mv.Analyse(psuper, isb=False, delta=3, **analyse_args) - bv3 = core.mv.Analyse(psuper, isb=True, delta=3, **analyse_args) + fv3 = psuper.mv.Analyse(isb=False, delta=3, **analyse_args) + bv3 = psuper.mv.Analyse(isb=True, delta=3, **analyse_args) degrain_args = dict(thsad=thSAD, thsadc=thSADC, plane=plane, limit=limit, limitc=limitc, thscd1=thSCD1, thscd2=thSCD2) if radius <= 1: @@ -2782,12 +2825,12 @@ def GSMC(input, p=None, Lmask=None, nrmode=None, radius=1, adapt=-1, rep=13, pla else: input_y = mvf.GetPlane(input, 0) if adapt == 0: - Lmask = core.std.Convolution(input_y, matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1]) + Lmask = input_y.std.Convolution(matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1]) elif adapt >= 255: - Lmask = core.std.Invert(input_y).std.Convolution(matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1]) + Lmask = input_y.std.Invert().std.Convolution(matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1]) else: expr = 'x {adapt} - abs {peak} * {adapt} {neutral} - abs {neutral} + /'.format(adapt=scale(adapt, peak), peak=peak, neutral=neutral) - Lmask = core.std.Expr([input_y], expr=[expr]).std.Convolution(matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1]) + Lmask = input_y.std.Expr(expr=[expr]).std.Convolution(matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1]) return core.std.MaskedMerge(input, stable, Lmask, planes=planes) @@ -2813,7 +2856,7 @@ def GSMC(input, p=None, Lmask=None, nrmode=None, radius=1, adapt=-1, rep=13, pla ### edgeclean, ECrad, ECthr, ### stabilize, maxr, TTstr, ### bwbh, owoh, blksize, overlap, -### bt, +### bt, ### thSAD, thSADC, thSAD2, thSADC2, thSCD1, thSCD2, ### truemotion, MVglobal, pel, pelsearch, search, searchparam, MVsharp, DCT, ### p, settings) @@ -2987,9 +3030,11 @@ def MCTemporalDenoise(i, radius=None, pfMode=3, sigma=None, twopass=None, useTTm if p is not None and (not isinstance(p, vs.VideoNode) or p.format.id != i.format.id): raise vs.Error("MCTemporalDenoise: 'p' must be the same format as input") + isGray = (i.format.color_family == vs.GRAY) + neutral = 1 << (i.format.bits_per_sample - 1) peak = (1 << i.format.bits_per_sample) - 1 - isGray = (i.format.color_family == vs.GRAY) + ### DEFAULTS try: @@ -3073,20 +3118,20 @@ def MCTemporalDenoise(i, radius=None, pfMode=3, sigma=None, twopass=None, useTTm yn = int(yi + yf) pointresize_args = dict(width=xn, height=yn, src_left=-xf / 2, src_top=-yf / 2, src_width=xn, src_height=yn) - i = core.resize.Point(i, **pointresize_args) + i = i.resize.Point(**pointresize_args) ### PREFILTERING fft3d_args = dict(planes=planes, bw=bwbh, bh=bwbh, bt=bt, ow=owoh, oh=owoh) if p is not None: - p = core.resize.Point(p, **pointresize_args) + p = p.resize.Point(**pointresize_args) elif pfMode <= -1: p = i elif pfMode == 0: - p = neo_fft3d.FFT3D(i, sigma=sigma * 0.8, sigma2=sigma * 0.6, sigma3=sigma * 0.4, sigma4=sigma * 0.2, **fft3d_args) + p = i.neo_fft3d.FFT3D(sigma=sigma * 0.8, sigma2=sigma * 0.6, sigma3=sigma * 0.4, sigma4=sigma * 0.2, **fft3d_args) elif pfMode 
>= 3: - p = core.dfttest.DFTTest(i, tbsize=1, slocation=[0.0,4.0, 0.2,9.0, 1.0,15.0], planes=planes) + p = i.dfttest.DFTTest(tbsize=1, slocation=[0.0,4.0, 0.2,9.0, 1.0,15.0], planes=planes) else: - p = MinBlur(i, pfMode, planes=planes) + p = MinBlur(i, r=pfMode, planes=planes) pD = core.std.MakeDiff(i, p, planes=planes) p = DitherLumaRebuild(p, s0=1, chroma=chroma) @@ -3096,50 +3141,50 @@ def MCTemporalDenoise(i, radius=None, pfMode=3, sigma=None, twopass=None, useTTm if not deblock: d = i elif useQED: - d = Deblock_QED(core.std.Crop(i, **crop_args), quant1=quant1, quant2=quant2, uv=3 if chroma else 2).resize.Point(**pointresize_args) + d = Deblock_QED(i.std.Crop(**crop_args), quant1=quant1, quant2=quant2, uv=3 if chroma else 2).resize.Point(**pointresize_args) else: - d = core.std.Crop(i, **crop_args).deblock.Deblock(quant=(quant1 + quant2) // 2, planes=planes).resize.Point(**pointresize_args) + d = i.std.Crop(**crop_args).deblock.Deblock(quant=(quant1 + quant2) // 2, planes=planes).resize.Point(**pointresize_args) ### PREPARING super_args = dict(hpad=0, vpad=0, pel=pel, chroma=chroma, sharp=MVsharp) - pMVS = core.mv.Super(p, rfilter=4 if refine else 2, **super_args) + pMVS = p.mv.Super(rfilter=4 if refine else 2, **super_args) if refine: - rMVS = core.mv.Super(p, levels=1, **super_args) + rMVS = p.mv.Super(levels=1, **super_args) analyse_args = dict(blksize=blksize, search=search, searchparam=searchparam, pelsearch=pelsearch, chroma=chroma, truemotion=truemotion, _global=MVglobal, overlap=overlap, dct=DCT) recalculate_args = dict(thsad=thSAD // 2, blksize=max(blksize // 2, 4), search=search, chroma=chroma, truemotion=truemotion, overlap=max(overlap // 2, 2), dct=DCT) - f1v = core.mv.Analyse(pMVS, isb=False, delta=1, **analyse_args) - b1v = core.mv.Analyse(pMVS, isb=True, delta=1, **analyse_args) + f1v = pMVS.mv.Analyse(isb=False, delta=1, **analyse_args) + b1v = pMVS.mv.Analyse(isb=True, delta=1, **analyse_args) if refine: f1v = core.mv.Recalculate(rMVS, f1v, **recalculate_args) b1v = core.mv.Recalculate(rMVS, b1v, **recalculate_args) if radius > 1: - f2v = core.mv.Analyse(pMVS, isb=False, delta=2, **analyse_args) - b2v = core.mv.Analyse(pMVS, isb=True, delta=2, **analyse_args) + f2v = pMVS.mv.Analyse(isb=False, delta=2, **analyse_args) + b2v = pMVS.mv.Analyse(isb=True, delta=2, **analyse_args) if refine: f2v = core.mv.Recalculate(rMVS, f2v, **recalculate_args) b2v = core.mv.Recalculate(rMVS, b2v, **recalculate_args) if radius > 2: - f3v = core.mv.Analyse(pMVS, isb=False, delta=3, **analyse_args) - b3v = core.mv.Analyse(pMVS, isb=True, delta=3, **analyse_args) + f3v = pMVS.mv.Analyse(isb=False, delta=3, **analyse_args) + b3v = pMVS.mv.Analyse(isb=True, delta=3, **analyse_args) if refine: f3v = core.mv.Recalculate(rMVS, f3v, **recalculate_args) b3v = core.mv.Recalculate(rMVS, b3v, **recalculate_args) if radius > 3: - f4v = core.mv.Analyse(pMVS, isb=False, delta=4, **analyse_args) - b4v = core.mv.Analyse(pMVS, isb=True, delta=4, **analyse_args) + f4v = pMVS.mv.Analyse(isb=False, delta=4, **analyse_args) + b4v = pMVS.mv.Analyse(isb=True, delta=4, **analyse_args) if refine: f4v = core.mv.Recalculate(rMVS, f4v, **recalculate_args) b4v = core.mv.Recalculate(rMVS, b4v, **recalculate_args) if radius > 4: - f5v = core.mv.Analyse(pMVS, isb=False, delta=5, **analyse_args) - b5v = core.mv.Analyse(pMVS, isb=True, delta=5, **analyse_args) + f5v = pMVS.mv.Analyse(isb=False, delta=5, **analyse_args) + b5v = pMVS.mv.Analyse(isb=True, delta=5, **analyse_args) if refine: f5v = core.mv.Recalculate(rMVS, 
f5v, **recalculate_args) b5v = core.mv.Recalculate(rMVS, b5v, **recalculate_args) if radius > 5: - f6v = core.mv.Analyse(pMVS, isb=False, delta=6, **analyse_args) - b6v = core.mv.Analyse(pMVS, isb=True, delta=6, **analyse_args) + f6v = pMVS.mv.Analyse(isb=False, delta=6, **analyse_args) + b6v = pMVS.mv.Analyse(isb=True, delta=6, **analyse_args) if refine: f6v = core.mv.Recalculate(rMVS, f6v, **recalculate_args) b6v = core.mv.Recalculate(rMVS, b6v, **recalculate_args) @@ -3202,7 +3247,7 @@ def MCTD_TTSM(i, iMVS, thSAD): # SAD_f6m = core.mv.Mask(i, f6v, **mask_args) # SAD_b6m = core.mv.Mask(i, b6v, **mask_args) - # b = core.std.BlankClip(i, color=[0] if isGray else [0, neutral, neutral]) + # b = i.std.BlankClip(color=[0] if isGray else [0, neutral, neutral]) if radius <= 1: c = core.std.Interleave([f1c, i, b1c]) # SAD_m = core.std.Interleave([SAD_f1m, b, SAD_b1m]) @@ -3222,12 +3267,12 @@ def MCTD_TTSM(i, iMVS, thSAD): c = core.std.Interleave([f6c, f5c, f4c, f3c, f2c, f1c, i, b1c, b2c, b3c, b4c, b5c, b6c]) # SAD_m = core.std.Interleave([SAD_f6m, SAD_f5m, SAD_f4m, SAD_f3m, SAD_f2m, SAD_f1m, b, SAD_b1m, SAD_b2m, SAD_b3m, SAD_b4m, SAD_b5m, SAD_b6m]) - # sm = core.ttmpsm.TTempSmooth(c, maxr=radius, thresh=[255], mdiff=[1], strength=radius + 1, scthresh=99.9, fp=False, pfclip=SAD_m, planes=planes) - sm = core.ttmpsm.TTempSmooth(c, maxr=radius, thresh=[255], mdiff=[1], strength=radius + 1, scthresh=99.9, fp=False, planes=planes) - return core.std.SelectEvery(sm, radius * 2 + 1, [radius]) + # sm = c.ttmpsm.TTempSmooth(maxr=radius, thresh=[255], mdiff=[1], strength=radius + 1, scthresh=99.9, fp=False, pfclip=SAD_m, planes=planes) + sm = c.ttmpsm.TTempSmooth(maxr=radius, thresh=[255], mdiff=[1], strength=radius + 1, scthresh=99.9, fp=False, planes=planes) + return sm.std.SelectEvery(cycle=radius * 2 + 1, offsets=[radius]) ### DENOISING: FIRST PASS - dMVS = core.mv.Super(d, levels=1, **super_args) + dMVS = d.mv.Super(levels=1, **super_args) sm = MCTD_TTSM(d, dMVS, thSAD) if useTTmpSm else MCTD_MVD(d, dMVS, thSAD, thSADC) if limit <= -1: @@ -3243,7 +3288,7 @@ def MCTD_TTSM(i, iMVS, thSAD): ### DENOISING: SECOND PASS if twopass: - smLMVS = core.mv.Super(smL, levels=1, **super_args) + smLMVS = smL.mv.Super(levels=1, **super_args) sm = MCTD_TTSM(smL, smLMVS, thSAD2) if useTTmpSm else MCTD_MVD(smL, smLMVS, thSAD2, thSADC2) if limit2 <= -1: @@ -3261,7 +3306,7 @@ def MCTD_TTSM(i, iMVS, thSAD): if post <= 0: smP = smL else: - smP = neo_fft3d.FFT3D(smL, sigma=post * 0.8, sigma2=post * 0.6, sigma3=post * 0.4, sigma4=post * 0.2, **fft3d_args) + smP = smL.neo_fft3d.FFT3D(sigma=post * 0.8, sigma2=post * 0.6, sigma3=post * 0.4, sigma4=post * 0.2, **fft3d_args) ### EDGECLEANING if edgeclean: @@ -3280,7 +3325,7 @@ def MCTD_TTSM(i, iMVS, thSAD): smP = core.std.MaskedMerge(TTc, smP, mF, planes=planes) ### OUTPUT - return core.std.Crop(smP, **crop_args) + return smP.std.Crop(**crop_args) ################################################################################################ @@ -3312,11 +3357,13 @@ def SMDegrain(input, tr=2, thSAD=300, thSADC=None, RefineMotion=False, contrasha if not isinstance(input, vs.VideoNode): raise vs.Error('SMDegrain: This is not a clip') - peak = (1 << input.format.bits_per_sample) - 1 + if input.format.color_family == vs.GRAY: plane = 0 chroma = False + peak = (1 << input.format.bits_per_sample) - 1 + # Defaults & Conditionals thSAD2 = thSAD // 2 if thSADC is None: @@ -3396,7 +3443,7 @@ def SMDegrain(input, tr=2, thSAD=300, thSADC=None, RefineMotion=False, contrasha if not interlaced: 
inputP = input else: - inputP = core.std.SeparateFields(input, tff) + inputP = input.std.SeparateFields(tff=tff) # Prefilter & Motion Filter if mfilter is None: @@ -3407,11 +3454,11 @@ def SMDegrain(input, tr=2, thSAD=300, thSADC=None, RefineMotion=False, contrasha pref = prefilter elif prefilter <= -1: pref = inputP - elif prefilter == 0: - pref = MinBlur(inputP, 0, planes=planes) + + elif prefilter == 3: expr = 'x {i} < {peak} x {j} > 0 {peak} x {i} - {peak} {j} {i} - / * - ? ?'.format(i=scale(16, peak), j=scale(75, peak), peak=peak) - pref = core.std.MaskedMerge(core.dfttest.DFTTest(inputP, tbsize=1, slocation=[0.0,4.0, 0.2,9.0, 1.0,15.0], planes=planes), + pref = core.std.MaskedMerge(inputP.dfttest.DFTTest(tbsize=1, slocation=[0.0,4.0, 0.2,9.0, 1.0,15.0], planes=planes), inputP, mvf.GetPlane(inputP, 0).std.Expr(expr=[expr]), planes=planes) @@ -3419,9 +3466,9 @@ def SMDegrain(input, tr=2, thSAD=300, thSADC=None, RefineMotion=False, contrasha if chroma: pref = KNLMeansCL(inputP, d=1, a=1, h=7) else: - pref = core.knlm.KNLMeansCL(inputP, d=1, a=1, h=7) + pref = inputP.knlm.KNLMeansCL(d=1, a=1, h=7) else: - pref = sbr(inputP, prefilter, planes=planes) + pref = MinBlur(inputP, r=prefilter, planes=planes) else: pref = inputP @@ -3445,48 +3492,48 @@ def SMDegrain(input, tr=2, thSAD=300, thSADC=None, RefineMotion=False, contrasha recalculate_args = dict(thsad=halfthSAD, blksize=halfblksize, search=search, chroma=chroma, truemotion=truemotion, overlap=halfoverlap, dct=dct) if pelclip: - super_search = core.mv.Super(pref, chroma=chroma, rfilter=4, pelclip=pclip, **super_args) + super_search = pref.mv.Super(chroma=chroma, rfilter=4, pelclip=pclip, **super_args) else: - super_search = core.mv.Super(pref, chroma=chroma, sharp=subpixel, rfilter=4, **super_args) + super_search = pref.mv.Super(chroma=chroma, sharp=subpixel, rfilter=4, **super_args) if not GlobalR: if pelclip: - super_render = core.mv.Super(inputP, levels=1, chroma=plane0, pelclip=pclip2, **super_args) + super_render = inputP.mv.Super(levels=1, chroma=plane0, pelclip=pclip2, **super_args) if RefineMotion: - Recalculate = core.mv.Super(pref, levels=1, chroma=chroma, pelclip=pclip, **super_args) + Recalculate = pref.mv.Super(levels=1, chroma=chroma, pelclip=pclip, **super_args) else: - super_render = core.mv.Super(inputP, levels=1, chroma=plane0, sharp=subpixel, **super_args) + super_render = inputP.mv.Super(levels=1, chroma=plane0, sharp=subpixel, **super_args) if RefineMotion: - Recalculate = core.mv.Super(pref, levels=1, chroma=chroma, sharp=subpixel, **super_args) + Recalculate = pref.mv.Super(levels=1, chroma=chroma, sharp=subpixel, **super_args) if interlaced: if tr > 2: - bv6 = core.mv.Analyse(super_search, isb=True, delta=6, **analyse_args) - fv6 = core.mv.Analyse(super_search, isb=False, delta=6, **analyse_args) + bv6 = super_search.mv.Analyse(isb=True, delta=6, **analyse_args) + fv6 = super_search.mv.Analyse(isb=False, delta=6, **analyse_args) if RefineMotion: bv6 = core.mv.Recalculate(Recalculate, bv6, **recalculate_args) fv6 = core.mv.Recalculate(Recalculate, fv6, **recalculate_args) if tr > 1: - bv4 = core.mv.Analyse(super_search, isb=True, delta=4, **analyse_args) - fv4 = core.mv.Analyse(super_search, isb=False, delta=4, **analyse_args) + bv4 = super_search.mv.Analyse(isb=True, delta=4, **analyse_args) + fv4 = super_search.mv.Analyse(isb=False, delta=4, **analyse_args) if RefineMotion: bv4 = core.mv.Recalculate(Recalculate, bv4, **recalculate_args) fv4 = core.mv.Recalculate(Recalculate, fv4, **recalculate_args) else: if tr 
> 2: - bv3 = core.mv.Analyse(super_search, isb=True, delta=3, **analyse_args) - fv3 = core.mv.Analyse(super_search, isb=False, delta=3, **analyse_args) + bv3 = super_search.mv.Analyse(isb=True, delta=3, **analyse_args) + fv3 = super_search.mv.Analyse(isb=False, delta=3, **analyse_args) if RefineMotion: bv3 = core.mv.Recalculate(Recalculate, bv3, **recalculate_args) fv3 = core.mv.Recalculate(Recalculate, fv3, **recalculate_args) - bv1 = core.mv.Analyse(super_search, isb=True, delta=1, **analyse_args) - fv1 = core.mv.Analyse(super_search, isb=False, delta=1, **analyse_args) + bv1 = super_search.mv.Analyse(isb=True, delta=1, **analyse_args) + fv1 = super_search.mv.Analyse(isb=False, delta=1, **analyse_args) if RefineMotion: bv1 = core.mv.Recalculate(Recalculate, bv1, **recalculate_args) fv1 = core.mv.Recalculate(Recalculate, fv1, **recalculate_args) if interlaced or tr > 1: - bv2 = core.mv.Analyse(super_search, isb=True, delta=2, **analyse_args) - fv2 = core.mv.Analyse(super_search, isb=False, delta=2, **analyse_args) + bv2 = super_search.mv.Analyse(isb=True, delta=2, **analyse_args) + fv2 = super_search.mv.Analyse(isb=False, delta=2, **analyse_args) if RefineMotion: bv2 = core.mv.Recalculate(Recalculate, bv2, **recalculate_args) fv2 = core.mv.Recalculate(Recalculate, fv2, **recalculate_args) @@ -3515,7 +3562,7 @@ def SMDegrain(input, tr=2, thSAD=300, thSADC=None, RefineMotion=False, contrasha if not GlobalO and if0: if if1: if interlaced: - CClip = core.std.SeparateFields(CClip, tff) + CClip = CClip.std.SeparateFields(tff=tff) else: CClip = inputP @@ -3524,15 +3571,15 @@ def SMDegrain(input, tr=2, thSAD=300, thSADC=None, RefineMotion=False, contrasha if if0: if interlaced: if ifC: - return Weave(ContraSharpening(output, CClip, planes=planes), tff) + return Weave(ContraSharpening(output, CClip, planes=planes), tff=tff) else: - return Weave(LSFmod(output, strength=contrasharp, source=CClip, Lmode=0, soothe=False, defaults='slow'), tff) + return Weave(LSFmod(output, strength=contrasharp, source=CClip, Lmode=0, soothe=False, defaults='slow'), tff=tff) elif ifC: return ContraSharpening(output, CClip, planes=planes) else: return LSFmod(output, strength=contrasharp, source=CClip, Lmode=0, soothe=False, defaults='slow') elif interlaced: - return Weave(output, tff) + return Weave(output, tff=tff) else: return output else: @@ -3549,15 +3596,17 @@ def SMDegrain(input, tr=2, thSAD=300, thSADC=None, RefineMotion=False, contrasha # tlimit (int) - The temporal filter won't change a pixel more than this. Default is 3 # tbias (int) - The percentage of the temporal filter that will apply. Default is 49 # back (int) - After all changes have been calculated, reduce all pixel changes by this value (shift "back" towards original value). Default is 1 -# planes (int[]) - Whether to process the corresponding plane. The other planes will be passed through unchanged. Default is [0, 1, 2] -def STPresso(clp, limit=3, bias=24, RGmode=4, tthr=12, tlimit=3, tbias=49, back=1, planes=[0, 1, 2]): +# planes (int[]) - Whether to process the corresponding plane. The other planes will be passed through unchanged. 
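# --- Illustrative note (not part of the patch) ------------------------------------
# Several hunks in this patch (GSMC above, STPresso below) replace the old
# `planes=[0, 1, 2]` default with `planes=None` and normalise it inside the function.
# A minimal, hedged sketch of that normalisation; the helper name is invented and
# `clip` is assumed to be any VapourSynth clip:
def _normalise_planes(clip, planes=None):
    if planes is None:
        # default to every plane the format actually has (1 for GRAY, 3 for YUV/RGB)
        planes = list(range(clip.format.num_planes))
    elif isinstance(planes, int):
        # allow a bare int as shorthand for a single plane
        planes = [planes]
    return planes
# -----------------------------------------------------------------------------------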
+def STPresso(clp, limit=3, bias=24, RGmode=4, tthr=12, tlimit=3, tbias=49, back=1, planes=None): if not isinstance(clp, vs.VideoNode): raise vs.Error('STPresso: This is not a clip') peak = (1 << clp.format.bits_per_sample) - 1 - if clp.format.color_family == vs.GRAY: - planes = [0] - if isinstance(planes, int): + + if planes is None: + planes = list(range(clp.format.num_planes)) + + elif isinstance(planes, int): planes = [planes] limit = scale(limit, peak) @@ -3578,34 +3627,37 @@ def STPresso(clp, limit=3, bias=24, RGmode=4, tthr=12, tlimit=3, tbias=49, back= texpr = f'x y - abs {scale(1, peak)} < x x {TLIM} + y < x {tlimit} + x {TLIM} - y > x {tlimit} - x {100 - tbias} * y {tbias} * + 100 / ? ? ?' if RGmode == 4: - bzz = core.std.Median(clp, planes=planes) + bzz = clp.std.Median(planes=planes) elif RGmode in [11, 12]: - bzz = core.std.Convolution(clp, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=planes) + bzz = clp.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=planes) elif RGmode == 19: - bzz = core.std.Convolution(clp, matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1], planes=planes) + bzz = clp.std.Convolution(matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1], planes=planes) elif RGmode == 20: - bzz = core.std.Convolution(clp, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1], planes=planes) + bzz = clp.std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1], planes=planes) else: - bzz = core.rgvs.RemoveGrain(clp, mode=[RGmode if i in planes else 0 for i in range(clp.format.num_planes)]) + bzz = clp.rgvs.RemoveGrain(mode=[RGmode if i in planes else 0 for i in range(clp.format.num_planes)]) last = core.std.Expr([clp, bzz], expr=[expr if i in planes else '' for i in range(clp.format.num_planes)]) if tthr > 0: - last = core.std.Expr([last, core.std.MakeDiff(last, core.std.MakeDiff(bzz, core.flux.SmoothT(bzz, temporal_threshold=tthr, planes=planes), planes=planes), planes=planes)], + last = core.std.Expr([last, core.std.MakeDiff(last, core.std.MakeDiff(bzz, bzz.flux.SmoothT(temporal_threshold=tthr, planes=planes), planes=planes), planes=planes)], expr=[texpr if i in planes else '' for i in range(clp.format.num_planes)]) if back > 0: expr = f'x {back} + y < x {back} + x {back} - y > x {back} - y ? ?' - return core.std.Expr([last, clp], expr=[expr if i in planes else '' for i in range(clp.format.num_planes)]) - else: - return last + last = core.std.Expr([last, clp], expr=[expr if i in planes else '' for i in range(clp.format.num_planes)]) + + return last # a.k.a. 
BalanceBordersMod def bbmod(c, cTop, cBottom, cLeft, cRight, thresh=128, blur=999): if not isinstance(c, vs.VideoNode): raise vs.Error('bbmod: This is not a clip') + if c.format.color_family in [vs.GRAY, vs.RGB]: - raise vs.Error('bbmod: Gray and RGB color families are not supported') + raise vs.Error('bbmod: Gray and RGB formats are not supported') + if thresh <= 0: raise vs.Error('bbmod: thresh must be greater than 0') + if blur <= 0: raise vs.Error('bbmod: blur must be greater than 0') @@ -3620,41 +3672,41 @@ def btb(c, cTop): cTop = min(cTop, cHeight - 1) blurWidth = max(8, math.floor(cWidth / blur)) - c2 = core.resize.Point(c, cWidth * 2, cHeight * 2) + c2 = c.resize.Point(cWidth * 2, cHeight * 2) - last = core.std.CropAbs(c2, width=cWidth * 2, height=2, top=cTop * 2) - last = core.resize.Point(last, cWidth * 2, cTop * 2) - referenceBlurChroma = BicubicResize(BicubicResize(core.std.Expr([last], expr=[f'x {neutral} - abs 2 *', '']), blurWidth * 2, cTop * 2), cWidth * 2, cTop * 2) + last = c2.std.CropAbs(width=cWidth * 2, height=2, top=cTop * 2) + last = last.resize.Point(cWidth * 2, cTop * 2) + referenceBlurChroma = BicubicResize(BicubicResize(last.std.Expr(expr=[f'x {neutral} - abs 2 *', '']), blurWidth * 2, cTop * 2), cWidth * 2, cTop * 2) referenceBlur = BicubicResize(BicubicResize(last, blurWidth * 2, cTop * 2), cWidth * 2, cTop * 2) - original = core.std.CropAbs(c2, width=cWidth * 2, height=cTop * 2) + original = c2.std.CropAbs(width=cWidth * 2, height=cTop * 2) last = BicubicResize(original, blurWidth * 2, cTop * 2) - originalBlurChroma = BicubicResize(BicubicResize(core.std.Expr([last], expr=[f'x {neutral} - abs 2 *', '']), blurWidth * 2, cTop * 2), cWidth * 2, cTop * 2) + originalBlurChroma = BicubicResize(BicubicResize(last.std.Expr(expr=[f'x {neutral} - abs 2 *', '']), blurWidth * 2, cTop * 2), cWidth * 2, cTop * 2) originalBlur = BicubicResize(BicubicResize(last, blurWidth * 2, cTop * 2), cWidth * 2, cTop * 2) balancedChroma = core.std.Expr([original, originalBlurChroma, referenceBlurChroma], expr=['', f'z y / 8 min 0.4 max x {neutral} - * {neutral} +']) - expr = f'z {scale(16, peak)} - y {scale(16, peak)} - / 8 min 0.4 max x {scale(16, peak)} - * {scale(16, peak)} +' + expr = 'z {i} - y {i} - / 8 min 0.4 max x {i} - * {i} +'.format(i=scale(16, peak)) balancedLuma = core.std.Expr([balancedChroma, originalBlur, referenceBlur], expr=[expr, 'z y - x +']) difference = core.std.MakeDiff(balancedLuma, original) - difference = core.std.Expr([difference], expr=[f'x {scale(128 + thresh, peak)} min {scale(128 - thresh, peak)} max']) + difference = difference.std.Expr(expr=[f'x {scale(128 + thresh, peak)} min {scale(128 - thresh, peak)} max']) last = core.std.MergeDiff(original, difference) - return core.std.StackVertical([last, core.std.CropAbs(c2, width=cWidth * 2, height=(cHeight - cTop) * 2, top=cTop * 2)]).resize.Point(cWidth, cHeight) + return core.std.StackVertical([last, c2.std.CropAbs(width=cWidth * 2, height=(cHeight - cTop) * 2, top=cTop * 2)]).resize.Point(cWidth, cHeight) if cTop > 0: c = btb(c, cTop) - c = core.std.Transpose(c).std.FlipHorizontal() + c = c.std.Transpose().std.FlipHorizontal() if cLeft > 0: c = btb(c, cLeft) - c = core.std.Transpose(c).std.FlipHorizontal() + c = c.std.Transpose().std.FlipHorizontal() if cBottom > 0: c = btb(c, cBottom) - c = core.std.Transpose(c).std.FlipHorizontal() + c = c.std.Transpose().std.FlipHorizontal() if cRight > 0: c = btb(c, cRight) - return core.std.Transpose(c).std.FlipHorizontal() + return 
c.std.Transpose().std.FlipHorizontal() # Apply the inverse sigmoid curve to a clip in linear luminance @@ -3662,20 +3714,32 @@ def SigmoidInverse(src, thr=0.5, cont=6.5, planes=[0, 1, 2]): if not isinstance(src, vs.VideoNode) or src.format.bits_per_sample != 16: raise vs.Error('SigmoidInverse: This is not a 16-bit clip') + if thr < 0 or thr > 1: + raise vs.Error('SigmoidInverse: thr must be between 0.0 and 1.0 (inclusive)') + + if cont <= 0: + raise vs.Error('SigmoidInverse: cont must be greater than 0.0') + x0 = 1 / (1 + math.exp(cont * thr)) x1m0 = 1 / (1 + math.exp(cont * (thr - 1))) - x0 expr = f'{thr} 1 x 65536 / {x1m0} * {x0} + 0.000001 max / 1 - 0.000001 max log {cont} / - 65536 *' - return core.std.Expr([src], expr=[expr if i in planes else '' for i in range(src.format.num_planes)]) + return src.std.Expr(expr=[expr if i in planes else '' for i in range(src.format.num_planes)]) # Convert back a clip to linear luminance def SigmoidDirect(src, thr=0.5, cont=6.5, planes=[0, 1, 2]): if not isinstance(src, vs.VideoNode) or src.format.bits_per_sample != 16: raise vs.Error('SigmoidDirect: This is not a 16-bit clip') + if thr < 0 or thr > 1: + raise vs.Error('SigmoidDirect: thr must be between 0.0 and 1.0 (inclusive)') + + if cont <= 0: + raise vs.Error('SigmoidDirect: cont must be greater than 0.0') + x0 = 1 / (1 + math.exp(cont * thr)) x1m0 = 1 / (1 + math.exp(cont * (thr - 1))) - x0 expr = f'1 1 {cont} {thr} x 65536 / - * exp + / {x0} - {x1m0} / 65536 *' - return core.std.Expr([src], expr=[expr if i in planes else '' for i in range(src.format.num_planes)]) + return src.std.Expr(expr=[expr if i in planes else '' for i in range(src.format.num_planes)]) # Parameters: @@ -3697,11 +3761,16 @@ def SigmoidDirect(src, thr=0.5, cont=6.5, planes=[0, 1, 2]): def GrainFactory3(clp, g1str=7.0, g2str=5.0, g3str=3.0, g1shrp=60, g2shrp=66, g3shrp=80, g1size=1.5, g2size=1.2, g3size=0.9, temp_avg=0, ontop_grain=0.0, th1=24, th2=56, th3=128, th4=160): if not isinstance(clp, vs.VideoNode): raise vs.Error('GrainFactory3: This is not a clip') + if clp.format.color_family == vs.RGB: - raise vs.Error('GrainFactory3: RGB color family is not supported') + raise vs.Error('GrainFactory3: RGB format is not supported') - neutral = 1 << (clp.format.bits_per_sample - 1) - peak = (1 << clp.format.bits_per_sample) - 1 + if clp.format.sample_type == vs.INTEGER: + neutral = 1 << (clp.format.bits_per_sample - 1) + peak = (1 << clp.format.bits_per_sample) - 1 + else: + neutral = 0.0 + peak = 1.0 if clp.format.color_family != vs.GRAY: clp_orig = clp @@ -3742,34 +3811,36 @@ def GrainFactory3(clp, g1str=7.0, g2str=5.0, g3str=3.0, g1shrp=60, g2shrp=66, g3 th3 = scale(th3, peak) th4 = scale(th4, peak) - grainlayer1 = core.std.BlankClip(clp, width=sx1, height=sy1, color=[neutral]).grain.Add(g1str) + grainlayer1 = clp.std.BlankClip(width=sx1, height=sy1, color=[neutral]).grain.Add(var=g1str) if g1size != 1 and (sx1 != ox or sy1 != oy): if g1size > 1.5: - grainlayer1 = core.resize.Bicubic(grainlayer1, sx1a, sy1a, filter_param_a=b1a, filter_param_b=c1a).resize.Bicubic(ox, oy, filter_param_a=b1a, filter_param_b=c1a) + grainlayer1 = grainlayer1.resize.Bicubic(sx1a, sy1a, filter_param_a=b1a, filter_param_b=c1a).resize.Bicubic(ox, oy, filter_param_a=b1a, filter_param_b=c1a) else: - grainlayer1 = core.resize.Bicubic(grainlayer1, ox, oy, filter_param_a=b1, filter_param_b=c1) + grainlayer1 = grainlayer1.resize.Bicubic(ox, oy, filter_param_a=b1, filter_param_b=c1) - grainlayer2 = core.std.BlankClip(clp, width=sx2, height=sy2, 
color=[neutral]).grain.Add(g2str) + grainlayer2 = clp.std.BlankClip(width=sx2, height=sy2, color=[neutral]).grain.Add(var=g2str) if g2size != 1 and (sx2 != ox or sy2 != oy): if g2size > 1.5: - grainlayer2 = core.resize.Bicubic(grainlayer2, sx2a, sy2a, filter_param_a=b2a, filter_param_b=c2a).resize.Bicubic(ox, oy, filter_param_a=b2a, filter_param_b=c2a) + grainlayer2 = grainlayer2.resize.Bicubic(sx2a, sy2a, filter_param_a=b2a, filter_param_b=c2a).resize.Bicubic(ox, oy, filter_param_a=b2a, filter_param_b=c2a) else: - grainlayer2 = core.resize.Bicubic(grainlayer2, ox, oy, filter_param_a=b2, filter_param_b=c2) + grainlayer2 = grainlayer2.resize.Bicubic(ox, oy, filter_param_a=b2, filter_param_b=c2) - grainlayer3 = core.std.BlankClip(clp, width=sx3, height=sy3, color=[neutral]).grain.Add(g3str) + grainlayer3 = clp.std.BlankClip(width=sx3, height=sy3, color=[neutral]).grain.Add(var=g3str) if g3size != 1 and (sx3 != ox or sy3 != oy): if g3size > 1.5: - grainlayer3 = core.resize.Bicubic(grainlayer3, sx3a, sy3a, filter_param_a=b3a, filter_param_b=c3a).resize.Bicubic(ox, oy, filter_param_a=b3a, filter_param_b=c3a) + grainlayer3 = grainlayer3.resize.Bicubic(sx3a, sy3a, filter_param_a=b3a, filter_param_b=c3a).resize.Bicubic(ox, oy, filter_param_a=b3a, filter_param_b=c3a) else: - grainlayer3 = core.resize.Bicubic(grainlayer3, ox, oy, filter_param_a=b3, filter_param_b=c3) + grainlayer3 = grainlayer3.resize.Bicubic(ox, oy, filter_param_a=b3, filter_param_b=c3) expr1 = f'x {th1} < 0 x {th2} > {peak} {peak} {th2 - th1} / x {th1} - * ? ?' expr2 = f'x {th3} < 0 x {th4} > {peak} {peak} {th4 - th3} / x {th3} - * ? ?' - grainlayer = core.std.MaskedMerge(core.std.MaskedMerge(grainlayer1, grainlayer2, core.std.Expr([clp], expr=[expr1])), grainlayer3, core.std.Expr([clp], expr=[expr2])) + grainlayer = core.std.MaskedMerge(core.std.MaskedMerge(grainlayer1, grainlayer2, clp.std.Expr(expr=[expr1])), grainlayer3, clp.std.Expr(expr=[expr2])) + if temp_avg > 0: grainlayer = core.std.Merge(grainlayer, AverageFrames(grainlayer, weights=[1] * 3), weight=[tmpavg]) if ontop_grain > 0: - grainlayer = core.grain.Add(grainlayer, ontop_grain) + grainlayer = grainlayer.grain.Add(var=ontop_grain) + result = core.std.MakeDiff(clp, grainlayer) if clp_orig is not None: @@ -3793,10 +3864,13 @@ def InterFrame(Input, Preset='Medium', Tuning='Film', NewNum=None, NewDen=1, GPU Preset = Preset.lower() Tuning = Tuning.lower() InputType = InputType.upper() + if Preset not in ['medium', 'fast', 'faster', 'fastest']: raise vs.Error(f"InterFrame: '{Preset}' is not a valid preset") + if Tuning not in ['film', 'smooth', 'animation', 'weak']: raise vs.Error(f"InterFrame: '{Tuning}' is not a valid tuning") + if InputType not in ['2D', 'SBS', 'OU', 'HSBS', 'HOU']: raise vs.Error(f"InterFrame: '{InputType}' is not a valid InputType") @@ -3875,7 +3949,7 @@ def InterFrameProcess(clip): SmoothString += ',area_sharp:1.2},scene:{blend:true,mode:0}}' # Make interpolation vector clip - Super = core.svp1.Super(clip, SuperString) + Super = clip.svp1.Super(SuperString) Vectors = core.svp1.Analyse(Super['clip'], Super['data'], clip, VectorsString) # Put it together @@ -3883,21 +3957,21 @@ def InterFrameProcess(clip): # Get either 1 or 2 clips depending on InputType if InputType == 'SBS': - FirstEye = InterFrameProcess(core.std.Crop(Input, right=Input.width // 2)) - SecondEye = InterFrameProcess(core.std.Crop(Input, left=Input.width // 2)) + FirstEye = InterFrameProcess(Input.std.Crop(right=Input.width // 2)) + SecondEye = 
InterFrameProcess(Input.std.Crop(left=Input.width // 2)) return core.std.StackHorizontal([FirstEye, SecondEye]) elif InputType == 'OU': - FirstEye = InterFrameProcess(core.std.Crop(Input, bottom=Input.height // 2)) - SecondEye = InterFrameProcess(core.std.Crop(Input, top=Input.height // 2)) + FirstEye = InterFrameProcess(Input.std.Crop(bottom=Input.height // 2)) + SecondEye = InterFrameProcess(Input.std.Crop(top=Input.height // 2)) return core.std.StackVertical([FirstEye, SecondEye]) elif InputType == 'HSBS': - FirstEye = InterFrameProcess(core.std.Crop(Input, right=Input.width // 2).resize.Spline36(Input.width, Input.height)) - SecondEye = InterFrameProcess(core.std.Crop(Input, left=Input.width // 2).resize.Spline36(Input.width, Input.height)) - return core.std.StackHorizontal([core.resize.Spline36(FirstEye, Input.width // 2, Input.height), core.resize.Spline36(SecondEye, Input.width // 2, Input.height)]) + FirstEye = InterFrameProcess(Input.std.Crop(right=Input.width // 2).resize.Spline36(Input.width, Input.height)) + SecondEye = InterFrameProcess(Input.std.Crop(left=Input.width // 2).resize.Spline36(Input.width, Input.height)) + return core.std.StackHorizontal([FirstEye.resize.Spline36(Input.width // 2, Input.height), SecondEye.resize.Spline36(Input.width // 2, Input.height)]) elif InputType == 'HOU': - FirstEye = InterFrameProcess(core.std.Crop(Input, bottom=Input.height // 2).resize.Spline36(Input.width, Input.height)) - SecondEye = InterFrameProcess(core.std.Crop(Input, top=Input.height // 2).resize.Spline36(Input.width, Input.height)) - return core.std.StackVertical([core.resize.Spline36(FirstEye, Input.width, Input.height // 2), core.resize.Spline36(SecondEye, Input.width, Input.height // 2)]) + FirstEye = InterFrameProcess(Input.std.Crop(bottom=Input.height // 2).resize.Spline36(Input.width, Input.height)) + SecondEye = InterFrameProcess(Input.std.Crop(top=Input.height // 2).resize.Spline36(Input.width, Input.height)) + return core.std.StackVertical([FirstEye.resize.Spline36(Input.width, Input.height // 2), SecondEye.resize.Spline36(Input.width, Input.height // 2)]) else: return InterFrameProcess(Input) @@ -3906,8 +3980,9 @@ def InterFrameProcess(clip): def FixColumnBrightness(c, column, input_low, input_high, output_low, output_high): if not isinstance(c, vs.VideoNode): raise vs.Error('FixColumnBrightness: This is not a clip') + if c.format.color_family == vs.RGB: - raise vs.Error('FixColumnBrightness: RGB color family is not supported') + raise vs.Error('FixColumnBrightness: RGB format is not supported') peak = (1 << c.format.bits_per_sample) - 1 @@ -3923,7 +3998,7 @@ def FixColumnBrightness(c, column, input_low, input_high, output_low, output_hig output_high = scale(output_high, peak) last = SmoothLevels(c, input_low, 1, input_high, output_low, output_high, Smode=0) - last = core.std.CropAbs(last, width=1, height=c.height, left=column) + last = last.std.CropAbs(width=1, height=c.height, left=column) last = Overlay(c, last, x=column) if c_orig is not None: last = core.std.ShufflePlanes([last, c_orig], planes=[0, 1, 2], colorfamily=c_orig.format.color_family) @@ -3934,8 +4009,9 @@ def FixColumnBrightness(c, column, input_low, input_high, output_low, output_hig def FixRowBrightness(c, row, input_low, input_high, output_low, output_high): if not isinstance(c, vs.VideoNode): raise vs.Error('FixRowBrightness: This is not a clip') + if c.format.color_family == vs.RGB: - raise vs.Error('FixRowBrightness: RGB color family is not supported') + raise vs.Error('FixRowBrightness: RGB format 
is not supported') peak = (1 << c.format.bits_per_sample) - 1 @@ -3951,7 +4027,7 @@ def FixRowBrightness(c, row, input_low, input_high, output_low, output_high): output_high = scale(output_high, peak) last = SmoothLevels(c, input_low, 1, input_high, output_low, output_high, Smode=0) - last = core.std.CropAbs(last, width=c.width, height=1, top=row) + last = last.std.CropAbs(width=c.width, height=1, top=row) last = Overlay(c, last, y=row) if c_orig is not None: last = core.std.ShufflePlanes([last, c_orig], planes=[0, 1, 2], colorfamily=c_orig.format.color_family) @@ -3962,8 +4038,9 @@ def FixRowBrightness(c, row, input_low, input_high, output_low, output_high): def FixColumnBrightnessProtect(c, column, input_low, input_high, output_low, output_high, protect_value=20): if not isinstance(c, vs.VideoNode): raise vs.Error('FixColumnBrightnessProtect: This is not a clip') + if c.format.color_family == vs.RGB: - raise vs.Error('FixColumnBrightnessProtect: RGB color family is not supported') + raise vs.Error('FixColumnBrightnessProtect: RGB format is not supported') peak = (1 << c.format.bits_per_sample) - 1 @@ -3979,8 +4056,8 @@ def FixColumnBrightnessProtect(c, column, input_low, input_high, output_low, out output_high = scale(255 - output_high, peak) protect_value = scale(protect_value, peak) - last = SmoothLevels(core.std.Invert(c), input_low, 1, input_high, output_low, output_high, protect=protect_value, Smode=0).std.Invert() - last = core.std.CropAbs(last, width=1, height=c.height, left=column) + last = SmoothLevels(c.std.Invert(), input_low, 1, input_high, output_low, output_high, protect=protect_value, Smode=0).std.Invert() + last = last.std.CropAbs(width=1, height=c.height, left=column) last = Overlay(c, last, x=column) if c_orig is not None: last = core.std.ShufflePlanes([last, c_orig], planes=[0, 1, 2], colorfamily=c_orig.format.color_family) @@ -3990,8 +4067,9 @@ def FixColumnBrightnessProtect(c, column, input_low, input_high, output_low, out def FixRowBrightnessProtect(c, row, input_low, input_high, output_low, output_high, protect_value=20): if not isinstance(c, vs.VideoNode): raise vs.Error('FixRowBrightnessProtect: This is not a clip') + if c.format.color_family == vs.RGB: - raise vs.Error('FixRowBrightnessProtect: RGB color family is not supported') + raise vs.Error('FixRowBrightnessProtect: RGB format is not supported') shift = c.format.bits_per_sample - 8 peak = (1 << c.format.bits_per_sample) - 1 @@ -4008,8 +4086,8 @@ def FixRowBrightnessProtect(c, row, input_low, input_high, output_low, output_hi output_high = scale(255 - output_high, peak) protect_value = scale(protect_value, peak) - last = SmoothLevels(core.std.Invert(c), input_low, 1, input_high, output_low, output_high, protect=protect_value, Smode=0).std.Invert() - last = core.std.CropAbs(last, width=c.width, height=1, top=row) + last = SmoothLevels(c.std.Invert(), input_low, 1, input_high, output_low, output_high, protect=protect_value, Smode=0).std.Invert() + last = last.std.CropAbs(width=c.width, height=1, top=row) last = Overlay(c, last, y=row) if c_orig is not None: last = core.std.ShufflePlanes([last, c_orig], planes=[0, 1, 2], colorfamily=c_orig.format.color_family) @@ -4026,8 +4104,10 @@ def FixRowBrightnessProtect(c, row, input_low, input_high, output_low, output_hi def FixColumnBrightnessProtect2(c, column, adj_val, prot_val=16): if not isinstance(c, vs.VideoNode): raise vs.Error('FixColumnBrightnessProtect2: This is not a clip') + if c.format.color_family == vs.RGB: - raise vs.Error('FixColumnBrightnessProtect2: 
RGB color family is not supported') + raise vs.Error('FixColumnBrightnessProtect2: RGB format is not supported') + if not (-100 < adj_val < 100): raise vs.Error('FixColumnBrightnessProtect2: adj_val must be greater than -100 and less than 100') @@ -4040,8 +4120,8 @@ def FixColumnBrightnessProtect2(c, column, adj_val, prot_val=16): c_orig = None expr = f'x {scale(16, peak)} - {100 - adj_val} / 100 * {scale(16, peak)} + x {scale(255 - prot_val, peak)} - -10 / 0 max 1 min * x x {scale(245 - prot_val, peak)} - 10 / 0 max 1 min * +' - last = core.std.Expr([c], expr=[expr]) - last = core.std.CropAbs(last, width=1, height=c.height, left=column) + last = c.std.Expr(expr=[expr]) + last = last.std.CropAbs(width=1, height=c.height, left=column) last = Overlay(c, last, x=column) if c_orig is not None: last = core.std.ShufflePlanes([last, c_orig], planes=[0, 1, 2], colorfamily=c_orig.format.color_family) @@ -4051,8 +4131,10 @@ def FixColumnBrightnessProtect2(c, column, adj_val, prot_val=16): def FixRowBrightnessProtect2(c, row, adj_val, prot_val=16): if not isinstance(c, vs.VideoNode): raise vs.Error('FixRowBrightnessProtect2: This is not a clip') + if c.format.color_family == vs.RGB: - raise vs.Error('FixRowBrightnessProtect2: RGB color family is not supported') + raise vs.Error('FixRowBrightnessProtect2: RGB format is not supported') + if not (-100 < adj_val < 100): raise vs.Error('FixRowBrightnessProtect2: adj_val must be greater than -100 and less than 100') @@ -4065,8 +4147,8 @@ def FixRowBrightnessProtect2(c, row, adj_val, prot_val=16): c_orig = None expr = f'x {scale(16, peak)} - {100 - adj_val} / 100 * {scale(16, peak)} + x {scale(255 - prot_val, peak)} - -10 / 0 max 1 min * x x {scale(245 - prot_val, peak)} - 10 / 0 max 1 min * +' - last = core.std.Expr([c], expr=[expr]) - last = core.std.CropAbs(last, width=c.width, height=1, top=row) + last = c.std.Expr(expr=[expr]) + last = last.std.CropAbs(width=c.width, height=1, top=row) last = Overlay(c, last, y=row) if c_orig is not None: last = core.std.ShufflePlanes([last, c_orig], planes=[0, 1, 2], colorfamily=c_orig.format.color_family) @@ -4174,14 +4256,14 @@ def FixRowBrightnessProtect2(c, row, adj_val, prot_val=16): ### -------------------- ### In strength order: + 19 > 12 >> 20 > 11 - ### -### useDB [default: true] +### useDB [default: false] ### --------------------- ### Use f3kdb on top of removegrain: prevent posterize when doing levels conversion ### ### ######################################################################################### def SmoothLevels(input, input_low=0, gamma=1.0, input_high=None, output_low=0, output_high=None, chroma=50, limiter=0, Lmode=0, DarkSTR=100, BrightSTR=100, Ecenter=None, protect=-1, Ecurve=0, - Smode=-2, Mfactor=2, RGmode=12, useDB=True): + Smode=-2, Mfactor=2, RGmode=12, useDB=False): if not isinstance(input, vs.VideoNode): raise vs.Error('SmoothLevels: This is not a clip') @@ -4189,8 +4271,14 @@ def SmoothLevels(input, input_low=0, gamma=1.0, input_high=None, output_low=0, o raise vs.Error('SmoothLevels: RGB format is not supported') isGray = (input.format.color_family == vs.GRAY) - neutral = 1 << (input.format.bits_per_sample - 1) - peak = (1 << input.format.bits_per_sample) - 1 + isInteger = (input.format.sample_type == vs.INTEGER) + + if isInteger: + neutral = [1 << (input.format.bits_per_sample - 1)] * 2 + peak = (1 << input.format.bits_per_sample) - 1 + else: + neutral = [0.5, 0.0] + peak = 1.0 if chroma <= 0 and not isGray: input_orig = input @@ -4205,7 +4293,7 @@ def SmoothLevels(input, 
input_low=0, gamma=1.0, input_high=None, output_low=0, o output_high = peak if Ecenter is None: - Ecenter = neutral + Ecenter = neutral[0] if gamma <= 0: raise vs.Error('SmoothLevels: gamma must be greater than 0.0') @@ -4213,8 +4301,8 @@ def SmoothLevels(input, input_low=0, gamma=1.0, input_high=None, output_low=0, o if Mfactor <= 0: raise vs.Error('SmoothLevels: Mfactor must be greater than 0') - Dstr = DarkSTR / 100 - Bstr = BrightSTR / 100 + + if RGmode == 4: RemoveGrain = partial(core.std.Median) @@ -4228,86 +4316,93 @@ def SmoothLevels(input, input_low=0, gamma=1.0, input_high=None, output_low=0, o RemoveGrain = partial(core.rgvs.RemoveGrain, mode=[RGmode]) ### EXPRESSION - def get_lut(x): - exprY = ((x - input_low) / (input_high - input_low)) ** (1 / gamma) * (output_high - output_low) + output_low - if isinstance(exprY, complex): - exprY = exprY.real - - if Lmode <= 0: - exprL = 1 - elif Ecurve <= 0: - if Lmode == 1: - if x < Ecenter: - exprL = math.sin(x * (333 / 106) / (2 * Ecenter)) ** Dstr - elif x > Ecenter: - exprL = math.sin((333 / 106) / 2 + (x - Ecenter) * (333 / 106) / (2 * (peak - Ecenter))) ** Bstr - else: - exprL = 1 - elif Lmode == 2: - exprL = math.sin(x * (333 / 106) / (2 * peak)) ** Dstr - else: - exprL = math.sin((333 / 106) / 2 + x * (333 / 106) / (2 * peak)) ** Bstr + + exprY = f'x {input_low} - {input_high - input_low} / {1 / gamma} pow {output_high - output_low} * {output_low} +' + + + + scaleC = ((output_high - output_low) / (input_high - input_low) + 100 / chroma - 1) / (100 / chroma) + exprC = f'x {neutral[1]} - {scaleC} * {neutral[1]} +' + + Dstr = DarkSTR / 100 + Bstr = BrightSTR / 100 + + if Lmode <= 0: + exprL = '1' + elif Ecurve <= 0: + raise vs.Error('SmoothLevels: Ecurve=0 is unusable at the moment due to missing sin operator in Expr') + if Lmode == 1: + exprL = f'x {Ecenter} < x {333 / 106} * {2 * Ecenter} / sin {Dstr} pow x {Ecenter} > {(333 / 106) / 2} x {Ecenter} - {333 / 106} * {2 * (peak - Ecenter)} / + sin {Bstr} pow 1 ? ?' + + + + + + elif Lmode == 2: + exprL = f'x {333 / 106} * {2 * peak} / sin {Dstr} pow' else: - if Lmode == 1: - if x < Ecenter: - exprL = abs(x / Ecenter) ** Dstr - elif x > Ecenter: - exprL = (1 - abs((x - Ecenter) / (peak - Ecenter))) ** Bstr - else: - exprL = 1 - elif Lmode == 2: - exprL = (1 - abs((x - peak) / peak)) ** Dstr - else: - exprL = abs((x - peak) / peak) ** Bstr - - if protect <= -1: - exprP = 1 - elif Ecurve <= 0: - if x <= protect: - exprP = 0 - elif x >= protect + scale(16, peak): - exprP = 1 - else: - exprP = math.sin((x - protect) * (333 / 106) / (2 * scale(16, peak))) + exprL = f'{(333 / 106) / 2} x {333 / 106} * {2 * peak} / + sin {Bstr} pow' + else: + if Lmode == 1: + exprL = f'x {Ecenter} < x {Ecenter} / abs {Dstr} pow x {Ecenter} > 1 x {Ecenter} - {peak - Ecenter} / abs - {Bstr} pow 1 ? ?' + + + + + + elif Lmode == 2: + exprL = f'1 x {peak} - {peak} / abs - {Dstr} pow' else: - if x <= protect: - exprP = 0 - elif x >= protect + scale(16, peak): - exprP = 1 - else: - exprP = abs((x - protect) / scale(16, peak)) - - return min(max(cround(exprL * exprP * (exprY - x) + x), 0), peak) + exprL = f'x {peak} - {peak} / abs {Bstr} pow' + + if protect <= -1: + exprP = '1' + elif Ecurve <= 0: + raise vs.Error('SmoothLevels: Ecurve=0 is unusable at the moment due to missing sin operator in Expr') + exprP = f'x {protect} <= 0 x {protect + scale(16, peak)} >= 1 x {protect} - {333 / 106} * {2 * scale(16, peak)} / sin ? ?' 
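        # For reference, the protect weight that this expression reproduces (taken
        # from the removed get_lut code above) can be written in plain Python. A
        # sketch only, with scale16 standing for scale(16, peak) and 333/106 being
        # the script's rational approximation of pi:
        #
        #   import math
        #   def protect_weight(x, protect, scale16, ecurve):
        #       if x <= protect:
        #           return 0.0
        #       if x >= protect + scale16:
        #           return 1.0
        #       if ecurve <= 0:
        #           return math.sin((x - protect) * (333 / 106) / (2 * scale16))
        #       return abs((x - protect) / scale16)
        #
        # i.e. a quarter-sine (or, for Ecurve > 0, linear) ramp from 0 to 1 over the
        # 16 (8-bit) levels above `protect`.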
+ + + + + + + + + + else: + exprP = f'x {protect} <= 0 x {protect + scale(16, peak)} >= 1 x {protect} - {scale(16, peak)} / abs ? ?' + + ### PROCESS if limiter == 1 or limiter >= 3: - limitI = input.std.Expr(expr=[f'x {input_low} < {input_low} x {input_high} > {input_high} x ? ?']) + limitI = input.std.Expr(expr=[f'x {input_low} max {input_high} min']) else: limitI = input - level = limitI.std.Lut(planes=[0], function=get_lut) - if chroma > 0 and not isGray: - scaleC = ((output_high - output_low) / (input_high - input_low) + 100 / chroma - 1) / (100 / chroma) - level = level.std.Expr(expr=['', f'x {neutral} - {scaleC} * {neutral} +']) - diff = core.std.Expr([limitI, level], expr=[f'x y - {Mfactor} * {neutral} +']) + expr = exprL + ' ' + exprP + ' * ' + exprY + ' x - * x +' + level = limitI.std.Expr(expr=[expr] if chroma <= 0 or isGray else [expr, exprC]) + + + diff = core.std.Expr([limitI, level], expr=[f'x y - {Mfactor} * {neutral[1]} +']) process = RemoveGrain(diff) if useDB: - process = process.std.Expr(expr=[f'x {neutral} - {Mfactor} / {neutral} +']).f3kdb.Deband(grainy=0, grainc=0, output_depth=input.format.bits_per_sample) + process = process.std.Expr(expr=[f'x {neutral[1]} - {Mfactor} / {neutral[1]} +']).f3kdb.Deband(grainy=0, grainc=0, output_depth=input.format.bits_per_sample) smth = core.std.MakeDiff(limitI, process) else: - smth = core.std.Expr([limitI, process], expr=[f'x y {neutral} - {Mfactor} / -']) + smth = core.std.Expr([limitI, process], expr=[f'x y {neutral[1]} - {Mfactor} / -']) - level2 = core.std.Expr([limitI, diff], expr=[f'x y {neutral} - {Mfactor} / -']) - diff2 = core.std.Expr([level2, level], expr=[f'x y - {Mfactor} * {neutral} +']) + level2 = core.std.Expr([limitI, diff], expr=[f'x y {neutral[1]} - {Mfactor} / -']) + diff2 = core.std.Expr([level2, level], expr=[f'x y - {Mfactor} * {neutral[1]} +']) process2 = RemoveGrain(diff2) if useDB: - process2 = process2.std.Expr(expr=[f'x {neutral} - {Mfactor} / {neutral} +']).f3kdb.Deband(grainy=0, grainc=0, output_depth=input.format.bits_per_sample) + process2 = process2.std.Expr(expr=[f'x {neutral[1]} - {Mfactor} / {neutral[1]} +']).f3kdb.Deband(grainy=0, grainc=0, output_depth=input.format.bits_per_sample) smth2 = core.std.MakeDiff(smth, process2) else: - smth2 = core.std.Expr([smth, process2], expr=[f'x y {neutral} - {Mfactor} / -']) + smth2 = core.std.Expr([smth, process2], expr=[f'x y {neutral[1]} - {Mfactor} / -']) - mask1 = core.std.Expr([limitI, level], expr=[f'x y - abs {neutral} {Mfactor} / >= {peak} 0 ?']) - mask2 = core.std.Expr([limitI, level], expr=[f'x y - abs {peak} {Mfactor} / >= {peak} 0 ?']) + mask1 = core.std.Expr([limitI, level], expr=[f'x y - abs {neutral[0] / Mfactor} >= {peak} 0 ?']) + mask2 = core.std.Expr([limitI, level], expr=[f'x y - abs {peak / Mfactor} >= {peak} 0 ?']) if Smode >= 2: Slevel = smth2 @@ -4321,7 +4416,7 @@ def get_lut(x): Slevel = level if limiter >= 2: - limitO = Slevel.std.Expr(expr=[f'x {output_low} < {output_low} x {output_high} > {output_high} x ? 
?']) + limitO = Slevel.std.Expr(expr=[f'x {output_low} max {output_high} min']) else: limitO = Slevel @@ -4360,10 +4455,11 @@ def get_lut(x): def FastLineDarkenMOD(c, strength=48, protection=5, luma_cap=191, threshold=4, thinning=0): if not isinstance(c, vs.VideoNode): raise vs.Error('FastLineDarkenMOD: This is not a clip') + if c.format.color_family == vs.RGB: - raise vs.Error('FastLineDarkenMOD: RGB color family is not supported') + raise vs.Error('FastLineDarkenMOD: RGB format is not supported') - peak = (1 << c.format.bits_per_sample) - 1 + peak = (1 << c.format.bits_per_sample) - 1 if c.format.sample_type == vs.INTEGER else 1.0 if c.format.color_family != vs.GRAY: c_orig = c @@ -4378,14 +4474,14 @@ def FastLineDarkenMOD(c, strength=48, protection=5, luma_cap=191, threshold=4, t thn = thinning / 16 ## filtering ## - exin = core.std.Maximum(c, threshold=peak // (protection + 1)).std.Minimum() + exin = c.std.Maximum(threshold=peak / (protection + 1)).std.Minimum() thick = core.std.Expr([c, exin], expr=[f'y {lum} < y {lum} ? x {thr} + > x y {lum} < y {lum} ? - 0 ? {Str} * x +']) if thinning <= 0: last = thick else: diff = core.std.Expr([c, exin], expr=[f'y {lum} < y {lum} ? x {thr} + > x y {lum} < y {lum} ? - 0 ? {scale(127, peak)} +']) - linemask = core.std.Minimum(diff).std.Expr(expr=[f'x {scale(127, peak)} - {thn} * {peak} +']).std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]) - thin = core.std.Expr([core.std.Maximum(c), diff], expr=[f'x y {scale(127, peak)} - {Str} 1 + * +']) + linemask = diff.std.Minimum().std.Expr(expr=[f'x {scale(127, peak)} - {thn} * {peak} +']).std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]) + thin = core.std.Expr([c.std.Maximum(), diff], expr=[f'x y {scale(127, peak)} - {Str} 1 + * +']) last = core.std.MaskedMerge(thin, thick, linemask) if c_orig is not None: @@ -4413,8 +4509,9 @@ def FastLineDarkenMOD(c, strength=48, protection=5, luma_cap=191, threshold=4, t def Toon(input, str=1.0, l_thr=2, u_thr=12, blur=2, depth=32): if not isinstance(input, vs.VideoNode): raise vs.Error('Toon: This is not a clip') + if input.format.color_family == vs.RGB: - raise vs.Error('Toon: RGB color family is not supported') + raise vs.Error('Toon: RGB format is not supported') neutral = 1 << (input.format.bits_per_sample - 1) peak = (1 << input.format.bits_per_sample) - 1 @@ -4432,10 +4529,10 @@ def Toon(input, str=1.0, l_thr=2, u_thr=12, blur=2, depth=32): uthr8 = uthr / multiple ludiff = u_thr - l_thr - last = core.std.MakeDiff(core.std.Maximum(input).std.Minimum(), input) + last = core.std.MakeDiff(input.std.Maximum().std.Minimum(), input) last = core.std.Expr([last, Padding(last, 6, 6, 6, 6).warp.AWarpSharp2(blur=blur, depth=depth).std.Crop(6, 6, 6, 6)], expr=['x y min']) expr = f'y {lthr} <= {neutral} y {uthr} >= x {uthr8} y {multiple} / - 128 * x {multiple} / y {multiple} / {lthr8} - * + {ludiff} / {multiple} * ? {neutral} - {str} * {neutral} + ?' 
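    # Reading the RPN above, roughly: x is the (warp-limited) line diff, y its 3x3
    # maximum. Where y <= l_thr the output stays at neutral (no darkening); where
    # y >= u_thr the full diff x is kept; in between the value blends linearly from
    # neutral to x as y rises from l_thr to u_thr; the result is then scaled around
    # neutral by `str`.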
- last = core.std.MakeDiff(input, core.std.Expr([last, core.std.Maximum(last)], expr=[expr])) + last = core.std.MakeDiff(input, core.std.Expr([last, last.std.Maximum()], expr=[expr])) if input_orig is not None: last = core.std.ShufflePlanes([last, input_orig], planes=[0, 1, 2], colorfamily=input_orig.format.color_family) @@ -4752,7 +4849,7 @@ def LSFmod(input, strength=100, Smode=None, Smethod=None, kernel=11, preblur=0, peak = (1 << input.format.bits_per_sample) - 1 factor = 1 << (input.format.bits_per_sample - 8) else: - neutral = 0 + neutral = 0.0 peak = 1.0 factor = 255.0 @@ -4842,7 +4939,7 @@ def LSFmod(input, strength=100, Smode=None, Smethod=None, kernel=11, preblur=0, expr = 'x {i} < {peak} x {j} > 0 {peak} x {i} - {peak} {j} {i} - / * - ? ?'.format(i=scale(16, peak), j=scale(75, peak), peak=peak) pre = core.std.MaskedMerge(tmp.dfttest.DFTTest(tbsize=1, slocation=[0.0,4.0, 0.2,9.0, 1.0,15.0]), tmp, tmp.std.Expr(expr=[expr])) else: - pre = MinBlur(tmp, preblur) + pre = MinBlur(tmp, r=preblur) dark_limit = pre.std.Minimum() bright_limit = pre.std.Maximum() @@ -5058,13 +5155,13 @@ def TemporalDegrain( \ # Taking care of a missing denoising clip and use of fft3d to determine it if denoiseClip is None: - denoiseClip = neo_fft3d.FFT3D(inpClip, sigma=sigma\ + denoiseClip = inpClip.neo_fft3d.FFT3D(sigma=sigma\ , sigma2=sigma2, sigma3=sigma3, sigma4=sigma4, bw=blockWidth\ , bh=blockHeight, ow=overlapWidth, oh=overlapHeight) # If HQ is activated, do an additional denoising if HQ > 0: - filterClip = core.hqdn3d.Hqdn3d(denoiseClip, 4,3,6,3) + filterClip = denoiseClip.hqdn3d.Hqdn3d(4,3,6,3) else: filterClip = denoiseClip @@ -5075,28 +5172,28 @@ def TemporalDegrain( \ # Motion vector search (With very basic parameters. Add your own parameters # as needed.) - srchSuper = core.mv.Super(filterClip, pel=pel) + srchSuper = filterClip.mv.Super(pel=pel) if degrain == 3: - bvec3 = core.mv.Analyse(srchSuper, isb=True, delta=3, blksize=blockSize\ + bvec3 = srchSuper.mv.Analyse(isb=True, delta=3, blksize=blockSize\ , overlap=overlapValue) - fvec3 = core.mv.Analyse(srchSuper, isb=False, delta=3, blksize=blockSize\ + fvec3 = srchSuper.mv.Analyse(isb=False, delta=3, blksize=blockSize\ , overlap=overlapValue) if degrain >= 2: - bvec2 = core.mv.Analyse(srchSuper, isb=True, delta=2, blksize=blockSize\ + bvec2 = srchSuper.mv.Analyse(isb=True, delta=2, blksize=blockSize\ , overlap=overlapValue) - fvec2 = core.mv.Analyse(srchSuper, isb=False, delta=2, blksize=blockSize\ + fvec2 = srchSuper.mv.Analyse(isb=False, delta=2, blksize=blockSize\ , overlap=overlapValue) - bvec1 = core.mv.Analyse(srchSuper, isb=True, delta=1, blksize=blockSize\ + bvec1 = srchSuper.mv.Analyse(isb=True, delta=1, blksize=blockSize\ , overlap=overlapValue) - fvec1 = core.mv.Analyse(srchSuper, isb=False, delta=1, blksize=blockSize\ + fvec1 = srchSuper.mv.Analyse(isb=False, delta=1, blksize=blockSize\ , overlap=overlapValue) # First MV-denoising stage. Usually here's some temporal-medianfiltering # going on. For simplicity, we just use MVDegrain. 
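    # (Depending on `degrain`, one to three backward/forward vector pairs are passed
    # to mv.Degrain1/2/3 below; thsad=thrDegrain1 sets how strongly blocks are
    # blended and limit=maxPxChange caps how far any pixel may change.)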
- inpSuper = core.mv.Super(inpClip, pel=2, levels=1) + inpSuper = inpClip.mv.Super(pel=2, levels=1) if degrain == 3: nr1 = core.mv.Degrain3(inpClip, inpSuper, bvec1, fvec1, bvec2, fvec2\ , bvec3, fvec3, thsad=thrDegrain1, limit=maxPxChange) @@ -5113,7 +5210,7 @@ def TemporalDegrain( \ nr1X = core.std.MakeDiff(inpClip, dd, planes=0) # Second MV-denoising stage - nr1x_super = core.mv.Super(nr1X, pel=2, levels=1) + nr1x_super = nr1X.mv.Super(pel=2, levels=1) if degrain == 3: nr2 = core.mv.Degrain3(nr1X, nr1x_super, bvec1, fvec1, bvec2, fvec2\ @@ -5127,7 +5224,7 @@ def TemporalDegrain( \ # Temporal filter to remove the last bits of dancinc pixels, YMMV. if HQ >= 2: - nr2 = core.hqdn3d.Hqdn3d(nr2, 0,0,4,1) + nr2 = nr2.hqdn3d.Hqdn3d(0,0,4,1) # Contra-sharpening: sharpen the denoised clip, but don't add more than # what was removed previously. @@ -5199,35 +5296,37 @@ def aaf( \ sx = inputClip.width sy = inputClip.height + isGray = (inputClip.format.color_family == vs.GRAY) + neutral = 1 << (inputClip.format.bits_per_sample - 1) peak = (1 << inputClip.format.bits_per_sample) - 1 - isGray = (inputClip.format.color_family == vs.GRAY) + if aay > 0: # Do the upscaling if aas < 0: - aa = core.resize.Lanczos(inputClip, sx, 4*int(sy*aar)) + aa = inputClip.resize.Lanczos(sx, 4*int(sy*aar)) elif aar == 0.5: - aa = core.resize.Point(inputClip, 2*sx, 2*sy) + aa = inputClip.resize.Point(2*sx, 2*sy) else: - aa = core.resize.Lanczos(inputClip, 4*int(sx*aar), 4*int(sy*aar)) + aa = inputClip.resize.Lanczos(4*int(sx*aar), 4*int(sy*aar)) # y-Edges - aa = core.sangnom.SangNom(aa, aa=aay) + aa = aa.sangnom.SangNom(aa=aay) else: aa = inputClip if aax > 0: if aas < 0: - aa = core.resize.Lanczos(aa, 4*int(sx*aar), sy) - aa = core.std.Transpose(aa) + aa = aa.resize.Lanczos(4*int(sx*aar), sy) + aa = aa.std.Transpose() # x-Edges - aa = core.sangnom.SangNom(aa, aa=aax) - aa = core.std.Transpose(aa) + aa = aa.sangnom.SangNom(aa=aax) + aa = aa.std.Transpose() # Restore original scaling - aa = core.resize.Lanczos(aa, sx, sy) + aa = aa.resize.Lanczos(sx, sy) repMode = [18] if isGray else [18, 0] @@ -5238,11 +5337,11 @@ def aaf( \ return aa # u=1, v=1 is not directly so use the copy - mask = core.std.MakeDiff(core.std.Maximum(inputClip, planes=0)\ - , core.std.Minimum(inputClip, planes=0)\ + mask = core.std.MakeDiff(inputClip.std.Maximum(planes=0)\ + , inputClip.std.Minimum(planes=0)\ , planes=0) expr = 'x {i} > {estr} x {neutral} - {j} 90 / * {bstr} + ?'.format(i=scale(218, peak), estr=scale(estr, peak), neutral=neutral, j=estr - bstr, bstr=scale(bstr, peak)) - mask = core.std.Expr(mask, expr=[expr] if isGray else [expr, '']) + mask = mask.std.Expr(expr=[expr] if isGray else [expr, '']) merged = core.std.MaskedMerge(inputClip, aa, mask, planes=0) if aas > 0.84: @@ -5262,9 +5361,9 @@ def AverageFrames(clip, weights, scenechange=None, planes=None): raise vs.Error('AverageFrames: This is not a clip') if scenechange: - clip = SCDetect(clip, scenechange) - scenechange = True - return core.misc.AverageFrames(clip, weights=weights, scenechange=scenechange, planes=planes) + clip = SCDetect(clip, threshold=scenechange) + + return clip.misc.AverageFrames(weights=weights, scenechange=scenechange, planes=planes) def AvsPrewitt(clip, planes=None): @@ -5286,40 +5385,44 @@ def AvsPrewitt(clip, planes=None): def Bob(clip, b=1/3, c=1/3, tff=None): if not isinstance(clip, vs.VideoNode): raise vs.Error('Bob: This is not a clip') + if not isinstance(tff, bool): raise vs.Error("Bob: 'tff' must be set. 
Setting tff to true means top field first and false means bottom field first") - bits = clip.format.bits_per_sample - clip = core.std.SeparateFields(clip, tff).fmtc.resample(scalev=2, kernel='bicubic', a1=b, a2=c, interlaced=1, interlacedd=0) + bits_per_sample = clip.format.bits_per_sample + clip = clip.std.SeparateFields(tff=tff).fmtc.resample(scalev=2, kernel='bicubic', a1=b, a2=c, interlaced=1, interlacedd=0) - if clip.format.bits_per_sample != bits: - return core.fmtc.bitdepth(clip, bits=bits, dmode=1) - else: - return clip + if clip.format.bits_per_sample != bits_per_sample: + clip = clip.fmtc.bitdepth(bits=bits_per_sample, dmode=1) + + return clip def ChangeFPS(clip, fpsnum, fpsden=1): if not isinstance(clip, vs.VideoNode): raise vs.Error('ChangeFPS: This is not a clip') - multiple = fpsnum / fpsden * clip.fps_den / clip.fps_num + factor = (fpsnum / fpsden) * (clip.fps_den / clip.fps_num) def frame_adjuster(n): - real_n = math.floor(n / multiple) + real_n = math.floor(n / factor) one_frame_clip = clip[real_n] * (len(clip) + 100) return one_frame_clip - attribute_clip = core.std.BlankClip(clip, length=math.floor(len(clip) * multiple), fpsnum=fpsnum, fpsden=fpsden) - return core.std.FrameEval(attribute_clip, eval=frame_adjuster) + attribute_clip = clip.std.BlankClip(length=math.floor(len(clip) * factor), fpsnum=fpsnum, fpsden=fpsden) + return attribute_clip.std.FrameEval(eval=frame_adjuster) -def Clamp(clip, bright_limit, dark_limit, overshoot=0, undershoot=0, planes=[0, 1, 2]): +def Clamp(clip, bright_limit, dark_limit, overshoot=0, undershoot=0, planes=None): if not (isinstance(clip, vs.VideoNode) and isinstance(bright_limit, vs.VideoNode) and isinstance(dark_limit, vs.VideoNode)): raise vs.Error('Clamp: This is not a clip') + if bright_limit.format.id != clip.format.id or dark_limit.format.id != clip.format.id: - raise vs.Error('Clamp: All clips must have the same format') + raise vs.Error('Clamp: Clips must be the same format') - if isinstance(planes, int): + if planes is None: + planes = list(range(clip.format.num_planes)) + elif isinstance(planes, int): planes = [planes] expr = f'x y {overshoot} + > y {overshoot} + x ? z {undershoot} - < z {undershoot} - x y {overshoot} + > y {overshoot} + x ? ?' 
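    # Read as infix (presumably x = clip, y = bright_limit, z = dark_limit, given
    # the parameter order): the pixel is first capped at y + overshoot and then
    # raised to at least z - undershoot. A shorter equivalent form should be
    # 'x y {overshoot} + min z {undershoot} - max'.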
@@ -5329,77 +5432,91 @@ def Clamp(clip, bright_limit, dark_limit, overshoot=0, undershoot=0, planes=[0, def KNLMeansCL(clip, d=None, a=None, s=None, h=None, wmode=None, wref=None, device_type=None, device_id=None): if not isinstance(clip, vs.VideoNode): raise vs.Error('KNLMeansCL: This is not a clip') + if clip.format.color_family not in [vs.YUV, vs.YCOCG]: - raise vs.Error('KNLMeansCL: This wrapper is intended to be used for color family of YUV and YCoCg only') + raise vs.Error('KNLMeansCL: This wrapper is intended to be used only for YUV and YCoCg formats') if clip.format.subsampling_w > 0 or clip.format.subsampling_h > 0: - return core.knlm.KNLMeansCL(clip, d=d, a=a, s=s, h=h, wmode=wmode, wref=wref, device_type=device_type, device_id=device_id).knlm.KNLMeansCL( - channels='UV', d=d, a=a, s=s, h=h, wmode=wmode, wref=wref, device_type=device_type, device_id=device_id) - else: - return core.knlm.KNLMeansCL(clip, d=d, a=a, s=s, h=h, channels='YUV', wmode=wmode, wref=wref, device_type=device_type, device_id=device_id) - - -def Overlay(clipa, clipb, x=0, y=0, mask=None, opacity=1.0, mode='blend'): - if not (isinstance(clipa, vs.VideoNode) and isinstance(clipb, vs.VideoNode)): + return clip.knlm.KNLMeansCL(d=d, a=a, s=s, h=h, wmode=wmode, wref=wref, device_type=device_type, device_id=device_id).knlm.KNLMeansCL( + channels='UV', d=d, a=a, s=s, h=h, wmode=wmode, wref=wref, device_type=device_type, device_id=device_id) + else: + return clip.knlm.KNLMeansCL(d=d, a=a, s=s, h=h, channels='YUV', wmode=wmode, wref=wref, device_type=device_type, device_id=device_id) + + +# Available blend modes: +# normal +# addition +# average +# burn +# darken +# difference +# divide +# dodge +# exclusion +# extremity +# freeze +# glow +# hardlight +# hardmix +# heat +# lighten +# linearlight +# multiply +# negation +# overlay +# phoenix +# pinlight +# reflect +# screen +# subtract +# vividlight +def Overlay(base, overlay, x=0, y=0, mask=None, opacity=1.0, mode='normal', planes=None): + if not (isinstance(base, vs.VideoNode) and isinstance(overlay, vs.VideoNode)): raise vs.Error('Overlay: This is not a clip') if mask is not None: if not isinstance(mask, vs.VideoNode): raise vs.Error("Overlay: 'mask' is not a clip") - if mask.width != clipb.width or mask.height != clipb.height or mask.format.bits_per_sample != clipb.format.bits_per_sample: - raise vs.Error("Overlay: 'mask' must have the same dimensions and bit depth as 'clipb'") - - isGray = (clipa.format.color_family == vs.GRAY) - sample_type = clipa.format.sample_type - bits_per_sample = clipa.format.bits_per_sample + if mask.width != overlay.width or mask.height != overlay.height or mask.format.bits_per_sample != overlay.format.bits_per_sample: + raise vs.Error("Overlay: 'mask' must have the same dimensions and bit depth as 'overlay'") - if sample_type == vs.INTEGER: - neutral = [1 << (bits_per_sample - 1)] * 2 - peak = (1 << bits_per_sample) - 1 - range = peak + 1 - factor = 1 / range + if base.format.sample_type == vs.INTEGER: + neutral = 1 << (base.format.bits_per_sample - 1) + peak = (1 << base.format.bits_per_sample) - 1 + factor = 1 << base.format.bits_per_sample else: - neutral = [0.5, 0.0] - peak = range = factor = 1.0 - - matrix = '709' if clipa.width > 1024 or clipa.height > 576 else '170m' - matrix_in_s = matrix_s = None - - opacity = min(max(opacity, 0.0), 1.0) - mode = mode.lower() + neutral = 0.5 + peak = factor = 1.0 - if mode == 'chroma' and isGray: - return clipa + if planes is None: + planes = list(range(base.format.num_planes)) + elif 
isinstance(planes, int): + planes = [planes] - if clipa.format.subsampling_w > 0 or clipa.format.subsampling_h > 0: - clipa_orig = clipa - clipa = core.resize.Point(clipa, format=core.register_format(clipa.format.color_family, sample_type, bits_per_sample, 0, 0).id) + if base.format.subsampling_w > 0 or base.format.subsampling_h > 0: + base_orig = base + base = base.resize.Point(format=base.format.replace(subsampling_w=0, subsampling_h=0)) else: - clipa_orig = None + base_orig = None - if clipb.format.id != clipa.format.id: - clipb = core.resize.Point(clipb, format=clipa.format.id) + if overlay.format.id != base.format.id: + overlay = overlay.resize.Point(format=base.format) - if mask is not None and mask.format.id != clipb.format.id: + if mask is None: + mask = overlay.std.BlankClip(format=overlay.format.replace(color_family=vs.GRAY, subsampling_w=0, subsampling_h=0), color=[peak]) + elif mask.format.id != overlay.format.id: if mask.format.color_family != vs.GRAY: - mask = core.resize.Point(mask, format=clipb.format.id, range_s='full') + mask = mask.resize.Point(format=overlay.format, range_s='full') else: - mask = core.std.ShufflePlanes([mask], planes=[0, 0, 0], colorfamily=clipb.format.color_family) - - if mask is None and mode in ['blend', 'chroma', 'luma', 'difference']: - mask = core.std.BlankClip(clipb, format=core.register_format(vs.GRAY, sample_type, bits_per_sample, 0, 0).id, color=[peak]) + mask = mask.std.ShufflePlanes(planes=[0, 0, 0], colorfamily=overlay.format.color_family) - if mode in ['chroma', 'luma', 'multiply', 'lighten', 'darken', 'softlight', 'hardlight', 'difference', 'exclusion'] and clipa.format.color_family == vs.RGB: - clipa_orig = clipa - clipa = core.resize.Point(clipa, format=core.register_format(vs.YUV, sample_type, bits_per_sample, 0, 0).id, matrix_s=matrix, range_s='full') - clipb = core.resize.Point(clipb, format=core.register_format(vs.YUV, sample_type, bits_per_sample, 0, 0).id, matrix_s=matrix, range_s='full') - if mask is not None: - mask = core.std.ShufflePlanes([mask], planes=[0, 0, 0], colorfamily=vs.YUV) + opacity = min(max(opacity, 0.0), 1.0) + mode = mode.lower() # Calculate padding sizes - l, r = x, clipa.width - clipb.width - x - t, b = y, clipa.height - clipb.height - y + l, r = x, base.width - overlay.width - x + t, b = y, base.height - overlay.height - y # Split into crop and padding values cl, pl = min(l, 0) * -1, max(l, 0) @@ -5408,118 +5525,89 @@ def Overlay(clipa, clipb, x=0, y=0, mask=None, opacity=1.0, mode='blend'): cb, pb = min(b, 0) * -1, max(b, 0) # Crop and padding - if mode in ['multiply', 'darken']: - color = [peak] * clipb.format.num_planes - elif mode in ['softlight', 'hardlight']: - color = [neutral[0]] if isGray else [neutral[0], neutral[1], neutral[1]] - else: - color = None - - clipb = core.std.Crop(clipb, left=cl, right=cr, top=ct, bottom=cb) - clipb = core.std.AddBorders(clipb, left=pl, right=pr, top=pt, bottom=pb, color=color) - if mask is not None: - mask = core.std.Crop(mask, left=cl, right=cr, top=ct, bottom=cb) - mask = core.std.AddBorders(mask, left=pl, right=pr, top=pt, bottom=pb, color=[0] * mask.format.num_planes) - - if mode in ['blend', 'chroma', 'luma']: - if opacity < 1: - mask = core.std.Expr([mask], expr=[f'x {opacity} *']) - - if mode == 'luma' or isGray: - planes = [0] - elif mode == 'chroma': - planes = [1, 2] - else: - planes = [0, 1, 2] - - last = core.std.MaskedMerge(clipa, clipb, mask, planes=planes, first_plane=True) - elif mode in ['add', 'subtract']: - if clipa.format.color_family in [vs.YUV, 
vs.YCOCG]: - if clipa_orig is None: - clipa_orig = clipa - clipa = core.resize.Point(clipa, format=core.register_format(vs.RGB, sample_type, bits_per_sample, 0, 0).id, matrix_in_s=matrix) - clipb = core.resize.Point(clipb, format=core.register_format(vs.RGB, sample_type, bits_per_sample, 0, 0).id, matrix_in_s=matrix) - if mask is not None: - mask = core.std.ShufflePlanes([mask], planes=[0, 0, 0], colorfamily=vs.RGB) - matrix_s = matrix - - if mask is None: - expr = f'x y {opacity} * +' if mode == 'add' else f'x y {opacity} * -' - last = core.std.Expr([clipa, clipb], expr=[expr]) - else: - expr = f'x y z * {opacity} * {factor} * +' if mode == 'add' else f'x y z * {opacity} * {factor} * -' - last = core.std.Expr([clipa, clipb, mask], expr=[expr]) - elif mode == 'multiply': - if not isGray: - clipb = core.std.ShufflePlanes([clipb], planes=[0, 0, 0], colorfamily=clipb.format.color_family) - - if mask is None: - exprY = f'x {range} {1 - opacity} * y {opacity} * + * {factor} *' - exprUV = f'x {range} * {1 - opacity} * x y * {range} y - {neutral[1]} * + {opacity} * + {factor} *' - last = core.std.Expr([clipa, clipb], expr=[exprY] if isGray else [exprY, exprUV]) - else: - exprY = f'x {range} {range} z {opacity} * - * y z * {opacity} * + * {factor * factor} *' - exprUV = f'x {range} * {range} z {opacity} * - * x y * {neutral[1]} {range} y - * + z * {opacity} * + {factor * factor} *' - last = core.std.Expr([clipa, clipb, mask], expr=[exprY] if isGray else [exprY, exprUV]) - elif mode in ['lighten', 'darken']: - cmp = core.std.Expr([mvf.GetPlane(clipa, 0), mvf.GetPlane(clipb, 0)], expr=['y x > 1 0 ?' if mode == 'lighten' else 'y x < 1 0 ?']) - if not isGray: - cmp = core.std.ShufflePlanes([cmp], planes=[0, 0, 0], colorfamily=clipa.format.color_family) - - if mask is None: - expr = f'z 1 = x {1 - opacity} * y {opacity} * + x ?' - last = core.std.Expr([clipa, clipb, cmp], expr=[expr]) - else: - expr = f'a 1 = x {range} z {opacity} * - * y z * {opacity} * + {factor} * x ?' - last = core.std.Expr([clipa, clipb, mask, cmp], expr=[expr]) - elif mode in ['softlight', 'hardlight']: - if mask is None: - exprY = f'x {1 - opacity} * x y + {neutral[0]} - {opacity} * +' if mode == 'softlight' else f'x {1 - opacity} * x y 2 * + {neutral[0] * 2} - {opacity} * +' - exprUV = f'x {1 - opacity} * x y + {neutral[1]} - {opacity} * +' - last = core.std.Expr([clipa, clipb], expr=[exprY] if isGray else [exprY, exprUV]) - else: - exprY = f'x {range} z {opacity} * - * x y + {neutral[0]} - z * {opacity} * + {factor} *' if mode == 'softlight' else f'x {range} z {opacity} * - * x y 2 * + {neutral[0] * 2} - z * {opacity} * + {factor} *' - exprUV = f'x {range} z {opacity} * - * x y + {neutral[1]} - z * {opacity} * + {factor} *' - last = core.std.Expr([clipa, clipb, mask], expr=[exprY] if isGray else [exprY, exprUV]) + overlay = overlay.std.Crop(left=cl, right=cr, top=ct, bottom=cb) + overlay = overlay.std.AddBorders(left=pl, right=pr, top=pt, bottom=pb) + mask = mask.std.Crop(left=cl, right=cr, top=ct, bottom=cb) + mask = mask.std.AddBorders(left=pl, right=pr, top=pt, bottom=pb, color=[0] * mask.format.num_planes) + + if opacity < 1: + mask = mask.std.Expr(expr=[f'x {opacity} *']) + + if mode == 'normal': + pass + elif mode == 'addition': + expr = f'x y +' + elif mode == 'average': + expr = f'x y + 2 /' + elif mode == 'burn': + expr = f'x 0 <= x 0 {peak} {peak} y - {factor} * x / - max ?' 
+ elif mode == 'darken': + expr = f'x y min' elif mode == 'difference': - exprY = f'x {1 - opacity} * x y - abs {neutral[0]} + {opacity} * +' - exprUV = f'x {1 - opacity} * x y - abs {neutral[1]} + {opacity} * +' - last = core.std.Expr([clipa, clipb], expr=[exprY] if isGray else [exprY, exprUV]) - last = core.std.MaskedMerge(clipa, last, mask, first_plane=True) + expr = f'x y - abs' + elif mode == 'divide': + expr = f'y 0 <= {peak} {peak} x * y / ?' + elif mode == 'dodge': + expr = f'x {peak} >= x {peak} y {factor} * {peak} x - / min ?' elif mode == 'exclusion': - if not isGray: - clipb = core.std.ShufflePlanes([clipb], planes=[0, 0, 0], colorfamily=clipb.format.color_family) - - if mask is None: - if sample_type == vs.INTEGER: - exprY = exprUV = f'x {1 - opacity} * {peak} x - y * {peak} y - x * + {factor} * {opacity} * +' - else: - exprY = f'x {1 - opacity} * {peak} x - y * {peak} y - x * + {factor} * {opacity} * +' - exprUV = f'x 0.5 + {1 - opacity} * {peak} x 0.5 + - y * {peak} y - x 0.5 + * + {factor} * {opacity} * + 0.5 -' - last = core.std.Expr([clipa, clipb], expr=[exprY] if isGray else [exprY, exprUV]) - else: - if sample_type == vs.INTEGER: - exprY = exprUV = f'x {range} z {opacity} * - * {peak} x - y * {peak} y - x * + {factor} * z * {opacity} * + {factor} *' - else: - exprY = f'x {range} z {opacity} * - * {peak} x - y * {peak} y - x * + {factor} * z * {opacity} * + {factor} *' - exprUV = f'x 0.5 + {range} z {opacity} * - * {peak} x 0.5 + - y * {peak} y - x 0.5 + * + {factor} * z * {opacity} * + {factor} * 0.5 -' - last = core.std.Expr([clipa, clipb, mask], expr=[exprY] if isGray else [exprY, exprUV]) + expr = f'x y + 2 x * y * {peak} / -' + elif mode == 'extremity': + expr = f'{peak} x - y - abs' + elif mode == 'freeze': + expr = f'y 0 <= 0 {peak} {peak} x - dup * y / {peak} min - ?' + elif mode == 'glow': + expr = f'x {peak} >= x {peak} y y * {peak} x - / min ?' + elif mode == 'hardlight': + expr = f'y {neutral} < 2 y x * {peak} / * {peak} 2 {peak} y - {peak} x - * {peak} / * - ?' + elif mode == 'hardmix': + expr = f'x {peak} y - < 0 {peak} ?' + elif mode == 'heat': + expr = f'x 0 <= 0 {peak} {peak} y - dup * x / {peak} min - ?' + elif mode == 'lighten': + expr = f'x y max' + elif mode == 'linearlight': + expr = f'y {neutral} < y 2 x * + {peak} - y 2 x {neutral} - * + ?' + elif mode == 'multiply': + expr = f'x y * {peak} /' + elif mode == 'negation': + expr = f'{peak} {peak} x - y - abs -' + elif mode == 'overlay': + expr = f'x {neutral} < 2 x y * {peak} / * {peak} 2 {peak} x - {peak} y - * {peak} / * - ?' + elif mode == 'phoenix': + expr = f'x y min x y max - {peak} +' + elif mode == 'pinlight': + expr = f'y {neutral} < x 2 y * min x 2 y {neutral} - * max ?' + elif mode == 'reflect': + expr = f'y {peak} >= y {peak} x x * {peak} y - / min ?' + elif mode =='screen': + expr = f'{peak} {peak} x - {peak} y - * {peak} / -' + # elif mode == 'softlight': # Expr hangs for unknown reason. Disabled until Expr gets fixed. + # expr = f'x {neutral} > y {peak} y - x {neutral} - * {neutral} / 0.5 y {neutral} - abs {peak} / - * + y y {neutral} x - {neutral} / * 0.5 y {neutral} - abs {peak} / - * - ?' + elif mode == 'subtract': + expr = f'x y -' + elif mode == 'vividlight': + expr = f'x {neutral} < x 0 <= 2 x * 0 {peak} {peak} y - {factor} * 2 x * / - max ? 2 x {neutral} - * {peak} >= 2 x {neutral} - * {peak} y {factor} * {peak} 2 x {neutral} - * - / min ? ?' 
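    # In these blend expressions x is the overlay clip and y the base clip (they
    # are fed to std.Expr in that order a few lines below), with values in the
    # clip's native range; e.g. 'screen' above works out to
    # peak - (peak - x) * (peak - y) / peak.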
else: raise vs.Error("Overlay: invalid 'mode' specified") + if mode != 'normal': + overlay = core.std.Expr([overlay, base], expr=[expr if i in planes else '' for i in range(base.format.num_planes)]) + last = core.std.MaskedMerge(base, overlay, mask, planes=planes) + # Return padded clip - if clipa_orig is not None: - last = core.resize.Point(last, format=clipa_orig.format.id, matrix_in_s=matrix_in_s, matrix_s=matrix_s) + if base_orig is not None: + last = core.resize.Point(last, format=base_orig.format) return last def Padding(clip, left=0, right=0, top=0, bottom=0): if not isinstance(clip, vs.VideoNode): raise vs.Error('Padding: This is not a clip') + if left < 0 or right < 0 or top < 0 or bottom < 0: raise vs.Error('Padding: border size to pad must not be negative') - return core.resize.Point(clip, clip.width + left + right, clip.height + top + bottom, src_left=-left, src_top=-top, src_width=clip.width + left + right, src_height=clip.height + top + bottom) + return clip.resize.Point(clip.width + left + right, clip.height + top + bottom, src_left=-left, src_top=-top, src_width=clip.width + left + right, src_height=clip.height + top + bottom) def Resize(src, w, h, sx=None, sy=None, sw=None, sh=None, kernel=None, taps=None, a1=None, a2=None, invks=None, invkstaps=None, css=None, planes=None, @@ -5542,12 +5630,12 @@ def Resize(src, w, h, sx=None, sy=None, sw=None, sh=None, kernel=None, taps=None nrb = thr < sr < thr + 1 nrf = sr < thr + 1 and noring - main = core.fmtc.resample(src, w, h, sx, sy, sw, sh, kernel=kernel, taps=taps, a1=a1, a2=a2, invks=invks, invkstaps=invkstaps, css=css, planes=planes, center=center, - cplace=cplace, cplaces=cplaces, cplaced=cplaced, interlaced=interlaced, interlacedd=interlacedd, tff=tff, tffd=tffd, flt=flt) + main = src.fmtc.resample(w, h, sx, sy, sw, sh, kernel=kernel, taps=taps, a1=a1, a2=a2, invks=invks, invkstaps=invkstaps, css=css, planes=planes, center=center, + cplace=cplace, cplaces=cplaces, cplaced=cplaced, interlaced=interlaced, interlacedd=interlacedd, tff=tff, tffd=tffd, flt=flt) if nrf: - nrng = core.fmtc.resample(src, w, h, sx, sy, sw, sh, kernel='gauss', taps=taps, a1=100, invks=invks, invkstaps=invkstaps, css=css, planes=planes, center=center, - cplace=cplace, cplaces=cplaces, cplaced=cplaced, interlaced=interlaced, interlacedd=interlacedd, tff=tff, tffd=tffd, flt=flt) + nrng = src.fmtc.resample(w, h, sx, sy, sw, sh, kernel='gauss', taps=taps, a1=100, invks=invks, invkstaps=invkstaps, css=css, planes=planes, center=center, + cplace=cplace, cplaces=cplaces, cplaced=cplaced, interlaced=interlaced, interlacedd=interlacedd, tff=tff, tffd=tffd, flt=flt) last = core.rgvs.Repair(main, nrng, mode=[1]) if nrb: @@ -5571,7 +5659,7 @@ def Resize(src, w, h, sx=None, sy=None, sw=None, sh=None, kernel=None, taps=None for i in range(last.format.num_planes): if planes[i] != 1: planes2.append(i) - return core.fmtc.bitdepth(last, bits=bits, planes=planes2, fulls=fulls, fulld=fulld, dmode=dmode, ampo=ampo, ampn=ampn, dyn=dyn, staticnoise=staticnoise, patsize=patsize) + return last.fmtc.bitdepth(bits=bits, planes=planes2, fulls=fulls, fulld=fulld, dmode=dmode, ampo=ampo, ampn=ampn, dyn=dyn, staticnoise=staticnoise, patsize=patsize) def SCDetect(clip, threshold=None): @@ -5586,11 +5674,11 @@ def copy_property(n, f): sc = clip if clip.format.color_family == vs.RGB: - sc = core.resize.Bicubic(clip, format=vs.GRAY8, matrix_s='709') - sc = core.misc.SCDetect(sc, threshold) + sc = clip.resize.Bicubic(format=vs.GRAY8, matrix_s='709') + sc = 
sc.misc.SCDetect(threshold=threshold) if clip.format.color_family == vs.RGB: - sc = core.std.ModifyFrame(clip, clips=[clip, sc], selector=copy_property) + sc = clip.std.ModifyFrame(clips=[clip, sc], selector=copy_property) return sc @@ -5598,7 +5686,7 @@ def Weave(clip, tff): if not isinstance(clip, vs.VideoNode): raise vs.Error('Weave: This is not a clip') - return core.std.DoubleWeave(clip, tff)[::2] + return clip.std.DoubleWeave(tff=tff)[::2] ######################################## @@ -5610,33 +5698,36 @@ def Weave(clip, tff): # Parameters: # radius (int) - Spatial radius for contra-sharpening (1-3). Default is 2 for HD / 1 for SD # rep (int) - Mode of repair to limit the difference. Default is 13 -# planes (int[]) - Whether to process the corresponding plane. The other planes will be passed through unchanged. Default is [0, 1, 2] -def ContraSharpening(denoised, original, radius=None, rep=13, planes=[0, 1, 2]): +# planes (int[]) - Whether to process the corresponding plane. The other planes will be passed through unchanged. +def ContraSharpening(denoised, original, radius=None, rep=13, planes=None): if not (isinstance(denoised, vs.VideoNode) and isinstance(original, vs.VideoNode)): raise vs.Error('ContraSharpening: This is not a clip') + if denoised.format.id != original.format.id: - raise vs.Error('ContraSharpening: Both clips must have the same format') + raise vs.Error('ContraSharpening: Clips must be the same format') neutral = 1 << (denoised.format.bits_per_sample - 1) - if denoised.format.color_family == vs.GRAY: - planes = [0] - if isinstance(planes, int): + + if planes is None: + planes = list(range(denoised.format.num_planes)) + + elif isinstance(planes, int): planes = [planes] if radius is None: radius = 2 if denoised.width > 1024 or denoised.height > 576 else 1 - s = MinBlur(denoised, radius, planes=planes) # damp down remaining spots of the denoised clip + s = MinBlur(denoised, r=radius, planes=planes) # damp down remaining spots of the denoised clip matrix1 = [1, 2, 1, 2, 4, 2, 1, 2, 1] matrix2 = [1, 1, 1, 1, 1, 1, 1, 1, 1] if radius <= 1: - RG11 = core.std.Convolution(s, matrix=matrix1, planes=planes) + RG11 = s.std.Convolution(matrix=matrix1, planes=planes) elif radius == 2: - RG11 = core.std.Convolution(s, matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes) + RG11 = s.std.Convolution(matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes) else: - RG11 = core.std.Convolution(s, matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes).std.Convolution(matrix=matrix2, planes=planes) + RG11 = s.std.Convolution(matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes).std.Convolution(matrix=matrix2, planes=planes) ssD = core.std.MakeDiff(s, RG11, planes=planes) # the difference of a simple kernel blur allD = core.std.MakeDiff(original, denoised, planes=planes) # the difference achieved by the denoising @@ -5648,13 +5739,14 @@ def ContraSharpening(denoised, original, radius=None, rep=13, planes=[0, 1, 2]): # MinBlur by Didée (http://avisynth.nl/index.php/MinBlur) # Nifty Gauss/Median combination -def MinBlur(clp, r=1, planes=[0, 1, 2]): +def MinBlur(clp, r=1, planes=None): if not isinstance(clp, vs.VideoNode): raise vs.Error('MinBlur: This is not a clip') - if clp.format.color_family == vs.GRAY: - planes = [0] - if isinstance(planes, int): + if planes is None: + planes = list(range(clp.format.num_planes)) + + elif isinstance(planes, int): planes = [planes] matrix1 = [1, 2, 1, 2, 4, 2, 1, 
2, 1] @@ -5662,89 +5754,91 @@ def MinBlur(clp, r=1, planes=[0, 1, 2]): if r <= 0: RG11 = sbr(clp, planes=planes) - RG4 = core.std.Median(clp, planes=planes) + RG4 = clp.std.Median(planes=planes) elif r == 1: - RG11 = core.std.Convolution(clp, matrix=matrix1, planes=planes) - RG4 = core.std.Median(clp, planes=planes) + RG11 = clp.std.Convolution(matrix=matrix1, planes=planes) + RG4 = clp.std.Median(planes=planes) elif r == 2: - RG11 = core.std.Convolution(clp, matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes) - RG4 = core.ctmf.CTMF(clp, radius=2, planes=planes) + RG11 = clp.std.Convolution(matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes) + RG4 = clp.ctmf.CTMF(radius=2, planes=planes) else: - RG11 = core.std.Convolution(clp, matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes).std.Convolution(matrix=matrix2, planes=planes) + RG11 = clp.std.Convolution(matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes).std.Convolution(matrix=matrix2, planes=planes) if clp.format.bits_per_sample == 16: s16 = clp - RG4 = core.fmtc.bitdepth(clp, bits=12, planes=planes, dmode=1).ctmf.CTMF(radius=3, planes=planes).fmtc.bitdepth(bits=16, planes=planes) + RG4 = clp.fmtc.bitdepth(bits=12, planes=planes, dmode=1).ctmf.CTMF(radius=3, planes=planes).fmtc.bitdepth(bits=16, planes=planes) RG4 = mvf.LimitFilter(s16, RG4, thr=0.0625, elast=2, planes=planes) else: - RG4 = core.ctmf.CTMF(clp, radius=3, planes=planes) + RG4 = clp.ctmf.CTMF(radius=3, planes=planes) expr = 'x y - x z - * 0 < x x y - abs x z - abs < y z ? ?' return core.std.Expr([clp, RG11, RG4], expr=[expr if i in planes else '' for i in range(clp.format.num_planes)]) # make a highpass on a blur's difference (well, kind of that) -def sbr(c, r=1, planes=[0, 1, 2]): +def sbr(c, r=1, planes=None): if not isinstance(c, vs.VideoNode): raise vs.Error('sbr: This is not a clip') - neutral = 1 << (c.format.bits_per_sample - 1) - if c.format.color_family == vs.GRAY: - planes = [0] - if isinstance(planes, int): + neutral = 1 << (c.format.bits_per_sample - 1) if c.format.sample_type == vs.INTEGER else 0.0 + + if planes is None: + planes = list(range(c.format.num_planes)) + elif isinstance(planes, int): planes = [planes] matrix1 = [1, 2, 1, 2, 4, 2, 1, 2, 1] matrix2 = [1, 1, 1, 1, 1, 1, 1, 1, 1] if r <= 1: - RG11 = core.std.Convolution(c, matrix=matrix1, planes=planes) + RG11 = c.std.Convolution(matrix=matrix1, planes=planes) elif r == 2: - RG11 = core.std.Convolution(c, matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes) + RG11 = c.std.Convolution(matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes) else: - RG11 = core.std.Convolution(c, matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes).std.Convolution(matrix=matrix2, planes=planes) + RG11 = c.std.Convolution(matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes).std.Convolution(matrix=matrix2, planes=planes) RG11D = core.std.MakeDiff(c, RG11, planes=planes) if r <= 1: - RG11DS = core.std.Convolution(RG11D, matrix=matrix1, planes=planes) + RG11DS = RG11D.std.Convolution(matrix=matrix1, planes=planes) elif r == 2: - RG11DS = core.std.Convolution(RG11D, matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes) + RG11DS = RG11D.std.Convolution(matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes) else: - RG11DS = core.std.Convolution(RG11D, matrix=matrix1, 
planes=planes).std.Convolution(matrix=matrix2, planes=planes).std.Convolution(matrix=matrix2, planes=planes) + RG11DS = RG11D.std.Convolution(matrix=matrix1, planes=planes).std.Convolution(matrix=matrix2, planes=planes).std.Convolution(matrix=matrix2, planes=planes) expr = f'x y - x {neutral} - * 0 < {neutral} x y - abs x {neutral} - abs < x y - {neutral} + x ? ?' RG11DD = core.std.Expr([RG11D, RG11DS], expr=[expr if i in planes else '' for i in range(c.format.num_planes)]) return core.std.MakeDiff(c, RG11DD, planes=planes) -def sbrV(c, r=1, planes=[0, 1, 2]): +def sbrV(c, r=1, planes=None): if not isinstance(c, vs.VideoNode): raise vs.Error('sbrV: This is not a clip') - neutral = 1 << (c.format.bits_per_sample - 1) - if c.format.color_family == vs.GRAY: - planes = [0] - if isinstance(planes, int): + neutral = 1 << (c.format.bits_per_sample - 1) if c.format.sample_type == vs.INTEGER else 0.0 + + if planes is None: + planes = list(range(c.format.num_planes)) + elif isinstance(planes, int): planes = [planes] matrix1 = [1, 2, 1] matrix2 = [1, 4, 6, 4, 1] if r <= 1: - RG11 = core.std.Convolution(c, matrix=matrix1, planes=planes, mode='v') + RG11 = c.std.Convolution(matrix=matrix1, planes=planes, mode='v') elif r == 2: - RG11 = core.std.Convolution(c, matrix=matrix1, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v') + RG11 = c.std.Convolution(matrix=matrix1, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v') else: - RG11 = core.std.Convolution(c, matrix=matrix1, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v') + RG11 = c.std.Convolution(matrix=matrix1, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v') RG11D = core.std.MakeDiff(c, RG11, planes=planes) if r <= 1: - RG11DS = core.std.Convolution(RG11D, matrix=matrix1, planes=planes, mode='v') + RG11DS = RG11D.std.Convolution(matrix=matrix1, planes=planes, mode='v') elif r == 2: - RG11DS = core.std.Convolution(RG11D, matrix=matrix1, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v') + RG11DS = RG11D.std.Convolution(matrix=matrix1, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v') else: - RG11DS = core.std.Convolution(RG11D, matrix=matrix1, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v') + RG11DS = RG11D.std.Convolution(matrix=matrix1, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v').std.Convolution(matrix=matrix2, planes=planes, mode='v') expr = f'x y - x {neutral} - * 0 < {neutral} x y - abs x {neutral} - abs < x y - {neutral} + x ? ?' 
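    # In the expression below x is the highpass RG11D and y its re-blurred version
    # RG11DS, with `neutral` as the zero point: if (x - y) and (x - neutral) have
    # opposite signs the pixel is reset to neutral; otherwise the smaller of the two
    # deviations is kept, which is the usual sbr "keep only what both passes agree
    # on" limiter.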
RG11DD = core.std.Expr([RG11D, RG11DS], expr=[expr if i in planes else '' for i in range(c.format.num_planes)]) @@ -5759,17 +5853,22 @@ def sbrV(c, r=1, planes=[0, 1, 2]): def DitherLumaRebuild(src, s0=2.0, c=0.0625, chroma=True): if not isinstance(src, vs.VideoNode): raise vs.Error('DitherLumaRebuild: This is not a clip') + if src.format.color_family == vs.RGB: - raise vs.Error('DitherLumaRebuild: RGB color family is not supported') + raise vs.Error('DitherLumaRebuild: RGB format is not supported') - shift = src.format.bits_per_sample - 8 - neutral = 128 << shift + + isGray = (src.format.color_family == vs.GRAY) + isInteger = (src.format.sample_type == vs.INTEGER) + + shift = src.format.bits_per_sample - 8 + neutral = 128 << shift if isInteger else 0.0 k = (s0 - 1) * c - t = f'x {16 << shift} - {219 << shift} / 0 max 1 min' - e = f'{k} {1 + c} {(1 + c) * c} {t} {c} + / - * {t} 1 {k} - * + {256 << shift} *' - return core.std.Expr([src], expr=[e] if isGray else [e, f'x {neutral} - 128 * 112 / {neutral} +' if chroma else '']) + t = f'x {16 << shift if isInteger else 16 / 255} - {219 << shift if isInteger else 219 / 255} / 0 max 1 min' + e = f'{k} {1 + c} {(1 + c) * c} {t} {c} + / - * {t} 1 {k} - * + {256 << shift if isInteger else 256 / 255} *' + return src.std.Expr(expr=[e] if isGray else [e, f'x {neutral} - 128 * 112 / {neutral} +' if chroma else '']) #============================================================================= @@ -5803,9 +5902,9 @@ def mt_expand_multi(src, mode='rectangle', planes=None, sw=1, sh=1): mode_m = None if mode_m is not None: - return mt_expand_multi(core.std.Maximum(src, planes=planes, coordinates=mode_m), mode=mode, planes=planes, sw=sw - 1, sh=sh - 1) - else: - return src + src = mt_expand_multi(src.std.Maximum(planes=planes, coordinates=mode_m), mode=mode, planes=planes, sw=sw - 1, sh=sh - 1) + + return src def mt_inpand_multi(src, mode='rectangle', planes=None, sw=1, sh=1): @@ -5822,9 +5921,9 @@ def mt_inpand_multi(src, mode='rectangle', planes=None, sw=1, sh=1): mode_m = None if mode_m is not None: - return mt_inpand_multi(core.std.Minimum(src, planes=planes, coordinates=mode_m), mode=mode, planes=planes, sw=sw - 1, sh=sh - 1) - else: - return src + src = mt_inpand_multi(src.std.Minimum(planes=planes, coordinates=mode_m), mode=mode, planes=planes, sw=sw - 1, sh=sh - 1) + + return src def mt_inflate_multi(src, planes=None, radius=1): diff --git a/nnedi3_rpow2.py b/nnedi3_rpow2.py new file mode 100644 index 0000000..c0c032c --- /dev/null +++ b/nnedi3_rpow2.py @@ -0,0 +1,92 @@ +import vapoursynth as vs + + +def nnedi3_rpow2(clip, rfactor=2, width=None, height=None, correct_shift=True, + kernel="spline36", nsize=0, nns=3, qual=None, etype=None, pscrn=None, + opt=True, int16_prescreener=None, int16_predictor=None, exp=None): + """nnedi3_rpow2 is for enlarging images by powers of 2. + + Args: + rfactor (int): Image enlargement factor. + Must be a power of 2 in the range [2 to 1024]. + correct_shift (bool): If False, the shift is not corrected. + The correction is accomplished by using the subpixel + cropping capability of fmtc's resizers. + width (int): If correcting the image center shift by using the + "correct_shift" parameter, width/height allow you to set a + new output resolution. + kernel (string): Sets the resizer used for correcting the image + center shift that nnedi3_rpow2 introduces. This can be any of + fmtc kernels, such as "cubic", "spline36", etc. + spline36 is the default one. 
+ nnedi3_args (mixed): For help with nnedi3 args + refert to nnedi3 documentation. + """ + core = vs.get_core() + + # Setting up variables + + plugins = core.get_plugins() + + if width is None: + width = clip.width*rfactor + if height is None: + height = clip.height*rfactor + hshift = 0.0 + vshift = -0.5 + pkdnnedi = dict(dh=True, nsize=nsize, nns=nns, qual=qual, etype=etype, + pscrn=pscrn, opt=opt, int16_prescreener=int16_prescreener, + int16_predictor=int16_predictor, exp=exp) + pkdchroma = dict(kernel=kernel, sy=-0.5, planes=[2, 3, 3]) + + tmp = 1 + times = 0 + while tmp < rfactor: + tmp *= 2 + times += 1 + + # Checks + + if rfactor < 2 or rfactor > 1024: + raise ValueError("nnedi3_rpow2: rfactor must be between 2 and 1024") + + if tmp != rfactor: + raise ValueError("nnedi3_rpow2: rfactor must be a power of 2") + + if 'com.deinterlace.nnedi3' not in plugins: + raise RuntimeError("nnedi3_rpow2: nnedi3 plugin is required") + if correct_shift or clip.format.subsampling_h: + if 'fmtconv' not in plugins: + raise RuntimeError("nnedi3_rpow2: fmtconv plugin is required") + + # Processing + + last = clip + + for i in range(times): + field = 1 if i == 0 else 0 + last = core.nnedi3.nnedi3(last, field=field, **pkdnnedi) + last = core.std.Transpose(last) + if last.format.subsampling_w: + # Apparently always using field=1 for the horizontal pass somehow + # keeps luma/chroma alignment. + field = 1 + hshift = hshift*2 - 0.5 + else: + hshift = -0.5 + last = core.nnedi3.nnedi3(last, field=field, **pkdnnedi) + last = core.std.Transpose(last) + + # Correct vertical shift of the chroma. + + if clip.format.subsampling_h: + last = core.fmtc.resample(last, w=last.width, h=last.height, **pkdchroma) + + if correct_shift is True: + last = core.fmtc.resample(last, w=width, h=height, kernel=kernel, + sx=hshift, sy=vshift) + + if last.format.id != clip.format.id: + last = core.fmtc.bitdepth(last, csp=clip.format.id) + + return last diff --git a/psharpen.py b/psharpen.py deleted file mode 100644 index 952d801..0000000 --- a/psharpen.py +++ /dev/null @@ -1,86 +0,0 @@ -import vapoursynth as vs -""" From https://gist.github.com/4re/2545a281e3f17ba6ef82#file-psharpen-py """ - -def _clamp(minimum, x, maximum): - return int(max(minimum, min(round(x), maximum))) - - -def _m4(x, m=4.0): - return 16 if x < 16 else int(round(x / m) * m) - - -def psharpen(clip, strength=25, threshold=75, ss_x=1.0, ss_y=1.0, - dest_x=None, dest_y=None): - """From http://forum.doom9.org/showpost.php?p=683344&postcount=28 - - Sharpeing function similar to LimitedSharpenFaster. - - Args: - strength (int): Strength of the sharpening. - threshold (int): Controls "how much" to be sharpened. - ss_x (float): Supersampling factor (reduce aliasing on edges). - ss_y (float): Supersampling factor (reduce aliasing on edges). - dest_x (int): Output resolution after sharpening. - dest_y (int): Output resolution after sharpening. 
- """ - core = vs.get_core() - - ox = clip.width - oy = clip.height - - bd = clip.format.bits_per_sample - max_ = 2 ** bd - 1 - scl = (max_ + 1) // 256 - x = 'x {} /'.format(scl) if bd != 8 else 'x' - y = 'y {} /'.format(scl) if bd != 8 else 'y' - - if dest_x is None: - dest_x = ox - if dest_y is None: - dest_y = oy - - strength = _clamp(0, strength, 100) - threshold = _clamp(0, threshold, 100) - - if ss_x < 1.0: - ss_x = 1.0 - if ss_y < 1.0: - ss_y = 1.0 - - if ss_x != 1.0 or ss_y != 1.0: - clip = core.resize.Lanczos(clip, width=_m4(ox*ss_x), height=_m4(oy*ss_y)) - - orig = clip - - if orig.format.num_planes != 1: - clip = core.std.ShufflePlanes(clips=clip, planes=[0], - colorfamily=vs.GRAY) - val = clip - - max_ = core.std.Maximum(clip) - min_ = core.std.Minimum(clip) - - nmax = core.std.Expr([max_, min_], ['x y -']) - nval = core.std.Expr([val, min_], ['x y -']) - - s = strength/100.0 - t = threshold/100.0 - x0 = t * (1.0 - s) / (1.0 - (1.0 - t) * (1.0 - s)) - - expr = ('{x} {y} / 2 * 1 - abs {x0} < {s} 1 = {x} {y} 2 / = 0 {y} 2 / ? ' - '{x} {y} / 2 * 1 - abs 1 {s} - / ? {x} {y} / 2 * 1 - abs 1 {t} - ' - '* {t} + ? {x} {y} 2 / > 1 -1 ? * 1 + {y} * 2 / {scl} *').format( - x=x, y=y, x0=x0, t=t, s=s, scl=scl) - - nval = core.std.Expr([nval, nmax], [expr]) - - val = core.std.Expr([nval, min_], ['x y +']) - - if orig.format.num_planes != 1: - clip = core.std.ShufflePlanes(clips=[val, orig], planes=[0, 1, 2], - colorfamily=orig.format.color_family) - - if ss_x != 1.0 or ss_y != 1.0 or dest_x != ox or dest_y != oy: - clip = core.resize.Lanczos(clip, width=dest_x, height=dest_y) - - return clip diff --git a/vsTAAmbk.py b/vsTAAmbk.py new file mode 100644 index 0000000..25c1fea --- /dev/null +++ b/vsTAAmbk.py @@ -0,0 +1,715 @@ +import vapoursynth as vs +import mvsfunc as mvf +import havsfunc as haf +import functools + +MODULE_NAME = 'vsTAAmbk' + + +class Clip: + def __init__(self, clip): + self.core = vs.get_core() + self.clip = clip + if not isinstance(clip, vs.VideoNode): + raise TypeError(MODULE_NAME + ': clip is invalid.') + self.clip_width = clip.width + self.clip_height = clip.height + self.clip_bits = clip.format.bits_per_sample + self.clip_color_family = clip.format.color_family + self.clip_sample_type = clip.format.sample_type + self.clip_id = clip.format.id + self.clip_subsample_w = clip.format.subsampling_w + self.clip_subsample_h = clip.format.subsampling_h + self.clip_is_gray = True if clip.format.num_planes == 1 else False + # Register format for GRAY10 + vs.GRAY10 = self.core.register_format(vs.GRAY, vs.INTEGER, 10, 0, 0).id + + +class AAParent(Clip): + def __init__(self, clip, strength=0.0, down8=False): + super(AAParent, self).__init__(clip) + self.aa_clip = self.clip + self.dfactor = 1 - max(min(strength, 0.5), 0) + self.dw = round(self.clip_width * self.dfactor / 4) * 4 + self.dh = round(self.clip_height * self.dfactor / 4) * 4 + self.upw4 = round(self.dw * 0.375) * 4 + self.uph4 = round(self.dh * 0.375) * 4 + self.down8 = down8 + self.process_depth = self.clip_bits + if down8 is True: + self.down_8() + if self.dfactor != 1: + self.aa_clip = self.resize(self.aa_clip, self.dw, self.dh, shift=0) + if self.clip_color_family is vs.GRAY: + if self.clip_sample_type is not vs.INTEGER: + raise TypeError(MODULE_NAME + ': clip must be integer format.') + else: + raise TypeError(MODULE_NAME + ': clip must be GRAY family.') + + def resize(self, clip, w, h, shift): + try: + resized = self.core.resize.Spline36(clip, w, h, src_top=shift) + except vs.Error: + resized = 
self.core.fmtc.resample(clip, w, h, sy=shift) + if resized.format.bits_per_sample != self.process_depth: + mvf.Depth(resized, self.process_depth) + return resized + + def down_8(self): + self.process_depth = 8 + self.aa_clip = mvf.Depth(self.aa_clip, 8) + + def output(self, aaed): + if self.process_depth != self.clip_bits: + return mvf.LimitFilter(self.clip, mvf.Depth(aaed, self.clip_bits), thr=1.0, elast=2.0) + else: + return aaed + + +class AANnedi3(AAParent): + def __init__(self, clip, strength=0, down8=False, **args): + super(AANnedi3, self).__init__(clip, strength, down8) + self.nnedi3_args = { + 'nsize': args.get('nsize', 3), + 'nns': args.get('nns', 1), + 'qual': args.get('qual', 2), + } + self.opencl = args.get('opencl', False) + if self.opencl is True: + try: + self.nnedi3 = self.core.nnedi3cl.NNEDI3CL + self.nnedi3_args['device'] = args.get('opencl_device', 0) + except AttributeError: + try: + self.nnedi3 = self.core.znedi3.nnedi3 + except AttributeError: + self.nnedi3 = self.core.nnedi3.nnedi3 + else: + try: + self.nnedi3 = self.core.znedi3.nnedi3 + except AttributeError: + self.nnedi3 = self.core.nnedi3.nnedi3 + + def out(self): + aaed = self.nnedi3(self.aa_clip, field=1, dh=True, **self.nnedi3_args) + aaed = self.resize(aaed, self.clip_width, self.clip_height, -0.5) + aaed = self.core.std.Transpose(aaed) + aaed = self.nnedi3(aaed, field=1, dh=True, **self.nnedi3_args) + aaed = self.resize(aaed, self.clip_height, self.clip_width, -0.5) + aaed = self.core.std.Transpose(aaed) + return self.output(aaed) + + +class AANnedi3SangNom(AANnedi3): + def __init__(self, clip, strength=0, down8=False, **args): + super(AANnedi3SangNom, self).__init__(clip, strength, down8, **args) + self.aa = args.get('aa', 48) + + def out(self): + aaed = self.nnedi3(self.aa_clip, field=1, dh=True, **self.nnedi3_args) + aaed = self.resize(aaed, self.clip_width, self.uph4, shift=-0.5) + aaed = self.core.std.Transpose(aaed) + aaed = self.nnedi3(aaed, field=1, dh=True, **self.nnedi3_args) + aaed = self.resize(aaed, self.uph4, self.upw4, shift=-0.5) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.core.std.Transpose(aaed) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.resize(aaed, self.clip_width, self.clip_height, shift=0) + return self.output(aaed) + + +class AANnedi3UpscaleSangNom(AANnedi3SangNom): + def __init__(self, clip, strength=0, down8=False, **args): + super(AANnedi3UpscaleSangNom, self).__init__(clip, strength, down8, **args) + self.nnedi3_args = { + 'nsize': args.get('nsize', 1), + 'nns': args.get('nns', 3), + 'qual': args.get('qual', 2), + } + if self.opencl is True: + self.nnedi3_args['device'] = args.get('opencl_device', 0) + + +class AAEedi3(AAParent): + def __init__(self, clip, strength=0, down8=False, **args): + super(AAEedi3, self).__init__(clip, strength, down8) + self.eedi3_args = { + 'alpha': args.get('alpha', 0.5), + 'beta': args.get('beta', 0.2), + 'gamma': args.get('gamma', 20), + 'nrad': args.get('nrad', 3), + 'mdis': args.get('mdis', 30), + } + + self.opencl = args.get('opencl', False) + if self.opencl is True: + try: + self.eedi3 = self.core.eedi3m.EEDI3CL + self.eedi3_args['device'] = args.get('opencl_device', 0) + except AttributeError: + self.eedi3 = self.core.eedi3.eedi3 + if self.process_depth > 8: + self.down_8() + else: + try: + self.eedi3 = self.core.eedi3m.EEDI3 + except AttributeError: + self.eedi3 = self.core.eedi3.eedi3 + if self.process_depth > 8: + self.down_8() + + ''' + def build_eedi3_mask(self, clip): + eedi3_mask = 
self.core.nnedi3.nnedi3(clip, field=1, show_mask=True) + eedi3_mask = self.core.std.Expr([eedi3_mask, clip], "x 254 > x y - 0 = not and 255 0 ?") + eedi3_mask_turn = self.core.std.Transpose(eedi3_mask) + if self.dfactor != 1: + eedi3_mask_turn = self.core.resize.Bicubic(eedi3_mask_turn, self.clip_height, self.dw) + return eedi3_mask, eedi3_mask_turn + ''' + + def out(self): + aaed = self.eedi3(self.aa_clip, field=1, dh=True, **self.eedi3_args) + aaed = self.resize(aaed, self.dw, self.clip_height, shift=-0.5) + aaed = self.core.std.Transpose(aaed) + aaed = self.eedi3(aaed, field=1, dh=True, **self.eedi3_args) + aaed = self.resize(aaed, self.clip_height, self.clip_width, shift=-0.5) + aaed = self.core.std.Transpose(aaed) + return self.output(aaed) + + +class AAEedi3SangNom(AAEedi3): + def __init__(self, clip, strength=0, down8=False, **args): + super(AAEedi3SangNom, self).__init__(clip, strength, down8, **args) + self.aa = args.get('aa', 48) + + ''' + def build_eedi3_mask(self, clip): + eedi3_mask = self.core.nnedi3.nnedi3(clip, field=1, show_mask=True) + eedi3_mask = self.core.std.Expr([eedi3_mask, clip], "x 254 > x y - 0 = not and 255 0 ?") + eedi3_mask_turn = self.core.std.Transpose(eedi3_mask) + eedi3_mask_turn = self.core.resize.Bicubic(eedi3_mask_turn, self.uph4, self.dw) + return eedi3_mask, eedi3_mask_turn + ''' + + def out(self): + aaed = self.eedi3(self.aa_clip, field=1, dh=True, **self.eedi3_args) + aaed = self.resize(aaed, self.dw, self.uph4, shift=-0.5) + aaed = self.core.std.Transpose(aaed) + aaed = self.eedi3(aaed, field=1, dh=True, **self.eedi3_args) + aaed = self.resize(aaed, self.uph4, self.upw4, shift=-0.5) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.core.std.Transpose(aaed) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.resize(aaed, self.clip_width, self.clip_height, shift=0) + return self.output(aaed) + + +class AAEedi2(AAParent): + def __init__(self, clip, strength=0, down8=False, **args): + super(AAEedi2, self).__init__(clip, strength, down8) + self.mthresh = args.get('mthresh', 10) + self.lthresh = args.get('lthresh', 20) + self.vthresh = args.get('vthresh', 20) + self.maxd = args.get('maxd', 24) + self.nt = args.get('nt', 50) + + def out(self): + aaed = self.core.eedi2.EEDI2(self.aa_clip, 1, self.mthresh, self.lthresh, self.vthresh, maxd=self.maxd, + nt=self.nt) + aaed = self.resize(aaed, self.dw, self.clip_height, shift=-0.5) + aaed = self.core.std.Transpose(aaed) + aaed = self.core.eedi2.EEDI2(aaed, 1, self.mthresh, self.lthresh, self.vthresh, maxd=self.maxd, nt=self.nt) + aaed = self.resize(aaed, self.clip_height, self.clip_width, shift=-0.5) + aaed = self.core.std.Transpose(aaed) + return self.output(aaed) + + +class AAEedi2SangNom(AAEedi2): + def __init__(self, clip, strength=0, down8=False, **args): + super(AAEedi2SangNom, self).__init__(clip, strength, down8, **args) + self.aa = args.get('aa', 48) + + def out(self): + aaed = self.core.eedi2.EEDI2(self.aa_clip, 1, self.mthresh, self.lthresh, self.vthresh, maxd=self.maxd, + nt=self.nt) + aaed = self.resize(aaed, self.dw, self.uph4, shift=-0.5) + aaed = self.core.std.Transpose(aaed) + aaed = self.core.eedi2.EEDI2(aaed, 1, self.mthresh, self.lthresh, self.vthresh, maxd=self.maxd, nt=self.nt) + aaed = self.resize(aaed, self.uph4, self.upw4, shift=-0.5) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.core.std.Transpose(aaed) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.resize(aaed, self.clip_width, self.clip_height, shift=0) + return 
self.output(aaed) + + +class AASpline64NRSangNom(AAParent): + def __init__(self, clip, strength=0, down8=False, **args): + super(AASpline64NRSangNom, self).__init__(clip, strength, down8) + self.aa = args.get('aa', 48) + + def out(self): + aa_spline64 = self.core.fmtc.resample(self.aa_clip, self.upw4, self.uph4, kernel='spline64') + aa_spline64 = mvf.Depth(aa_spline64, self.process_depth) + aa_gaussian = self.core.fmtc.resample(self.aa_clip, self.upw4, self.uph4, kernel='gaussian', a1=100) + aa_gaussian = mvf.Depth(aa_gaussian, self.process_depth) + aaed = self.core.rgvs.Repair(aa_spline64, aa_gaussian, 1) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.core.std.Transpose(aaed) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.core.std.Transpose(aaed) + aaed = self.resize(aaed, self.clip_width, self.clip_height, shift=0) + return self.output(aaed) + + +class AASpline64SangNom(AAParent): + def __init__(self, clip, strength=0, down8=False, **args): + super(AASpline64SangNom, self).__init__(clip, strength, down8) + self.aa = args.get('aa', 48) + + def out(self): + aaed = self.core.fmtc.resample(self.aa_clip, self.clip_width, self.uph4, kernel="spline64") + aaed = mvf.Depth(aaed, self.process_depth) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.core.std.Transpose(self.resize(aaed, self.clip_width, self.clip_height, 0)) + aaed = self.core.fmtc.resample(aaed, self.clip_height, self.upw4, kernel="spline64") + aaed = mvf.Depth(aaed, self.process_depth) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.core.std.Transpose(self.resize(aaed, self.clip_height, self.clip_width, 0)) + return self.output(aaed) + + +class AAPointSangNom(AAParent): + def __init__(self, clip, strength=0, down8=False, **args): + super(AAPointSangNom, self).__init__(clip, 0, down8) + self.aa = args.get('aa', 48) + self.upw = self.clip_width * 2 + self.uph = self.clip_height * 2 + self.strength = strength # Won't use this + + def out(self): + aaed = self.core.resize.Point(self.aa_clip, self.upw, self.uph) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.core.std.Transpose(aaed) + aaed = self.core.sangnom.SangNom(aaed, aa=self.aa) + aaed = self.core.std.Transpose(aaed) + aaed = self.resize(aaed, self.clip_width, self.clip_height, 0) + return self.output(aaed) + + +def mask_sobel(mthr, opencl=False, opencl_device=-1, **kwargs): + core = vs.get_core() + if opencl is True: + try: + canny = functools.partial(core.tcanny.TCannyCL, device=opencl_device) + except AttributeError: + canny = core.tcanny.TCanny + else: + canny = core.tcanny.TCanny + mask_kwargs = { + 'gmmax': kwargs.get('gmmax', max(round(-0.14 * mthr + 61.87), 80)), + 'sigma': kwargs.get('sigma', 1.0), + 't_h': kwargs.get('t_h', 8.0), + 't_l': kwargs.get('t_l', 1.0), + } + return lambda clip: canny(clip, mode=1, op=2, **mask_kwargs) + + +def mask_prewitt(mthr, **kwargs): + core = vs.get_core() + + def wrapper(clip): + eemask_1 = core.std.Convolution(clip, [1, 1, 0, 1, 0, -1, 0, -1, -1], divisor=1, saturate=False) + eemask_2 = core.std.Convolution(clip, [1, 1, 1, 0, 0, 0, -1, -1, -1], divisor=1, saturate=False) + eemask_3 = core.std.Convolution(clip, [1, 0, -1, 1, 0, -1, 1, 0, -1], divisor=1, saturate=False) + eemask_4 = core.std.Convolution(clip, [0, -1, -1, 1, 0, -1, 1, 1, 0], divisor=1, saturate=False) + eemask = core.std.Expr([eemask_1, eemask_2, eemask_3, eemask_4], 'x y max z max a max') + eemask = core.std.Expr(eemask, 'x %d <= x 2 / x 1.4 pow ?' 
% mthr).rgvs.RemoveGrain(4).std.Inflate() + return eemask + + return wrapper + + +def mask_canny_continuous(mthr, opencl=False, opencl_device=-1, **kwargs): + core = vs.get_core() + if opencl is True: + try: + canny = functools.partial(core.tcanny.TCannyCL, device=opencl_device) + except AttributeError: + canny = core.tcanny.TCanny + else: + canny = core.tcanny.TCanny + mask_kwargs = { + 'sigma': kwargs.get('sigma', 1.0), + 't_h': kwargs.get('t_h', 8.0), + 't_l': kwargs.get('t_l', 1.0), + } + return lambda clip: (canny(clip, mode=1, **mask_kwargs) + .std.Expr('x %d <= x 2 / x 2 * ?' % mthr) + .rgvs.RemoveGrain(20 if clip.width > 1100 else 11)) + + +def mask_canny_binarized(mthr, opencl=False, opencl_device=-1, **kwargs): + core = vs.get_core() + if opencl is True: + try: + canny = functools.partial(core.tcanny.TCannyCL, device=opencl_device) + except AttributeError: + canny = core.tcanny.TCanny + else: + canny = core.tcanny.TCanny + mask_kwargs = { + 'sigma': kwargs.get('sigma', max(min(0.01772 * mthr + 0.4823, 5.0), 0.5)), + 't_h': kwargs.get('t_h', 8.0), + 't_l': kwargs.get('t_l', 1.0), + } + return lambda clip: canny(clip, mode=0, **mask_kwargs).std.Maximum() + + +def mask_tedge(mthr, **kwargs): + """ + Mainly based on Avisynth's plugin TEMmod(type=2) (https://github.com/chikuzen/TEMmod) + """ + core = vs.get_core() + mthr /= 5 + + def wrapper(clip): + # The Maximum value of these convolution is 21930, thus we have to store the result in 16bit clip + fake16 = core.std.Expr(clip, 'x', eval('vs.' + clip.format.name.upper()[:-1] + '16')) + ix = core.std.Convolution(fake16, [12, -74, 0, 74, -12], saturate=False, mode='h') + iy = core.std.Convolution(fake16, [-12, 74, 0, -74, 12], saturate=False, mode='v') + mask = core.std.Expr([ix, iy], 'x x * y y * + 0.0001 * sqrt 255.0 158.1 / * 0.5 +', + eval('vs.' + fake16.format.name.upper()[:-2] + '8')) + mask = core.std.Expr(mask, 'x %f <= x 2 / x 16 * ?' % mthr) + mask = core.std.Deflate(mask).rgvs.RemoveGrain(20 if clip.width > 1100 else 11) + return mask + + return wrapper + + +def mask_robert(mthr, **kwargs): + core = vs.get_core() + + def wrapper(clip): + m1 = core.std.Convolution(clip, [0, 0, 0, 0, -1, 0, 0, 0, 1], saturate=False) + m2 = core.std.Convolution(clip, [0, 0, 0, 0, 0, -1, 0, 1, 0], saturate=False) + mask = core.std.Expr([m1, m2], 'x y max').std.Expr('x %d < x 255 ?' % mthr).std.Inflate() + return mask + + return wrapper + + +def mask_msharpen(mthr, **kwargs): + core = vs.get_core() + mthr /= 5 + return lambda clip: core.msmoosh.MSharpen(clip, threshold=mthr, strength=0, mask=True) + + +def mask_lthresh(clip, mthrs, lthreshes, mask_kernel, inexpand, **kwargs): + core = vs.get_core() + gray8 = mvf.Depth(clip, 8) if clip.format.bits_per_sample != 8 else clip + gray8 = core.std.ShufflePlanes(gray8, 0, vs.GRAY) if clip.format.color_family != vs.GRAY else gray8 + mthrs = mthrs if isinstance(mthrs, (list, tuple)) else [mthrs] + lthreshes = lthreshes if isinstance(lthreshes, (list, tuple)) else [lthreshes] + inexpand = inexpand if isinstance(inexpand, (list, tuple)) and len(inexpand) >= 2 else [inexpand, 0] + + mask_kernels = [mask_kernel(mthr, **kwargs) for mthr in mthrs] + masks = [kernel(gray8) for kernel in mask_kernels] + mask = ((len(mthrs) - len(lthreshes) == 1) and functools.reduce( + lambda x, y: core.std.Expr([x, y, gray8], 'z %d < x y ?' 
% lthreshes[masks.index(y) - 1]), masks)) or masks[0]
+    mask = [mask] + [core.std.Maximum] * inexpand[0]
+    mask = functools.reduce(lambda x, y: y(x), mask)
+    mask = [mask] + [core.std.Minimum] * inexpand[1]
+    mask = functools.reduce(lambda x, y: y(x), mask)
+
+    bps = clip.format.bits_per_sample
+    # scale the 8-bit mask up to the clip's native bit depth
+    mask = (bps > 8 and core.std.Expr(mask, 'x %d *' % (((1 << clip.format.bits_per_sample) - 1) // 255),
+                                      eval('vs.GRAY' + str(bps)))) or mask
+    return lambda clip_a, clip_b, show=False: (show is False and core.std.MaskedMerge(clip_a, clip_b, mask)) or mask
+
+
+def mask_fadetxt(clip, lthr=225, cthr=(2, 2), expand=2, fade_num=(5, 5), apply_range=None):
+    core = vs.get_core()
+    if clip.format.color_family != vs.YUV:
+        raise TypeError(MODULE_NAME + ': fadetxt mask: only YUV clips are supported.')
+    w = clip.width
+    h = clip.height
+    bps = clip.format.bits_per_sample
+    ceil = (1 << bps) - 1
+    neutral = 1 << (bps - 1)
+    frame_count = clip.num_frames
+
+    yuv = [core.std.ShufflePlanes(clip, i, vs.GRAY) for i in range(clip.format.num_planes)]
+    try:
+        yuv444 = [core.resize.Bicubic(plane, w, h, src_left=0.25) if yuv.index(plane) > 0 else plane for plane in yuv]
+    except vs.Error:
+        yuv444 = [mvf.Depth(core.fmtc.resample(plane, w, h, sx=0.25), 8)
+                  if yuv.index(plane) > 0 else plane for plane in yuv]
+    cthr_u = cthr if not isinstance(cthr, (list, tuple)) else cthr[0]
+    cthr_v = cthr if not isinstance(cthr, (list, tuple)) else cthr[1]
+    expr = 'x %d > y %d - abs %d < and z %d - abs %d < and %d 0 ?' % (lthr, neutral, cthr_u, neutral, cthr_v, ceil)
+    mask = core.std.Expr(yuv444, expr)
+    mask = [mask] + [core.std.Maximum] * expand
+    mask = functools.reduce(lambda x, y: y(x), mask)
+
+    if fade_num != 0:
+        # extend the mask a few frames in both directions so fading credits stay covered
+        def shift_backward(n, mask_clip, num):
+            return mask_clip[frame_count - 1] if n + num > frame_count - 1 else mask_clip[n + num]
+
+        def shift_forward(n, mask_clip, num):
+            return mask_clip[0] if n - num < 0 else mask_clip[n - num]
+
+        fade_in_num = fade_num if not isinstance(fade_num, (list, tuple)) else fade_num[0]
+        fade_out_num = fade_num if not isinstance(fade_num, (list, tuple)) else fade_num[1]
+        fade_in = core.std.FrameEval(mask, functools.partial(shift_backward, mask_clip=mask, num=fade_in_num))
+        fade_out = core.std.FrameEval(mask, functools.partial(shift_forward, mask_clip=mask, num=fade_out_num))
+        mask = core.std.Expr([mask, fade_in, fade_out], 'x y max z max')
+    if apply_range is not None and isinstance(apply_range, (list, tuple)):
+        try:
+            blank = core.std.BlankClip(mask)
+            if 0 in apply_range:
+                mask = mask[apply_range[0]:apply_range[1]] + blank[apply_range[1]:]
+            elif frame_count in apply_range:
+                mask = blank[0:apply_range[0]] + mask[apply_range[0]:apply_range[1]]
+            else:
+                mask = blank[0:apply_range[0]] + mask[apply_range[0]:apply_range[1]] + blank[apply_range[1]:]
+        except vs.Error:
+            raise ValueError(MODULE_NAME + ': incorrect apply range setting. Possibly end less than start')
+        except IndexError:
+            raise ValueError(MODULE_NAME + ': incorrect apply range setting. '
+                             'Apply range must be a tuple/list with 2 elements')
+    return mask
+
+
+def daa(clip, mode=-1, opencl=False, opencl_device=-1):
+    core = vs.get_core()
+    nnedi3_attr = ((opencl is True and getattr(core, 'nnedi3cl', getattr(core, 'znedi3', getattr(core, 'nnedi3'))))
+                   or getattr(core, 'znedi3', getattr(core, 'nnedi3')))
+    nnedi3 = (hasattr(nnedi3_attr, 'NNEDI3CL') and nnedi3_attr.NNEDI3CL) or nnedi3_attr.nnedi3
+    nnedi3 = (nnedi3.name == 'NNEDI3CL' and functools.partial(nnedi3, device=opencl_device)) or nnedi3
+    if mode == -1:
+        nn = nnedi3(clip, field=3)
+        nnt = nnedi3(core.std.Transpose(clip), field=3).std.Transpose()
+        clph = core.std.Merge(core.std.SelectEvery(nn, cycle=2, offsets=0),
+                              core.std.SelectEvery(nn, cycle=2, offsets=1))
+        clpv = core.std.Merge(core.std.SelectEvery(nnt, cycle=2, offsets=0),
+                              core.std.SelectEvery(nnt, cycle=2, offsets=1))
+        clp = core.std.Merge(clph, clpv)
+    elif mode == 1:
+        nn = nnedi3(clip, field=3)
+        clp = core.std.Merge(core.std.SelectEvery(nn, cycle=2, offsets=0),
+                             core.std.SelectEvery(nn, cycle=2, offsets=1))
+    elif mode == 2:
+        nnt = nnedi3(core.std.Transpose(clip), field=3).std.Transpose()
+        clp = core.std.Merge(core.std.SelectEvery(nnt, cycle=2, offsets=0),
+                             core.std.SelectEvery(nnt, cycle=2, offsets=1))
+    else:
+        raise ValueError(MODULE_NAME + ': daa: at least one direction should be processed.')
+    return clp
+
+
+def temporal_stabilize(clip, src, delta=3, pel=1, retain=0.6):
+    core = vs.get_core()
+    clip_bits = clip.format.bits_per_sample
+    src_bits = src.format.bits_per_sample
+    if clip_bits != src_bits:
+        raise ValueError(MODULE_NAME + ': temporal_stabilize: bit depth of clip and src mismatch.')
+    if delta not in [1, 2, 3]:
+        raise ValueError(MODULE_NAME + ': temporal_stabilize: delta (1~3) invalid.')
+
+    diff = core.std.MakeDiff(src, clip)
+    clip_super = core.mv.Super(clip, pel=pel)
+    diff_super = core.mv.Super(diff, pel=pel, levels=1)
+
+    backward_vectors = [core.mv.Analyse(clip_super, isb=True, delta=i + 1, overlap=8, blksize=16) for i in range(delta)]
+    forward_vectors = [core.mv.Analyse(clip_super, isb=False, delta=i + 1, overlap=8, blksize=16) for i in range(delta)]
+    vectors = [vector for vector_group in zip(backward_vectors, forward_vectors) for vector in vector_group]
+
+    stabilize_func = {
+        1: core.mv.Degrain1,
+        2: core.mv.Degrain2,
+        3: core.mv.Degrain3
+    }
+    diff_stabilized = stabilize_func[delta](diff, diff_super, *vectors)
+
+    neutral = 1 << (clip_bits - 1)
+    expr = 'x {neutral} - abs y {neutral} - abs < x y ?'.format(neutral=neutral)
+    diff_stabilized_limited = core.std.Expr([diff, diff_stabilized], expr)
+    diff_stabilized = core.std.Merge(diff_stabilized_limited, diff_stabilized, retain)
+    clip_stabilized = core.std.MakeDiff(src, diff_stabilized)
+    return clip_stabilized
+
+
+def soothe(clip, src, keep=24):
+    core = vs.get_core()
+    clip_bits = clip.format.bits_per_sample
+    src_bits = src.format.bits_per_sample
+    if clip_bits != src_bits:
+        raise ValueError(MODULE_NAME + ': soothe: bit depth of clip and src mismatch.')
+
+    neutral = 1 << (clip_bits - 1)
+    ceil = (1 << clip_bits) - 1
+    multiple = ceil // 255
+    const = 100 * multiple
+    kp = keep * multiple
+
+    diff = core.std.MakeDiff(src, clip)
+    try:
+        diff_soften = core.misc.AverageFrame(diff, weights=[1, 1, 1], scenechange=32)
+    except AttributeError:
+        diff_soften = core.focus.TemporalSoften(diff, radius=1, luma_threshold=255,
+                                                chroma_threshold=255, scenechange=32, mode=2)
+    diff_soothed_expr = "x {neutral} - y {neutral} - * 0 < x {neutral} - {const} / {kp} 
* {neutral} + " \ + "x {neutral} - abs y {neutral} - abs > " \ + "x {kp} * y {const} {kp} - * + {const} / x ? ?".format(neutral=neutral, const=const, kp=kp) + diff_soothed = core.std.Expr([diff, diff_soften], diff_soothed_expr) + clip_soothed = core.std.MakeDiff(src, diff_soothed) + return clip_soothed + + +def aa_cycle(clip, aa_class, cycle, *args, **kwargs): + aaed = aa_class(clip, *args, **kwargs).out() + return aaed if cycle <= 0 else aa_cycle(aaed, aa_class, cycle - 1, *args, **kwargs) + + +def TAAmbk(clip, aatype=1, aatypeu=None, aatypev=None, preaa=0, strength=0.0, cycle=0, mtype=None, mclip=None, + mthr=None, mlthresh=None, mpand=(0, 0), txtmask=0, txtfade=0, thin=0, dark=0.0, sharp=0, + aarepair=0, postaa=None, src=None, stabilize=0, down8=True, showmask=0, opencl=False, opencl_device=-1, + **kwargs): + core = vs.get_core() + + aatypeu = aatype if aatypeu is None else aatypeu + aatypev = aatype if aatypev is None else aatypev + if mtype is None: + mtype = 0 if preaa == 0 and True not in (aatype, aatypeu, aatypev) else 1 + if postaa is None: + postaa = True if abs(sharp) > 70 or (0.4 < abs(sharp) < 1) else False + if src is None: + src = clip + else: + if clip.format.id != src.format.id: + raise ValueError(MODULE_NAME + ': clip format and src format mismatch.') + elif clip.width != src.width or clip.height != src.height: + raise ValueError(MODULE_NAME + ': clip resolution and src resolution mismatch.') + + preaa_clip = clip if preaa == 0 else daa(clip, preaa, opencl, opencl_device) + edge_enhanced_clip = (thin != 0 and core.warp.AWarpSharp2(preaa_clip, depth=int(thin)) or preaa_clip) + edge_enhanced_clip = (dark != 0 and haf.Toon(edge_enhanced_clip, str=float(dark)) or edge_enhanced_clip) + + aa_kernel = { + 0: lambda clip, *args, **kwargs: type('', (), {'out': lambda: clip}), + 1: AAEedi2, + 2: AAEedi3, + 3: AANnedi3, + 4: AANnedi3UpscaleSangNom, + 5: AASpline64NRSangNom, + 6: AASpline64SangNom, + -1: AAEedi2SangNom, + -2: AAEedi3SangNom, + -3: AANnedi3SangNom, + 'Eedi2': AAEedi2, + 'Eedi3': AAEedi3, + 'Nnedi3': AANnedi3, + 'Nnedi3UpscaleSangNom': AANnedi3UpscaleSangNom, + 'Spline64NrSangNom': AASpline64NRSangNom, + 'Spline64SangNom': AASpline64SangNom, + 'Eedi2SangNom': AAEedi2SangNom, + 'Eedi3SangNom': AAEedi3SangNom, + 'Nnedi3SangNom': AANnedi3SangNom, + 'PointSangNom': AAPointSangNom, + 'Unknown': lambda clip, *args, **kwargs: type('', (), { + 'out': lambda: exec('raise ValueError(MODULE_NAME + ": unknown aatype, aatypeu or aatypev")')}), + 'Custom': kwargs.get('aakernel', lambda clip, *args, **kwargs: type('', (), { + 'out': lambda: exec('raise RuntimeError(MODULE_NAME + ": custom aatype: aakernel must be set.")')})), + } + + if clip.format.color_family is vs.YUV: + yuv = [core.std.ShufflePlanes(edge_enhanced_clip, i, vs.GRAY) for i in range(clip.format.num_planes)] + aatypes = [aatype, aatypeu, aatypev] + aa_classes = [aa_kernel.get(aatype, aa_kernel['Unknown']) for aatype in aatypes] + aa_clips = [aa_cycle(plane, aa_class, cycle, strength if yuv.index(plane) == 0 else 0, down8, opencl=opencl, + opencl_device=opencl_device, **kwargs) for plane, aa_class in zip(yuv, aa_classes)] + aaed_clip = core.std.ShufflePlanes(aa_clips, [0, 0, 0], vs.YUV) + elif clip.format.color_family is vs.GRAY: + gray = edge_enhanced_clip + aa_class = aa_kernel.get(aatype, aa_kernel['Unknown']) + aaed_clip = aa_cycle(gray, aa_class, cycle, strength, down8, **kwargs) + else: + raise ValueError(MODULE_NAME + ': Unsupported color family.') + + abs_sharp = abs(sharp) + if sharp >= 1: + sharped_clip = 
haf.LSFmod(aaed_clip, strength=int(abs_sharp), defaults='old', source=src) + elif sharp > 0: + per = int(40 * abs_sharp) + matrix = [-1, -2, -1, -2, 52 - per, -2, -1, -2, -1] + sharped_clip = core.std.Convolution(aaed_clip, matrix) + elif sharp == 0: + sharped_clip = aaed_clip + elif sharp > -1: + sharped_clip = haf.LSFmod(aaed_clip, strength=round(abs_sharp * 100), defaults='fast', source=src) + elif sharp == -1: + blured = core.rgvs.RemoveGrain(aaed_clip, mode=20 if aaed_clip.width > 1100 else 11) + diff = core.std.MakeDiff(aaed_clip, blured) + diff = core.rgvs.Repair(diff, core.std.MakeDiff(src, aaed_clip), mode=13) + sharped_clip = core.std.MergeDiff(aaed_clip, diff) + else: + sharped_clip = aaed_clip + + postaa_clip = sharped_clip if postaa is False else soothe(sharped_clip, src, 24) + repaired_clip = ((aarepair > 0 and core.rgvs.Repair(src, postaa_clip, aarepair)) or + (aarepair < 0 and core.rgvs.Repair(postaa_clip, src, -aarepair)) or postaa_clip) + stabilized_clip = repaired_clip if stabilize == 0 else temporal_stabilize(repaired_clip, src, stabilize) + + if mclip is not None: + try: + masked_clip = core.std.MaskedMerge(src, stabilized_clip, mclip, first_plane=True) + masker = type('', (), {'__call__': lambda *args, **kwargs: mclip})() + except vs.Error: + raise RuntimeError( + MODULE_NAME + ': Something wrong with your mclip. Maybe format, resolution or bit_depth mismatch.') + else: + # Use lambda for lazy evaluation + mask_kernel = { + 0: lambda: lambda a, b, *args, **kwargs: b, + 1: lambda: mask_lthresh(clip, mthr, mlthresh, mask_sobel, mpand, opencl=opencl, + opencl_device=opencl_device, **kwargs), + 2: lambda: mask_lthresh(clip, mthr, mlthresh, mask_robert, mpand, **kwargs), + 3: lambda: mask_lthresh(clip, mthr, mlthresh, mask_prewitt, mpand, **kwargs), + 4: lambda: mask_lthresh(clip, mthr, mlthresh, mask_tedge, mpand, **kwargs), + 5: lambda: mask_lthresh(clip, mthr, mlthresh, mask_canny_continuous, mpand, opencl=opencl, + opencl_device=opencl_device, **kwargs), + 6: lambda: mask_lthresh(clip, mthr, mlthresh, mask_msharpen, mpand, **kwargs), + 'Sobel': lambda: mask_lthresh(clip, mthr, mlthresh, mask_sobel, mpand, opencl=opencl, + opencl_device=opencl_device, **kwargs), + 'Canny': lambda: mask_lthresh(clip, mthr, mlthresh, mask_canny_binarized, mpand, opencl=opencl, + opencl_device=opencl_device, **kwargs), + 'Prewitt': lambda: mask_lthresh(clip, mthr, mlthresh, mask_prewitt, mpand, **kwargs), + 'Robert': lambda: mask_lthresh(clip, mthr, mlthresh, mask_robert, mpand, **kwargs), + 'TEdge': lambda: mask_lthresh(clip, mthr, mlthresh, mask_tedge, mpand, **kwargs), + 'Canny_Old': lambda: mask_lthresh(clip, mthr, mlthresh, mask_canny_continuous, mpand, opencl=opencl, + opencl_device=opencl_device, **kwargs), + 'MSharpen': lambda: mask_lthresh(clip, mthr, mlthresh, mask_msharpen, mpand, **kwargs), + 'Unknown': lambda: exec('raise ValueError(MODULE_NAME + ": unknown mtype")') + } + mtype = 5 if mtype is None else mtype + mthr = (24,) if mthr is None else mthr + masker = mask_kernel.get(mtype, mask_kernel['Unknown'])() + masked_clip = masker(src, stabilized_clip) + + if txtmask > 0 and clip.format.color_family is not vs.GRAY: + text_mask = mask_fadetxt(clip, lthr=txtmask, fade_num=txtfade) + txt_protected_clip = core.std.MaskedMerge(masked_clip, src, text_mask, first_plane=True) + else: + text_mask = src + txt_protected_clip = masked_clip + + final_output = ((showmask == -1 and text_mask) or + (showmask == 1 and masker(None, src, show=True)) or + (showmask == 2 and 
core.std.StackVertical([core.std.ShufflePlanes([masker(None, src, show=True), + core.std.BlankClip(src)], [0, 1, 2], vs.YUV), src])) or + (showmask == 3 and core.std.Interleave([core.std.ShufflePlanes([masker(None, src, show=True), + core.std.BlankClip(src)], [0, 1, 2], vs.YUV), src])) or + txt_protected_clip) + return final_output
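
For reference, a minimal VapourSynth script driving the new module through TAAmbk() could look like the sketch below; the source filter (lsmas.LWLibavSource), the file name and the chosen parameter values are illustrative assumptions rather than values prescribed by this patch:

    import vapoursynth as vs
    import vsTAAmbk as taa

    core = vs.get_core()

    # Any integer YUV (or GRAY) clip works; the path is a placeholder.
    src = core.lsmas.LWLibavSource('source.mkv')

    # aatype=2 selects the EEDI3 kernel, mtype=1 restricts the anti-aliasing to a
    # Sobel edge mask, and down8=True processes in 8 bit and limits the result
    # back against the full-depth source.
    aa = taa.TAAmbk(src, aatype=2, mtype=1, down8=True)

    aa.set_output()

Setting showmask to 1, 2 or 3 previews the generated edge mask (plain, stacked with the source, or interleaved with it) before committing to the mask settings.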