diff --git a/k7sfunc.py b/k7sfunc.py index c4779085..7e223d1c 100644 --- a/k7sfunc.py +++ b/k7sfunc.py @@ -2,14 +2,14 @@ ### 文档: https://github.com/hooke007/MPV_lazy/wiki/3_K7sfunc ################################################## -__version__ = "0.1.5" +__version__ = "0.1.20" __all__ = [ - "FMT_CTRL", "FPS_CHANGE", "FPS_CTRL", - "ACNET_STD", "CUGAN_NV", "ESRGAN_NV", "NNEDI3_STD", "WAIFU_NV", + "FMT_CHANGE", "FMT_CTRL", "FPS_CHANGE", "FPS_CTRL", + "ACNET_STD", "CUGAN_NV", "ESRGAN_DML", "ESRGAN_NV", "NNEDI3_STD", "WAIFU_DML", "WAIFU_NV", "MVT_LQ", "MVT_STD", "MVT_POT", "MVT_MQ", "RIFE_STD", "RIFE_NV", "RIFE_NV_ORT", "SVP_LQ", "SVP_STD", "SVP_HQ", "SVP_PRO", - "BM3D_NV", "CCD_STD", "FFT3D_STD", "NLM_STD", "NLM_NV", - "AA_NV", "COLOR_P3W_FIX", "DEBAND_STD", "DEINT_STD", "IVTC_STD", "STAB_STD", "STAB_HQ", "UAI_NV_TRT", + "BILA_NV", "BM3D_NV", "CCD_STD", "DFTT_STD", "DFTT_NV", "FFT3D_STD", "NLM_STD", "NLM_NV", + "AA_NV", "COLOR_P3W_FIX", "CSC_RB", "DEBAND_STD", "DEINT_LQ", "DEINT_STD", "DEINT_EX", "IVTC_STD", "STAB_STD", "STAB_HQ", "UAI_DML", "UAI_NV_TRT", ] import os @@ -38,9 +38,66 @@ vs_api = vs.__api_version__.api_major +dfttest2 = None nnedi3_resample = None +QTGMCv2 = None vsmlrt = None +################################################## +## 格式转换 # TODO +################################################## + +def FMT_CHANGE( + input : vs.VideoNode, + fmtc : bool = False, # TODO + algo : typing.Literal[1, 2, 3, 4] = 1, + param_a : float = 0.0, + param_b : float = 0.0, + w_out : int = 0, + h_out : int = 0, + fmt_pix : typing.Literal[-1, 0, 1, 2, 3] = -1, + dither : typing.Literal[0, 1, 2, 3] = 0, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + func_name = "FMT_CHANGE" + if not isinstance(input, vs.VideoNode) : + raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") + if not isinstance(fmtc, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 fmtc 的值无效") + if algo not in [1, 2, 3, 4] : + raise vs.Error(f"模块 {func_name} 的子参数 algo 的值无效") + if not isinstance(param_a, (int, float)) or not isinstance(param_b, (int, float)) : + raise vs.Error(f"模块 {func_name} 的子参数 param_a 或 param_b 的值无效") + if not isinstance(w_out, int) or not isinstance(h_out, int) : + raise vs.Error(f"模块 {func_name} 的子参数 w_out 或 h_out 的值无效") + if isinstance(w_out, int) and isinstance(h_out, int) : + if w_out < 0 or h_out < 0 : + raise vs.Error(f"模块 {func_name} 的子参数 w_out 或 h_out 的值无效") + if fmt_pix not in [-1, 0, 1, 2, 3] : + raise vs.Error(f"模块 {func_name} 的子参数 fmt_pix 的值无效") + if dither not in [0, 1, 2, 3] : + raise vs.Error(f"模块 {func_name} 的子参数 dither 的值无效") + if not isinstance(vs_t, int) or vs_t > vs_thd_init : + raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") + + core.num_threads = vs_t + fmt_in = input.format.id + algo_val = ["Bilinear", "Bicubic", "Lanczos", "Spline36"][algo - 1] + resizer = getattr(core.resize, algo_val) + if fmt_pix > 0 : + fmt_pix_val = [vs.YUV420P8, vs.YUV420P10, vs.YUV444P16][fmt_pix - 1] + fmt_out = fmt_pix_val + elif fmt_pix == 0 : + fmt_out = fmt_in + if fmt_in not in [vs.YUV420P8, vs.YUV420P10] : + fmt_out = vs.YUV420P10 + dither_val = ["none", "ordered", "random", "error_diffusion"][dither] + + output = resizer(clip=input, width=w_out if w_out else None, height=h_out if h_out else None, filter_param_a=param_a, filter_param_b=param_b, format=fmt_out if fmt_pix >= 0 else None, dither_type=dither_val) + + return output + ################################################## ## 限制输出的格式与高度 ################################################## @@ -49,6 +106,8 @@ def FMT_CTRL( input : vs.VideoNode, h_max : 
int = 0, h_ret : bool = False, + spl_b : float = 1/3, # TODO 替换为 FMT_CHANGE + spl_c : float = 1/3, fmt_pix : typing.Literal[0, 1, 2, 3] = 0, vs_t : int = vs_thd_dft, ) -> vs.VideoNode : @@ -60,47 +119,59 @@ def FMT_CTRL( raise vs.Error(f"模块 {func_name} 的子参数 h_max 的值无效") if not isinstance(h_ret, bool) : raise vs.Error(f"模块 {func_name} 的子参数 h_ret 的值无效") + if not isinstance(spl_b, (int, float)) : + raise vs.Error(f"模块 {func_name} 的子参数 spl_b 的值无效") + if not isinstance(spl_c, (int, float)) : + raise vs.Error(f"模块 {func_name} 的子参数 spl_c 的值无效") if fmt_pix not in [0, 1, 2, 3] : raise vs.Error(f"模块 {func_name} 的子参数 fmt_pix 的值无效") if not isinstance(vs_t, int) or vs_t > vs_thd_init : raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") core.num_threads = vs_t - fmt_in = input.format.id + fmt_src = input.format + fmt_in = fmt_src.id + spl_b, spl_c = float(spl_b), float(spl_c) + w_in, h_in = input.width, input.height + # https://github.com/mpv-player/mpv/blob/master/video/filter/vf_vapoursynth.c + fmt_mpv = [vs.YUV420P8, vs.YUV420P10, vs.YUV422P8, vs.YUV422P10, vs.YUV410P8, vs.YUV411P8, vs.YUV440P8, vs.YUV444P8, vs.YUV444P10] + fmt_pass = [vs.YUV420P8, vs.YUV420P10, vs.YUV444P16] + fmt_safe = [vs.YUV444P8, vs.YUV444P10, vs.YUV444P16] + if fmt_pix : - if fmt_pix == 1 : - fmt_pix = vs.YUV420P8 - elif fmt_pix == 2 : - fmt_pix = vs.YUV420P10 - elif fmt_pix == 3 : - fmt_pix = vs.YUV444P16 - fmt_out = fmt_pix + fmt_pix_val = fmt_pass[fmt_pix - 1] + fmt_out = fmt_pix_val if fmt_out == fmt_in : clip = input else : - clip = core.resize.Bilinear(clip=input, format=fmt_out) + if (fmt_out not in fmt_safe) and (fmt_in in fmt_safe) : + if not (w_in % 2 == 0) : + w_in = w_in - 1 + if not (h_in % 2 == 0) : + h_in = h_in - 1 + clip = core.resize.Bicubic(clip=input, width=w_in, height=h_in, filter_param_a=spl_b, filter_param_b=spl_c, format=fmt_out) + else : + clip = core.resize.Bilinear(clip=input, format=fmt_out) else : - # https://github.com/mpv-player/mpv/blob/master/video/filter/vf_vapoursynth.c - fmt_mpv = [ - vs.YUV420P8, vs.YUV420P10, - vs.YUV422P8, vs.YUV422P10, - vs.YUV410P8, vs.YUV411P8, vs.YUV440P8, vs.YUV444P8, vs.YUV444P10, - ] if fmt_in not in fmt_mpv : fmt_out = vs.YUV420P10 - clip = core.resize.Bilinear(clip=input, format=fmt_out) + if (fmt_out not in fmt_safe) and (fmt_in in fmt_safe) : + if not (w_in % 2 == 0) : + w_in = w_in - 1 + if not (h_in % 2 == 0) : + h_in = h_in - 1 + clip = core.resize.Bicubic(clip=input, width=w_in, height=h_in, filter_param_a=spl_b, filter_param_b=spl_c, format=fmt_out) + else : + clip = core.resize.Bilinear(clip=input, format=fmt_out) else : fmt_out = fmt_in clip = input if h_max : - w_in = input.width - h_in = input.height if h_in > h_max : if h_ret : raise Exception("源高度超过限制的范围,已临时中止。") else : - fmt_src = input.format w_ds = w_in * (h_max / h_in) h_ds = h_max if fmt_src.subsampling_w or fmt_src.subsampling_h : @@ -115,23 +186,196 @@ def FMT_CTRL( if h_max >= h_in : output = clip else : - output = core.resize.Lanczos(clip=clip, width=w_ds, height=h_ds) + output = core.resize.Bicubic(clip=clip, width=w_ds, height=h_ds, filter_param_a=spl_b, filter_param_b=spl_c) elif not h_max and fmt_pix : - if fmt_pix == fmt_out : + if fmt_pix_val == fmt_out : output = clip else : - output = core.resize.Bilinear(clip=clip, format=fmt_pix) + output = core.resize.Bilinear(clip=clip, format=fmt_pix_val) else : if h_max >= h_in : - if fmt_pix == fmt_out : + if fmt_pix_val == fmt_out : output = clip else : - output = core.resize.Bilinear(clip=clip, format=fmt_pix) + output = 
core.resize.Bilinear(clip=clip, format=fmt_pix_val) else : - if fmt_pix == fmt_out : - output = core.resize.Lanczos(clip=clip, width=w_ds, height=h_ds) + if fmt_pix_val == fmt_out : + output = core.resize.Bicubic(clip=clip, width=w_ds, height=h_ds, filter_param_a=spl_b, filter_param_b=spl_c) else : - output = core.resize.Lanczos(clip=clip, width=w_ds, height=h_ds, format=fmt_pix) + output = core.resize.Bicubic(clip=clip, width=w_ds, height=h_ds, filter_param_a=spl_b, filter_param_b=spl_c) + + return output + +################################################## +## PORT adjust (a3af7cb57cb37747b0667346375536e65b1fed17) +## 均衡器 # helper +################################################## + +def EQ( + input : vs.VideoNode, + hue : typing.Optional[float] = None, + sat : typing.Optional[float] = None, + bright : typing.Optional[float] = None, + cont : typing.Optional[float] = None, + coring : bool = True, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + core.num_threads = vs_t + fmt_src = input.format + fmt_cf_in = fmt_src.color_family + fmt_bit_in = fmt_src.bits_per_sample + + if hue is not None or sat is not None : + hue = 0.0 if hue is None else hue + sat = 1.0 if sat is None else sat + hue = hue * math.pi / 180.0 + hue_sin = math.sin(hue) + hue_cos = math.cos(hue) + gray = 128 << (fmt_bit_in - 8) + chroma_min = 0 + chroma_max = (2 ** fmt_bit_in) - 1 + if coring: + chroma_min = 16 << (fmt_bit_in - 8) + chroma_max = 240 << (fmt_bit_in - 8) + expr_u = "x {} - {} * y {} - {} * + {} + {} max {} min".format(gray, hue_cos * sat, gray, hue_sin * sat, gray, chroma_min, chroma_max) + expr_v = "y {} - {} * x {} - {} * - {} + {} max {} min".format(gray, hue_cos * sat, gray, hue_sin * sat, gray, chroma_min, chroma_max) + src_u = input.std.ShufflePlanes(planes=1, colorfamily=vs.GRAY) + src_v = input.std.ShufflePlanes(planes=2, colorfamily=vs.GRAY) + dst_u = core.std.Expr(clips=[src_u, src_v], expr=expr_u) + dst_v = core.std.Expr(clips=[src_u, src_v], expr=expr_v) + + output = core.std.ShufflePlanes(clips=[input, dst_u, dst_v], planes=[0, 0, 0], colorfamily=fmt_cf_in) + + if bright is not None or cont is not None : + bright = 0.0 if bright is None else bright + cont = 1.0 if cont is None else cont + luma_lut = [] + luma_min = 0 + luma_max = (2 ** fmt_bit_in) - 1 + if coring : + luma_min = 16 << (fmt_bit_in - 8) + luma_max = 235 << (fmt_bit_in - 8) + for i in range(2 ** fmt_bit_in) : + val = int((i - luma_min) * cont + bright + luma_min + 0.5) + luma_lut.append(min(max(val, luma_min), luma_max)) + + output = input.std.Lut(planes=0, lut=luma_lut) + + return output + +################################################## +## 提取高频层 # helper +################################################## + +def LAYER_HIGH( + input : vs.VideoNode, + blur_m : typing.Literal[0, 1, 2] = 2, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + core.num_threads = vs_t + fmt_in = input.format.id + + if fmt_in == vs.YUV444P16 : + cut0 = input + else : + cut0 = core.resize.Bilinear(clip=input, format=vs.YUV444P16) + if blur_m == 0 : + output_blur = cut0 + output_diff = None + elif blur_m == 1 : + blur = core.rgvs.RemoveGrain(clip=cut0, mode=20) + blur = core.rgvs.RemoveGrain(clip=blur, mode=20) + output_blur = core.rgvs.RemoveGrain(clip=blur, mode=20) + elif blur_m == 2 : + blur = core.std.Convolution(clip=cut0, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]) + blur = core.std.Convolution(clip=blur, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]) + output_blur = core.std.Convolution(clip=blur, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]) + + if blur_m : + 
output_diff = core.std.MakeDiff(clipa=cut0, clipb=blur) + + return output_blur, output_diff + +################################################## +## 提取线条 # helper +################################################## + +def LINE_MASK( + input : vs.VideoNode, + cpu : bool = True, + gpu : typing.Literal[-1, 0, 1, 2] = -1, + plane : typing.List[int] = [0], + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + core.num_threads = vs_t + + if cpu : # r13+ + output = core.tcanny.TCanny(clip=input, sigma=1.5, t_h=8.0, t_l=1.0, mode=0, op=1, planes=plane) + else : # r12 + output = core.tcanny.TCannyCL(clip=input, sigma=1.5, t_h=8.0, t_l=1.0, mode=0, op=1, device=gpu, planes=plane) + + return output + +################################################## +## 分离平面 # helper +################################################## + +def PLANE_EXTR( + input : vs.VideoNode, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + core.num_threads = vs_t + + ''' obs + output = [] + for plane in range(input.format.num_planes) : + clips = core.std.ShufflePlanes(clips=input, planes=plane, colorfamily=vs.GRAY) + output.append(clips) + ''' + + output = core.std.SplitPlanes(clip=input) + + return output + +################################################## +## 动态范围修正 # helper +################################################## + +def RANGE_CHANGE( + input : vs.VideoNode, + l2f : bool = True, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + core.num_threads = vs_t + fmt_in = input.format.id + + cut0 = input + if fmt_in in [vs.YUV420P8, vs.YUV422P8, vs.YUV410P8, vs.YUV411P8, vs.YUV440P8, vs.YUV444P8] : + lv_val_pre = 0 + elif fmt_in in [vs.YUV420P10, vs.YUV422P10, vs.YUV444P10] : + lv_val_pre = 1 + elif fmt_in in [vs.YUV420P16, vs.YUV422P16, vs.YUV444P16] : + lv_val_pre = 2 + else : + cut0 = core.resize.Bilinear(clip=input, format=vs.YUV444P16) + lv_val_pre = 2 + + lv_val1 = [16, 64, 4096][lv_val_pre] + lv_val2 = [235, 940, 60160][lv_val_pre] + lv_val2_alt = [240, 960, 61440][lv_val_pre] + lv_val3 = 0 + lv_val4 = [255, 1023, 65535][lv_val_pre] + if l2f : + cut1 = core.std.Levels(clip=cut0, min_in=lv_val1, max_in=lv_val2, min_out=lv_val3, max_out=lv_val4, planes=0) + output = core.std.Levels(clip=cut1, min_in=lv_val1, max_in=lv_val2_alt, min_out=lv_val3, max_out=lv_val4, planes=[1,2]) + else : + cut1 = core.std.Levels(clip=cut0, min_in=lv_val3, max_in=lv_val4, min_out=lv_val1, max_out=lv_val2, planes=0) + output = core.std.Levels(clip=cut1, min_in=lv_val3, max_in=lv_val4, min_out=lv_val1, max_out=lv_val2_alt, planes=[1,2]) return output @@ -159,17 +403,17 @@ def FPS_CHANGE( core.num_threads = vs_t - def ChangeFPS(clip: vs.VideoNode, fpsnum: int, fpsden: int = 1) -> vs.VideoNode: + def _ChangeFPS(clip: vs.VideoNode, fpsnum: int, fpsden: int = 1) -> vs.VideoNode : factor = (fpsnum / fpsden) * (clip.fps_den / clip.fps_num) - def frame_adjuster(n: int) -> vs.VideoNode: + def _frame_adjuster(n: int) -> vs.VideoNode : real_n = math.floor(n / factor) one_frame_clip = clip[real_n] * (len(clip) + 100) return one_frame_clip attribute_clip = clip.std.BlankClip(length=math.floor(len(clip) * factor), fpsnum=fpsnum, fpsden=fpsden) - return attribute_clip.std.FrameEval(eval=frame_adjuster) + return attribute_clip.std.FrameEval(eval=_frame_adjuster) src = core.std.AssumeFPS(clip=input, fpsnum=fps_in * 1000, fpsden=1000) - fin = ChangeFPS(clip=src, fpsnum=fps_out * 1000, fpsden=1000) + fin = _ChangeFPS(clip=src, fpsnum=fps_out * 1000, fpsden=1000) output = core.std.AssumeFPS(clip=fin, fpsnum=fps_out * 1000, fpsden=1000) return output @@ -242,7 +486,10 
@@ def ACNET_STD( core.num_threads = vs_t fmt_in = input.format.id - cut0 = core.resize.Bilinear(clip=input, format=vs.YUV444P16) + if fmt_in == vs.YUV444P16 : + cut0 = input + else : + cut0 = core.resize.Bilinear(clip=input, format=vs.YUV444P16) cut1 = core.anime4kcpp.Anime4KCPP(src=cut0, zoomFactor=2, ACNet=1, GPUMode=1, GPGPUModel="opencl" if gpu_m==1 else "cuda", HDN=nr, HDNLevel=nr_lv, platformID=gpu, deviceID=gpu) output = core.resize.Bilinear(clip=cut1, format=fmt_in) @@ -257,7 +504,7 @@ def CUGAN_NV( lt_hd : bool = False, nr_lv : typing.Literal[-1, 0, 3] = -1, gpu : typing.Literal[0, 1, 2] = 0, - gpu_t : typing.Literal[1, 2, 3] = 2, + gpu_t : int = 2, st_eng : bool = False, ws_size : int = 0, vs_t : int = vs_thd_dft, @@ -272,7 +519,7 @@ def CUGAN_NV( raise vs.Error(f"模块 {func_name} 的子参数 nr_lv 的值无效") if gpu not in [0, 1, 2] : raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") - if gpu_t not in [1, 2, 3] : + if not isinstance(gpu_t, int) or gpu_t <= 0 : raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") if not isinstance(st_eng, bool) : raise vs.Error(f"模块 {func_name} 的子参数 st_eng 的值无效") @@ -312,13 +559,63 @@ def CUGAN_NV( ## Real-ESRGAN放大 ################################################## +def ESRGAN_DML( + input : vs.VideoNode, + lt_hd : bool = False, + model : typing.Literal[0, 2, 5000, 5001, 5002, 5003, 5004] = 5000, + scale : typing.Literal[1, 2, 3, 4] = 2, + gpu : typing.Literal[0, 1, 2] = 0, + gpu_t : int = 2, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + func_name = "ESRGAN_DML" + if not isinstance(input, vs.VideoNode) : + raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") + if not isinstance(lt_hd, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 lt_hd 的值无效") + if model not in [0, 2, 5000, 5001, 5002, 5003, 5004] : + raise vs.Error(f"模块 {func_name} 的子参数 model 的值无效") + if scale not in [1, 2, 3, 4] : + raise vs.Error(f"模块 {func_name} 的子参数 scale 的值无效") + if gpu not in [0, 1, 2] : + raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") + if not isinstance(gpu_t, int) or gpu_t <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") + if not isinstance(vs_t, int) or vs_t > vs_thd_init : + raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") + + global vsmlrt + if vsmlrt is None : + import vsmlrt + + core.num_threads = vs_t + w_in, h_in = input.width, input.height + size_in = w_in * h_in + colorlv = getattr(input.get_frame(0).props, "_ColorRange", 0) + fmt_in = input.format.id + + if (not lt_hd and (size_in > 1280 * 720)) or (size_in > 2048 * 1080) : + raise Exception("源分辨率超过限制的范围,已临时中止。") + + cut1 = input.resize.Bilinear(format=vs.RGBS, matrix_in_s="709") + cut2 = vsmlrt.RealESRGANv2(clip=cut1, scale=scale, model=model, backend=vsmlrt.BackendV2.ORT_DML( + device_id=gpu, num_streams=gpu_t, fp16=True)) + output = core.resize.Bilinear(clip=cut2, format=fmt_in, matrix_s="709", range=1 if colorlv==0 else None) + + return output + +################################################## +## Real-ESRGAN放大 +################################################## + def ESRGAN_NV( input : vs.VideoNode, lt_hd : bool = False, model : typing.Literal[0, 2, 5000, 5001, 5002, 5003, 5004] = 5000, scale : typing.Literal[1, 2, 3, 4] = 2, gpu : typing.Literal[0, 1, 2] = 0, - gpu_t : typing.Literal[1, 2, 3] = 2, + gpu_t : int = 2, st_eng : bool = False, ws_size : int = 0, vs_t : int = vs_thd_dft, @@ -335,7 +632,7 @@ def ESRGAN_NV( raise vs.Error(f"模块 {func_name} 的子参数 scale 的值无效") if gpu not in [0, 1, 2] : raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") - if gpu_t not in [1, 2, 3] : + if not isinstance(gpu_t, int) or 
gpu_t <= 0 : raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") if not isinstance(st_eng, bool) : raise vs.Error(f"模块 {func_name} 的子参数 st_eng 的值无效") @@ -431,13 +728,67 @@ def NNEDI3_STD( ## Waifu2x放大 ################################################## +def WAIFU_DML( + input : vs.VideoNode, + lt_hd : bool = False, + model : typing.Literal[3, 5, 6] = 3, + nr_lv : typing.Literal[-1, 0, 1, 2, 3] = 1, + scale : typing.Literal[1, 2, 3, 4] = 2, + gpu : typing.Literal[0, 1, 2] = 0, + gpu_t : int = 2, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + func_name = "WAIFU_DML" + if not isinstance(input, vs.VideoNode) : + raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") + if not isinstance(lt_hd, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 lt_hd 的值无效") + if model not in [3, 5, 6] : + raise vs.Error(f"模块 {func_name} 的子参数 model 的值无效") + if nr_lv not in [-1, 0, 1, 2, 3] : + raise vs.Error(f"模块 {func_name} 的子参数 nr_lv 的值无效") + if scale not in [1, 2, 3, 4] : + raise vs.Error(f"模块 {func_name} 的子参数 scale 的值无效") + if gpu not in [0, 1, 2] : + raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") + if not isinstance(gpu_t, int) or gpu_t <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") + if not isinstance(vs_t, int) or vs_t > vs_thd_init : + raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") + + global vsmlrt + if vsmlrt is None : + import vsmlrt + + core.num_threads = vs_t + w_in, h_in = input.width, input.height + size_in = w_in * h_in + colorlv = getattr(input.get_frame(0).props, "_ColorRange", 0) + fmt_in = input.format.id + + if (not lt_hd and (size_in > 1280 * 720)) or (size_in > 2048 * 1080) : + raise Exception("源分辨率超过限制的范围,已临时中止。") + + cut1 = input.resize.Bilinear(format=vs.RGBS, matrix_in_s="709") + cut2 = vsmlrt.Waifu2x(clip=cut1, noise=nr_lv, scale=scale, model=model, backend=vsmlrt.BackendV2.ORT_DML( + device_id=gpu, num_streams=gpu_t, fp16=True)) + output = core.resize.Bilinear(clip=cut2, format=fmt_in, matrix_s="709", range=1 if colorlv==0 else None) + + return output + +################################################## +## Waifu2x放大 +################################################## + def WAIFU_NV( input : vs.VideoNode, lt_hd : bool = False, + model : typing.Literal[3, 5, 6] = 3, nr_lv : typing.Literal[-1, 0, 1, 2, 3] = 1, scale : typing.Literal[1, 2, 3, 4] = 2, gpu : typing.Literal[0, 1, 2] = 0, - gpu_t : typing.Literal[1, 2, 3] = 2, + gpu_t : int = 2, st_eng : bool = False, ws_size : int = 0, vs_t : int = vs_thd_dft, @@ -448,13 +799,15 @@ def WAIFU_NV( raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") if not isinstance(lt_hd, bool) : raise vs.Error(f"模块 {func_name} 的子参数 lt_hd 的值无效") + if model not in [3, 5, 6] : + raise vs.Error(f"模块 {func_name} 的子参数 model 的值无效") if nr_lv not in [-1, 0, 1, 2, 3] : raise vs.Error(f"模块 {func_name} 的子参数 nr_lv 的值无效") if scale not in [1, 2, 3, 4] : raise vs.Error(f"模块 {func_name} 的子参数 scale 的值无效") if gpu not in [0, 1, 2] : raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") - if gpu_t not in [1, 2, 3] : + if not isinstance(gpu_t, int) or gpu_t <= 0 : raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") if not isinstance(st_eng, bool) : raise vs.Error(f"模块 {func_name} 的子参数 st_eng 的值无效") @@ -479,7 +832,7 @@ def WAIFU_NV( raise Exception("源分辨率不属于动态引擎支持的范围,已临时中止。") cut1 = input.resize.Bilinear(format=vs.RGBH, matrix_in_s="709") - cut2 = vsmlrt.Waifu2x(clip=cut1, noise=nr_lv, scale=scale, model=3, backend=vsmlrt.BackendV2.TRT( + cut2 = vsmlrt.Waifu2x(clip=cut1, noise=nr_lv, scale=scale, model=model, backend=vsmlrt.BackendV2.TRT( num_streams=gpu_t, force_fp16=True, 
output_format=1, workspace=None if ws_size < 128 else (ws_size if st_eng else ws_size * 2), use_cuda_graph=True, use_cublas=False, use_cudnn=False, @@ -567,7 +920,7 @@ def MVT_STD( core.num_threads = vs_t - def ffps(fps) : + def _ffps(fps) : rfps = int('%.0f' % fps) if ( abs(fps - (rfps/1.001)) < abs(fps - (rfps/1.000)) ) : vfps, vden = rfps*1000, 1001 @@ -575,7 +928,7 @@ def ffps(fps) : vfps, vden = rfps*1000, 1000 return vfps, vden - vfps, vden = ffps(fps_in) + vfps, vden = _ffps(fps_in) cut1 = core.std.AssumeFPS(input, fpsnum=int(vfps), fpsden=vden) cut_s = core.mv.Super(clip=cut1, sharp=1, rfilter=4) @@ -701,11 +1054,12 @@ def MVT_MQ( def RIFE_STD( input : vs.VideoNode, sc_mode : typing.Literal[0, 1, 2] = 1, + skip : bool = True, stat_th : float = 60.0, fps_num : int = 2, fps_den : int = 1, gpu : typing.Literal[0, 1, 2] = 0, - gpu_t : typing.Literal[1, 2, 3] = 2, + gpu_t : int = 2, vs_t : int = vs_thd_dft, ) -> vs.VideoNode : @@ -714,6 +1068,8 @@ def RIFE_STD( raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") if sc_mode not in [0, 1, 2] : raise vs.Error(f"模块 {func_name} 的子参数 sc_mode 的值无效") + if not isinstance(skip, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 skip 的值无效") if stat_th <= 0.0 : raise vs.Error(f"模块 {func_name} 的子参数 fps_in 的值无效") if not isinstance(fps_num, int) or fps_num <= 1 : @@ -722,7 +1078,7 @@ def RIFE_STD( raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") if gpu not in [0, 1, 2] : raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") - if gpu_t not in [1, 2, 3] : + if not isinstance(gpu_t, int) or gpu_t <= 0 : raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") if not isinstance(vs_t, int) or vs_t > vs_thd_init : raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") @@ -741,7 +1097,7 @@ def RIFE_STD( cut0 = core.mv.SCDetection(clip=input, vectors=vec, thscd1=240, thscd2=130) cut1 = core.resize.Bilinear(clip=cut0, format=vs.RGBS, matrix_in_s="709") - cut2 = core.rife.RIFE(clip=cut1, model=9, factor_num=fps_num, factor_den=fps_den, gpu_id=gpu, gpu_thread=gpu_t, sc=True if sc_mode else False, skip=True, skip_threshold=stat_th) + cut2 = core.rife.RIFE(clip=cut1, model=9, factor_num=fps_num, factor_den=fps_den, gpu_id=gpu, gpu_thread=gpu_t, sc=True if sc_mode else False, skip=skip, skip_threshold=stat_th) output = core.resize.Bilinear(clip=cut2, format=fmt_in, matrix_s="709", range=1 if colorlv==0 else None) return output @@ -758,7 +1114,7 @@ def RIFE_NV( t_tta : bool = False, ext_proc : bool = True, gpu : typing.Literal[0, 1, 2] = 0, - gpu_t : typing.Literal[1, 2, 3] = 2, + gpu_t : int = 2, st_eng : bool = False, ws_size : int = 0, vs_t : int = vs_thd_dft, @@ -779,7 +1135,7 @@ def RIFE_NV( raise vs.Error(f"模块 {func_name} 的子参数 ext_proc 的值无效") if gpu not in [0, 1, 2] : raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") - if gpu_t not in [1, 2, 3] : + if not isinstance(gpu_t, int) or gpu_t <= 0 : raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") if not isinstance(st_eng, bool) : raise vs.Error(f"模块 {func_name} 的子参数 st_eng 的值无效") @@ -837,7 +1193,7 @@ def RIFE_NV( fin = core.std.Crop(clip=fin, right=w_tmp, bottom=h_tmp) else : fin = vsmlrt.RIFE(clip=cut1, multi=fps_num, scale=scale_model, model=46, ensemble=t_tta, _implementation=2, backend=vsmlrt.BackendV2.TRT( - num_streams=gpu_t, force_fp16=False, output_format=0, + num_streams=gpu_t, fp16=False, force_fp16=False, tf32=True, output_format=0, workspace=None if ws_size < 128 else ws_size, use_cuda_graph=True, use_cublas=False, use_cudnn=False, static_shape=st_eng, min_shapes=[0, 0], @@ -858,7 +1214,7 @@ def RIFE_NV_ORT( cudnn : bool = 
False, ext_proc : bool = False, gpu : typing.Literal[0, 1, 2] = 0, - gpu_t : typing.Literal[1, 2, 3] = 2, + gpu_t : int = 2, vs_t : int = vs_thd_dft, ) -> vs.VideoNode : @@ -988,7 +1344,7 @@ def SVP_STD( raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") core.num_threads = vs_t - fps_in = fps_in + fmt_in = input.format.id fps_out = fps_out * 1e6 acc = 1 if cpu == 0 else 0 @@ -996,7 +1352,10 @@ def SVP_STD( super_params = "{pel:2,gpu:%d,scale:{up:2,down:4}}" % (acc) analyse_params = "{block:{w:32,h:32,overlap:2},main:{levels:4,search:{type:4,distance:-8,coarse:{type:4,distance:-5,bad:{range:0}}},penalty:{plevel:1.3,pzero:110,pnbour:75}},refine:[{thsad:200,search:{type:4,distance:2}}]}" - clip_f = core.resize.Bilinear(clip=input, format=vs.YUV420P8) + if fmt_in == vs.YUV420P8 : + clip_f = input + else : + clip_f = core.resize.Bilinear(clip=input, format=vs.YUV420P8) super = core.svp1.Super(clip_f, super_params) vectors = core.svp1.Analyse(super["clip"], super["data"], input if acc else clip_f, analyse_params) smooth = core.svp2.SmoothFps(input if acc else clip_f, super["clip"], super["data"], vectors["clip"], vectors["data"], smoothfps_params, src=input if acc else clip_f, fps=fps_in) @@ -1107,7 +1466,7 @@ def SVP_HQ( overlap = 2 if cpu == 0 else 3 w, h = input.width, input.height - if (freq - fps < 2): + if (freq - fps < 2) : raise Exception("Interpolation is not necessary.") target_fps = 60 @@ -1116,18 +1475,18 @@ def SVP_HQ( ap = "{block:{w:32,h:16,overlap:%d},main:{levels:5,search:{type:4,distance:-12,coarse:{type:4,distance:-1,trymany:true,bad:{range:0}}},penalty:{lambda:3.33,plevel:1.33,lsad:3300,pzero:110,pnbour:50}},refine:[{thsad:400},{thsad:200,search:{type:4,distance:-4}}]}" % (overlap) fp = "{gpuid:%d,algo:23,rate:{num:%d,den:%d,abs:true},mask:{cover:80,area:30,area_sharp:0.75},scene:{mode:0,limits:{scene:6000,zero:100,blocks:40}}}" % (gpu, round(min(max(target_fps, fps * 2, freq / 2), freq)) * 1000, 1001) - def toYUV420(clip) : - if clip.format.id == vs.YUV420P8: + def _toYUV420(clip) : + if clip.format.id == vs.YUV420P8 : clip8 = clip - elif clip.format.id == vs.YUV420P10: + elif clip.format.id == vs.YUV420P10 : clip8 = clip.resize.Bilinear(format=vs.YUV420P8) - else: + else : clip = clip.resize.Bilinear(format=vs.YUV420P10) clip8 = clip.resize.Bilinear(format=vs.YUV420P8) return clip, clip8 - def svpflow(clip, fps, sp, ap, fp) : - clip, clip8 = toYUV420(clip) + def _svpflow(clip, fps, sp, ap, fp) : + clip, clip8 = _toYUV420(clip) s = core.svp1.Super(clip8, sp) r = s["clip"], s["data"] v = core.svp1.Analyse(*r, clip, ap) @@ -1135,7 +1494,46 @@ def svpflow(clip, fps, sp, ap, fp) : clip = core.svp2.SmoothFps(clip if acc else clip8, *r, fp, src=clip, fps=fps) return clip - output = svpflow(input, fps, sp, ap, fp) + output = _svpflow(input, fps, sp, ap, fp) + + return output + +################################################## +## Bilateral降噪 +################################################## + +def BILA_NV( + input : vs.VideoNode, + nr_spat : typing.List[float] = [3.0, 0.0, 0.0], + nr_csp : typing.List[float] = [0.02, 0.0, 0.0], + gpu : typing.Literal[0, 1, 2] = 0, + gpu_t : int = 4, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + func_name = "BILA_NV" + if not isinstance(input, vs.VideoNode) : + raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") + if not (isinstance(nr_spat, list) and len(nr_spat) == 3) : + raise vs.Error(f"模块 {func_name} 的子参数 nr_spat 的值无效") + if not (isinstance(nr_csp, list) and len(nr_csp) == 3) : + raise vs.Error(f"模块 {func_name} 的子参数 nr_csp 的值无效") + if gpu 
not in [0, 1, 2] : + raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") + if not isinstance(gpu_t, int) or gpu_t <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") + if not isinstance(vs_t, int) or vs_t > vs_thd_init : + raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") + + core.num_threads = vs_t + fmt_in = input.format.id + + if fmt_in == vs.YUV444P16 : + cut0 = input + else : + cut0 = core.resize.Bilinear(clip=input, format=vs.YUV444P16) + cut1 = core.bilateralgpu_rtc.Bilateral(clip=cut0, sigma_spatial=nr_spat, sigma_color=nr_csp, device_id=gpu, num_streams=gpu_t, use_shared_memory=True) + output = core.resize.Bilinear(clip=cut1, format=fmt_in) return output @@ -1199,9 +1597,7 @@ def CCD_STD( colorlv = getattr(input.get_frame(0).props, "_ColorRange", 0) fmt_in = input.format.id - def ccd(src: vs.VideoNode, threshold: float = 4) -> vs.VideoNode: - if src.format.color_family != vs.RGB or src.format.sample_type != vs.FLOAT: - raise ValueError('ccd: only RGBS format is supported') + def _ccd(src: vs.VideoNode, threshold: float = 4) -> vs.VideoNode : thr = threshold**2/195075.0 r = core.std.ShufflePlanes([src, src, src], [0, 0, 0], vs.RGB) g = core.std.ShufflePlanes([src, src, src], [1, 1, 1], vs.RGB) @@ -1240,11 +1636,109 @@ def ccd(src: vs.VideoNode, threshold: float = 4) -> vs.VideoNode: return ex_ccd cut = core.resize.Bilinear(clip=input, format=vs.RGBS, matrix_in_s="709") - fin = ccd(src=cut, threshold=nr_lv) + fin = _ccd(src=cut, threshold=nr_lv) output = core.resize.Bilinear(clip=fin, format=fmt_in, matrix_s="709", range=1 if colorlv==0 else None) return output +################################################## +## DFTTest降噪 +################################################## + +def DFTT_STD( + input : vs.VideoNode, + plane : typing.List[int] = [0], + nr_lv : float = 8.0, + size_sb : int = 16, + size_so : int = 12, + size_tb : int = 3, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + func_name = "DFTT_STD" + if not isinstance(input, vs.VideoNode) : + raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") + if plane not in ([0], [1], [2], [0, 1], [0, 2], [1, 2], [0, 1, 2]) : + raise vs.Error(f"模块 {func_name} 的子参数 plane 的值无效") + if nr_lv <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 nr_lv 的值无效") + if not isinstance(size_sb, int) or size_sb <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 size_sb 的值无效") + if not isinstance(size_so, int) or size_so <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 size_so 的值无效") + if not isinstance(size_tb, int) or size_tb <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 size_tb 的值无效") + if not isinstance(vs_t, int) or vs_t > vs_thd_init : + raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") + + core.num_threads = vs_t + fmt_in = input.format.id + + global dfttest2 + if dfttest2 is None : + import dfttest2 + + if fmt_in == vs.YUV444P16 : + cut0 = input + else : + cut0 = core.resize.Bilinear(clip=input, format=vs.YUV444P16) + cut1 = dfttest2.DFTTest2(clip=cut0, planes=plane, sigma=nr_lv, sbsize=size_sb, sosize=size_so, tbsize=size_tb, backend=dfttest2.Backend.CPU()) + output = core.resize.Bilinear(clip=cut1, format=fmt_in) + + return output + +################################################## +## DFTTest降噪 +################################################## + +def DFTT_NV( + input : vs.VideoNode, + plane : typing.List[int] = [0], + nr_lv : float = 8.0, + size_sb : int = 16, + size_so : int = 12, + size_tb : int = 3, + gpu : typing.Literal[0, 1, 2] = 0, + gpu_t : int = 4, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + func_name = "DFTT_NV" + if not 
isinstance(input, vs.VideoNode) : + raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") + if plane not in ([0], [1], [2], [0, 1], [0, 2], [1, 2], [0, 1, 2]) : + raise vs.Error(f"模块 {func_name} 的子参数 plane 的值无效") + if nr_lv <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 nr_lv 的值无效") + if not isinstance(size_sb, int) or size_sb <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 size_sb 的值无效") + if not isinstance(size_so, int) or size_so <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 size_so 的值无效") + if not isinstance(size_tb, int) or size_tb <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 size_tb 的值无效") + if gpu not in [0, 1, 2] : + raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") + if not isinstance(gpu_t, int) or gpu_t <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") + if not isinstance(vs_t, int) or vs_t > vs_thd_init : + raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") + + core.num_threads = vs_t + fmt_in = input.format.id + + global dfttest2 + if dfttest2 is None : + import dfttest2 + + if fmt_in == vs.YUV444P16 : + cut0 = input + else : + cut0 = core.resize.Bilinear(clip=input, format=vs.YUV444P16) + cut1 = dfttest2.DFTTest2(clip=cut0, planes=plane, sigma=nr_lv, sbsize=size_sb, sosize=size_so, tbsize=size_tb, backend=dfttest2.Backend.NVRTC(device_id=gpu, num_streams=gpu_t)) + output = core.resize.Bilinear(clip=cut1, format=fmt_in) + + return output + ################################################## ## FFT3D降噪 ################################################## @@ -1322,21 +1816,9 @@ def NLM_STD( core.num_threads = vs_t fmt_in = input.format.id - - cut0 = core.resize.Bilinear(clip=input, format=vs.YUV444P16) - if blur_m == 0 : - blur = cut0 - elif blur_m == 1 : - blur = core.rgvs.RemoveGrain(clip=cut0, mode=20) - blur = core.rgvs.RemoveGrain(clip=blur, mode=20) - blur = core.rgvs.RemoveGrain(clip=blur, mode=20) - elif blur_m == 2 : - blur = core.std.Convolution(clip=cut0, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]) - blur = core.std.Convolution(clip=blur, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]) - blur = core.std.Convolution(clip=blur, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]) + blur, diff = LAYER_HIGH(input=input, blur_m=blur_m, vs_t=vs_t) if blur_m : - diff = core.std.MakeDiff(clipa=cut0, clipb=blur) if nlm_m == 1 : cut1 = core.knlm.KNLMeansCL(clip=diff, d=frame_num, a=rad_sw, s=rad_snw, h=nr_lv, channels="auto", wmode=2, wref=1.0, rclip=None, device_type="GPU", device_id=gpu) @@ -1367,7 +1849,7 @@ def NLM_NV( rad_snw : int = 2, nr_lv : float = 3.0, gpu : typing.Literal[0, 1, 2] = 0, - gpu_t : typing.Literal[1, 2, 3] = 2, + gpu_t : int = 4, vs_t : int = vs_thd_dft, ) -> vs.VideoNode : @@ -1386,28 +1868,16 @@ def NLM_NV( raise vs.Error(f"模块 {func_name} 的子参数 nr_lv 的值无效") if gpu not in [0, 1, 2] : raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") - if gpu_t not in [1, 2, 3] : + if not isinstance(gpu_t, int) or gpu_t <= 0 : raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") if not isinstance(vs_t, int) or vs_t > vs_thd_init : raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") core.num_threads = vs_t fmt_in = input.format.id - - cut0 = core.resize.Bilinear(clip=input, format=vs.YUV444P16) - if blur_m == 0 : - blur = cut0 - elif blur_m == 1 : - blur = core.rgvs.RemoveGrain(clip=cut0, mode=20) - blur = core.rgvs.RemoveGrain(clip=blur, mode=20) - blur = core.rgvs.RemoveGrain(clip=blur, mode=20) - elif blur_m == 2 : - blur = core.std.Convolution(clip=cut0, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]) - blur = core.std.Convolution(clip=blur, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]) - blur = core.std.Convolution(clip=blur, matrix=[1, 
1, 1, 1, 1, 1, 1, 1, 1]) + blur, diff = LAYER_HIGH(input=input, blur_m=blur_m, vs_t=vs_t) if blur_m : - diff = core.std.MakeDiff(clipa=cut0, clipb=blur) cut1 = core.nlm_cuda.NLMeans(clip=diff, d=frame_num, a=rad_sw, s=rad_snw, h=nr_lv, channels="AUTO", wmode=2, wref=1.0, rclip=None, device_id=gpu, num_streams=gpu_t) merge = core.std.MergeDiff(clipa=blur, clipb=cut1) @@ -1426,7 +1896,7 @@ def AA_NV( input : vs.VideoNode, # plane : typing.List[int] = [0], gpu : typing.Literal[-1, 0, 1, 2] = -1, - gpu_t : typing.Literal[1, 2, 3] = 2, + gpu_t : int = 4, vs_t : int = vs_thd_dft, ) -> vs.VideoNode : @@ -1437,7 +1907,7 @@ def AA_NV( # raise vs.Error(f"模块 {func_name} 的子参数 plane 的值无效") if gpu not in [-1, 0, 1, 2] : raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") - if gpu_t not in [1, 2, 3] : + if not isinstance(gpu_t, int) or gpu_t <= 0 : raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") if not isinstance(vs_t, int) or vs_t > vs_thd_init : raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") @@ -1481,6 +1951,90 @@ def COLOR_P3W_FIX( return output +################################################## +## MOD HAvsFunc (e236281cd8c1dd6b1b0cc906844944b79b1b52fa) +## 修正红蓝色度偏移 +################################################## + +def CSC_RB( + input : vs.VideoNode, + cx : int = 4, + cy : int = 4, + sat_lv1 : float = 4.0, + sat_lv2 : float = 0.8, + blur : bool = False, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + func_name = "CSC_RB" + if not isinstance(input, vs.VideoNode) : + raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") + if not isinstance(cx, int) : + raise vs.Error(f"模块 {func_name} 的子参数 cx 的值无效") + if not isinstance(cy, int) : + raise vs.Error(f"模块 {func_name} 的子参数 cy 的值无效") + if not isinstance(sat_lv1, (int, float)) : + raise vs.Error(f"模块 {func_name} 的子参数 sat_lv1 的值无效") + if not isinstance(sat_lv2, (int, float)) : + raise vs.Error(f"模块 {func_name} 的子参数 sat_lv2 的值无效") + if not isinstance(blur, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 blur 的值无效") + if not isinstance(vs_t, int) or vs_t > vs_thd_init : + raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") + + core.num_threads = vs_t + neutral = 1 << (input.format.bits_per_sample - 1) + peak = (1 << input.format.bits_per_sample) - 1 + def _cround(x) : + return math.floor(x + 0.5) if x > 0 else math.ceil(x - 0.5) + def _scale(value, peak) : + return _cround(value * peak / 255) if peak != 1 else value / 255 + def _Levels(clip, input_low, gamma, input_high, output_low, output_high, coring=True) : + gamma = 1 / gamma + divisor = input_high - input_low + (input_high == input_low) + tvLow, tvHigh = _scale(16, peak), [_scale(235, peak), _scale(240, peak)] + scaleUp, scaleDown = peak / _scale(219, peak), _scale(219, peak) / peak + def _get_lut1(x) : + p = ((x - tvLow) * scaleUp - input_low) / divisor if coring else (x - input_low) / divisor + p = min(max(p, 0), 1) ** gamma * (output_high - output_low) + output_low + return min(max(_cround(p * scaleDown + tvLow), tvLow), tvHigh[0]) if coring else min(max(_cround(p), 0), peak) + def _get_lut2(x) : + q = _cround((x - neutral) * (output_high - output_low) / divisor + neutral) + return min(max(q, tvLow), tvHigh[1]) if coring else min(max(q, 0), peak) + last = clip.std.Lut(planes=[0], function=_get_lut1) + if clip.format.color_family != vs.GRAY : + last = last.std.Lut(planes=[1, 2], function=_get_lut2) + return last + def _GetPlane(clip, plane=0) : + sFormat = clip.format + sNumPlanes = sFormat.num_planes + last = core.std.ShufflePlanes(clips=clip, planes=plane, colorfamily=vs.GRAY) + return last + + fmt_cf_in = 
input.format.color_family + vch = _GetPlane(EQ(input, sat=sat_lv1, vs_t=vs_t), 2) + area = vch + if blur : + area = vch.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]) + + red = _Levels(area, _scale(255, peak), 1.0, _scale(255, peak), _scale(255, peak), 0) + blue = _Levels(area, 0, 1.0, 0, 0, _scale(255, peak)) + mask = core.std.Merge(clipa=red, clipb=blue) + + if not blur : + mask = mask.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]) + mask = _Levels(mask, _scale(250, peak), 1.0, _scale(250, peak), _scale(255, peak), 0) + mask = mask.std.Convolution(matrix=[0, 0, 0, 1, 0, 0, 0, 0, 0], divisor=1, saturate=False).std.Convolution( + matrix=[1, 1, 1, 1, 1, 1, 0, 0, 0], divisor=8, saturate=False) + mask = _Levels(mask, _scale(10, peak), 1.0, _scale(10, peak), 0, _scale(255, peak)).std.Inflate() + input_c = EQ(input.resize.Spline16(src_left=cx, src_top=cy), sat=sat_lv2, vs_t=vs_t) + fu = core.std.MaskedMerge(clipa=_GetPlane(input, 1), clipb=_GetPlane(input_c, 1), mask=mask) + fv = core.std.MaskedMerge(clipa=_GetPlane(input, 2), clipb=_GetPlane(input_c, 2), mask=mask) + + output = core.std.ShufflePlanes([input, fu, fv], planes=[0, 0, 0], colorfamily=fmt_cf_in) + + return output + ################################################## ## f3kdb去色带 ################################################## @@ -1521,13 +2075,47 @@ def DEBAND_STD( raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") core.num_threads = vs_t + fmt_in = input.format.id color_lv = getattr(input.get_frame(0).props, "_ColorRange", 0) - cut0 = core.resize.Bilinear(clip=input, format=vs.YUV444P16) + if fmt_in == vs.YUV444P16 : + cut0 = input + else : + cut0 = core.resize.Bilinear(clip=input, format=vs.YUV444P16) output = core.neo_f3kdb.Deband(clip=cut0, range=bd_range, y=bdy_rth, cb=bdc_rth, cr=bdc_rth, grainy=grainy, grainc=grainc, sample_mode=spl_m, dynamic_grain=grain_dy, mt=True, keep_tv_range=True if color_lv==1 else False, output_depth=depth) return output +################################################## +## 简易反交错 +################################################## + +def DEINT_LQ( + input : vs.VideoNode, + deint_m : typing.Literal[1, 2] = 1, + tff : bool = True, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + func_name = "DEINT_LQ" + if not isinstance(input, vs.VideoNode) : + raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") + if deint_m not in [1, 2] : + raise vs.Error(f"模块 {func_name} 的子参数 deint_m 的值无效") + if not isinstance(tff, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 tff 的值无效") + if not isinstance(vs_t, int) or vs_t > vs_thd_init : + raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") + + core.num_threads = vs_t + + if deint_m == 1 : + output = core.resize.Bob(clip=input, filter="bicubic", tff=tff) + elif deint_m == 2 : + output = core.bwdif.Bwdif(clip=input, field=3 if tff else 2) + + return output + ################################################## ## 基于nnedi3/eedi3作参考的反交错 ################################################## @@ -1535,6 +2123,7 @@ def DEBAND_STD( def DEINT_STD( input : vs.VideoNode, ref_m : typing.Literal[1, 2, 3] = 1, + tff : bool = True, gpu : typing.Literal[-1, 0, 1, 2] = -1, deint_m : typing.Literal[1, 2, 3] = 1, vs_t : int = vs_thd_dft, ) -> vs.VideoNode : @@ -1545,6 +2134,8 @@ def DEINT_STD( raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") if ref_m not in [1, 2, 3] : raise vs.Error(f"模块 {func_name} 的子参数 ref_m 的值无效") + if not isinstance(tff, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 tff 的值无效") if gpu not in [-1, 0, 1, 2] : raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") if deint_m not in [1, 
2, 3] : @@ -1555,18 +2146,67 @@ def DEINT_STD( core.num_threads = vs_t if ref_m == 1 : - ref = core.znedi3.nnedi3(clip=input, field=3) + ref = core.znedi3.nnedi3(clip=input, field=3 if tff else 2, dh=False) elif ref_m == 2 : - ref = core.nnedi3cl.NNEDI3CL(clip=input, field=3, device=gpu) + ref = core.nnedi3cl.NNEDI3CL(clip=input, field=3 if tff else 2, dh=False, device=gpu) elif ref_m == 3 : - ref = core.eedi3m.EEDI3CL(clip=input, field=3, device=gpu) + ref = core.eedi3m.EEDI3CL(clip=input, field=3 if tff else 2, dh=False, device=gpu) if deint_m == 1 : - output = core.bwdif.Bwdif(clip=input, field=3, edeint=ref) + output = core.bwdif.Bwdif(clip=input, field=3 if tff else 2, edeint=ref) elif deint_m == 2 : - output = core.yadifmod.Yadifmod(clip=input, edeint=ref, order=1, mode=1) + output = core.yadifmod.Yadifmod(clip=input, edeint=ref, order=1 if tff else 0, mode=1) elif deint_m == 3 : - output = core.tdm.TDeintMod(clip=input, order=1, mode=1, length=6, ttype=0, edeint=ref) + output = core.tdm.TDeintMod(clip=input, order=1 if tff else 0, mode=1, length=6, ttype=0, edeint=ref) + + return output + +################################################## +## 终极反交错 +################################################## + +def DEINT_EX( + input : vs.VideoNode, + fps_in : float = 23.976, + obs : bool = True, + deint_lv : typing.Literal[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] = 6, + src_type : typing.Literal[0, 1, 2, 3] = 0, + deint_den : typing.Literal[1, 2] = 1, + tff : typing.Literal[0, 1, 2] = 0, + cpu : bool = True, + gpu : typing.Literal[-1, 0, 1, 2] = -1, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + func_name = "DEINT_EX" + if not isinstance(input, vs.VideoNode) : + raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") + if fps_in <= 0.0 : + raise vs.Error(f"模块 {func_name} 的子参数 fps_in 的值无效") + if not isinstance(obs, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 obs 的值无效") + if deint_lv not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] : + raise vs.Error(f"模块 {func_name} 的子参数 deint_lv 的值无效") + if src_type not in [0, 1, 2, 3] : + raise vs.Error(f"模块 {func_name} 的子参数 src_type 的值无效") + if deint_den not in [1, 2] : + raise vs.Error(f"模块 {func_name} 的子参数 deint_den 的值无效") + if tff not in [0, 1, 2] : + raise vs.Error(f"模块 {func_name} 的子参数 tff 的值无效") + if not isinstance(cpu, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 cpu 的值无效") + if gpu not in [-1, 0, 1, 2] : + raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") + if not isinstance(vs_t, int) or vs_t > vs_thd_init : + raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") + + core.num_threads = vs_t + + global QTGMCv2 + if QTGMCv2 is None : + from qtgmc import QTGMCv2 + + output = QTGMCv2(input=input, fps_in=fps_in, obs=obs, deint_lv=deint_lv, src_type=src_type, deint_den=deint_den, tff=tff, cpu=cpu, gpu=gpu, check=False) return output @@ -1653,34 +2293,78 @@ def STAB_HQ( core.num_threads = vs_t - def scdetect(clip: vs.VideoNode, threshold: float = 0.1) -> vs.VideoNode: - def _copy_property(n: int, f: list[vs.VideoFrame]) -> vs.VideoFrame: + def _scdetect(clip: vs.VideoNode, threshold: float = 0.1) -> vs.VideoNode : + def _copy_property(n: int, f: list[vs.VideoFrame]) -> vs.VideoFrame : fout = f[0].copy() fout.props["_SceneChangePrev"] = f[1].props["_SceneChangePrev"] fout.props["_SceneChangeNext"] = f[1].props["_SceneChangeNext"] return fout sc = clip - if clip.format.color_family == vs.RGB: + if clip.format.color_family == vs.RGB : sc = clip.resize.Point(format=vs.GRAY8, matrix_s="709") sc = sc.misc.SCDetect(threshold=threshold) - if clip.format.color_family == 
vs.RGB: + if clip.format.color_family == vs.RGB : sc = clip.std.ModifyFrame(clips=[clip, sc], selector=_copy_property) return sc ## PORT HAvsFunc (17b62a0b2695e0950e0899dba466ab42327c32c9) - def average_frames(clip: vs.VideoNode, weights: typing.Union[float, typing.Sequence[float]], scenechange: typing.Optional[float] = None, planes: typing.Optional[typing.Union[int, typing.Sequence[int]]] = None) -> vs.VideoNode: - if scenechange: - clip = scdetect(clip, scenechange) + def _average_frames(clip: vs.VideoNode, weights: typing.Union[float, typing.Sequence[float]], scenechange: typing.Optional[float] = None, planes: typing.Optional[typing.Union[int, typing.Sequence[int]]] = None) -> vs.VideoNode : + if scenechange : + clip = _scdetect(clip, scenechange) return clip.std.AverageFrames(weights=weights, scenechange=scenechange, planes=planes) - def Stab(clp, dxmax=4, dymax=4, mirror=0): - temp = average_frames(clp, weights=[1] * 15, scenechange=25 / 255) - inter = core.std.Interleave([core.rgvs.Repair(temp, average_frames(clp, weights=[1] * 3, scenechange=25 / 255), mode=[1]), clp]) + def _Stab(clp, dxmax=4, dymax=4, mirror=0) : + temp = _average_frames(clp, weights=[1] * 15, scenechange=25 / 255) + inter = core.std.Interleave([core.rgvs.Repair(temp, _average_frames(clp, weights=[1] * 3, scenechange=25 / 255), mode=[1]), clp]) mdata = inter.mv.DepanEstimate(trust=0, dxmax=dxmax, dymax=dymax) last = inter.mv.DepanCompensate(data=mdata, offset=-1, mirror=mirror) return last[::2] - output = Stab(clp=input, mirror=15) + output = _Stab(clp=input, mirror=15) + + return output + +################################################## +## 自定义ONNX模型(仅支持放大类) +################################################## + +def UAI_DML( + input : vs.VideoNode, + clamp : bool = False, + model_pth : str = "", + gpu : typing.Literal[0, 1, 2] = 0, + gpu_t : int = 2, + vs_t : int = vs_thd_dft, +) -> vs.VideoNode : + + func_name = "UAI_DML" + if not isinstance(input, vs.VideoNode) : + raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") + if not isinstance(clamp, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 clamp 的值无效") + if len(model_pth) == 0 : + raise vs.Error(f"模块 {func_name} 的子参数 model_pth 的值无效") + if gpu not in [0, 1, 2] : + raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") + if not isinstance(gpu_t, int) or gpu_t <= 0 : + raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") + if not isinstance(vs_t, int) or vs_t > vs_thd_init : + raise vs.Error(f"模块 {func_name} 的子参数 vs_t 的值无效") + + global vsmlrt + if vsmlrt is None : + import vsmlrt + + core.num_threads = vs_t + fmt_in = input.format.id + colorlv = getattr(input.get_frame(0).props, "_ColorRange", 0) + + clip = core.resize.Bilinear(clip=input, format=vs.RGBS, matrix_in_s="709") + if clamp : + clip = core.akarin.Expr(clips=clip, expr="x 0 1 clamp") + be_param = vsmlrt.BackendV2.ORT_DML(device_id=gpu, num_streams=gpu_t, fp16=True) + infer = vsmlrt.inference(clips=clip, network_path=os.path.join(vsmlrt.models_path, model_pth), backend=be_param) + output = core.resize.Bilinear(clip=infer, format=fmt_in, matrix_s="709", range=1 if colorlv==0 else None) return output @@ -1690,11 +2374,14 @@ def Stab(clp, dxmax=4, dymax=4, mirror=0): def UAI_NV_TRT( input : vs.VideoNode, + clamp : bool = False, model_pth : str = "", opt_lv : typing.Literal[0, 1, 2, 3, 4, 5] = 3, + cuda_opt : typing.List[int] = [0, 0, 0], fp16 : bool = False, + tf32 : bool = True, gpu : typing.Literal[0, 1, 2] = 0, - gpu_t : typing.Literal[1, 2, 3] = 2, + gpu_t : int = 2, st_eng : bool = False, res_opt : 
typing.List[int] = None, res_max : typing.List[int] = None, @@ -1705,15 +2392,21 @@ def UAI_NV_TRT( func_name = "UAI_NV_TRT" if not isinstance(input, vs.VideoNode) : raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效") + if not isinstance(clamp, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 clamp 的值无效") if len(model_pth) == 0 : raise vs.Error(f"模块 {func_name} 的子参数 model_pth 的值无效") if opt_lv not in [0, 1, 2, 3, 4, 5] : raise vs.Error(f"模块 {func_name} 的子参数 opt_lv 的值无效") + if not (len(cuda_opt) == 3 and all(isinstance(num, int) and num in [0, 1] for num in cuda_opt)) : + raise vs.Error(f"模块 {func_name} 的子参数 cuda_opt 的值无效") if not isinstance(fp16, bool) : raise vs.Error(f"模块 {func_name} 的子参数 fp16 的值无效") + if not isinstance(tf32, bool) : + raise vs.Error(f"模块 {func_name} 的子参数 tf32 的值无效") if gpu not in [0, 1, 2] : raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效") - if gpu_t not in [1, 2, 3] : + if not isinstance(gpu_t, int) or gpu_t <= 0 : raise vs.Error(f"模块 {func_name} 的子参数 gpu_t 的值无效") if not isinstance(st_eng, bool) : raise vs.Error(f"模块 {func_name} 的子参数 st_eng 的值无效") @@ -1737,12 +2430,15 @@ def UAI_NV_TRT( core.num_threads = vs_t fmt_in = input.format.id colorlv = getattr(input.get_frame(0).props, "_ColorRange", 0) + nv1, nv2, nv3 = [bool(num) for num in cuda_opt] clip = core.resize.Bilinear(clip=input, format=vs.RGBH if fp16 else vs.RGBS, matrix_in_s="709") + if clamp : + clip = core.akarin.Expr(clips=clip, expr="x 0 1 clamp") be_param = vsmlrt.BackendV2.TRT( builder_optimization_level=opt_lv, short_path=True, device_id=gpu, - num_streams=gpu_t, use_cuda_graph=True, use_cublas=False, use_cudnn=False, - force_fp16=fp16, output_format=1 if fp16 else 0, workspace=None if ws_size < 128 else (ws_size if st_eng else ws_size * 2), + num_streams=gpu_t, use_cuda_graph=nv1, use_cublas=nv2, use_cudnn=nv3, + fp16=fp16, force_fp16=False, tf32=tf32, output_format=1 if fp16 else 0, workspace=None if ws_size < 128 else (ws_size if st_eng else ws_size * 2), static_shape=st_eng, min_shapes=[0, 0] if st_eng else [64, 64], opt_shapes=None if st_eng else res_opt, max_shapes=None if st_eng else res_max) infer = vsmlrt.inference(clips=clip, network_path=os.path.join(vsmlrt.models_path, model_pth), backend=be_param) output = core.resize.Bilinear(clip=infer, format=fmt_in, matrix_s="709", range=1 if colorlv==0 else None) diff --git a/portable_config/vs/etc_deint_ex.vpy b/portable_config/vs/ETC_DEINT_EX.vpy similarity index 72% rename from portable_config/vs/etc_deint_ex.vpy rename to portable_config/vs/ETC_DEINT_EX.vpy index 292053e9..c92cc096 100644 --- a/portable_config/vs/etc_deint_ex.vpy +++ b/portable_config/vs/ETC_DEINT_EX.vpy @@ -1,8 +1,9 @@ +### https://github.com/hooke007/MPV_lazy/wiki/3_K7sfunc ### 超级去隔行/反交错 import vapoursynth as vs from vapoursynth import core -import qtgmc +import k7sfunc as k7f clip = video_in @@ -21,5 +22,5 @@ Gpu = -1 ## 是否仅使用CPU ## <-1|0|1|2> 使用的显卡序号,-1 为自动,0 为排序一号 -clip = qtgmc.QTGMCv2(clip, fps_in=container_fps, obs=True, deint_lv=Deint_Lv, src_type=Src_Type, deint_den=1, tff=Tff, cpu=Cpu, gpu=Gpu) +clip = k7f.DEINT_EX(clip, fps_in=container_fps, obs=True, deint_lv=Deint_Lv, src_type=Src_Type, deint_den=1, tff=Tff, cpu=Cpu, gpu=Gpu) clip.set_output() diff --git a/portable_config/vs/MEMC_MVT_LQ.vpy b/portable_config/vs/MEMC_MVT_LQ.vpy index 95caac0e..ec3f588d 100644 --- a/portable_config/vs/MEMC_MVT_LQ.vpy +++ b/portable_config/vs/MEMC_MVT_LQ.vpy @@ -15,7 +15,7 @@ H_Pre = 1440 Recal = True Block = True Lk_Fmt = False -## 整数,预降低处理源高度 +## 整数,预降低处理源高度(填你的显示器高度) ## 是否使用二次分析 ## 是否使用Block模式 
## 锁定像素格式为yuv420p8 diff --git a/portable_config/vs/MEMC_MVT_STD.vpy b/portable_config/vs/MEMC_MVT_STD.vpy index fbd474f5..a0e309a3 100644 --- a/portable_config/vs/MEMC_MVT_STD.vpy +++ b/portable_config/vs/MEMC_MVT_STD.vpy @@ -14,7 +14,7 @@ clip = video_in H_Pre = 1440 Fps_Out = 60.0 Lk_Fmt = False -## 整数,预降低处理源高度 +## 整数,预降低处理源高度(填你的显示器高度) ## 浮点,目标帧率,示例即约60fps。特殊值比如 display_fps/2 即目标为显示器刷新率的一半帧率;填 container_fps*2 即倍帧(越高的帧率增加耗能,产生更多伪影,通常是不必要的。例如对于144hz的显示器来说,最多只需要补到72fps,剩下的可让 --interpolation 帧混成补偿) ## 锁定像素格式为yuv420p8 diff --git a/portable_config/vs/MEMC_RIFE_NV.vpy b/portable_config/vs/MEMC_RIFE_NV.vpy index ecc9d290..be244e6b 100644 --- a/portable_config/vs/MEMC_RIFE_NV.vpy +++ b/portable_config/vs/MEMC_RIFE_NV.vpy @@ -22,7 +22,7 @@ Gpu_T = 2 St_Eng = False Ws_Size = 0 Lk_Fmt = False -## 整数,预降低处理源高度 +## 整数,预降低处理源高度(填你的显示器高度) ## 是否对超过DCI2K分辨率的源进行补帧 ## <0|1|2> 场景切换检测的模式,0为禁用 ## <2|3|4> 补帧倍率 diff --git a/portable_config/vs/MEMC_RIFE_STD.vpy b/portable_config/vs/MEMC_RIFE_STD.vpy index 679f9eaf..3b1f7816 100644 --- a/portable_config/vs/MEMC_RIFE_STD.vpy +++ b/portable_config/vs/MEMC_RIFE_STD.vpy @@ -15,21 +15,23 @@ H_Pre = 1440 Fps_Num = 2 Fps_Den = 1 Sc_Mode = 1 +Skip = True Stat_Th = 60.0 Gpu = 0 Gpu_T = 2 Lk_Fmt = False -## 整数,预降低处理源高度 +## 整数,预降低处理源高度(填你的显示器高度) ## 整数,Fps_Num/Fps_Den 的值即帧率倍数 ## 整数 ## <0|1|2> 场景切换检测的模式,0为禁用 -## 浮点,静止帧的检测阈值,不对该类帧进行补偿计算 +## 是否跳过静止帧的补偿计算 +## 浮点,静止帧的判定阈值 ## 使用的显卡序号,0为排序一号 ## <1|2|3> 显卡线程数 ## 是否锁定像素格式为yuv420p8 ret = k7f.FPS_CTRL(clip, fps_in=container_fps, fps_ret=True) clip = k7f.FMT_CTRL(clip, h_max=H_Pre, fmt_pix=1 if Lk_Fmt else 0) -clip = k7f.RIFE_STD(clip, sc_mode=Sc_Mode, stat_th=Stat_Th, fps_num=2, fps_den=1, gpu=Gpu, gpu_t=Gpu_T) +clip = k7f.RIFE_STD(clip, sc_mode=Sc_Mode, skip=Skip, stat_th=Stat_Th, fps_num=2, fps_den=1, gpu=Gpu, gpu_t=Gpu_T) clip.set_output() diff --git a/portable_config/vs/MEMC_SVP_LQ.vpy b/portable_config/vs/MEMC_SVP_LQ.vpy index 024cbe36..43636bf7 100644 --- a/portable_config/vs/MEMC_SVP_LQ.vpy +++ b/portable_config/vs/MEMC_SVP_LQ.vpy @@ -15,7 +15,7 @@ H_Pre = 1440 Cpu = 0 Gpu = 0 Lk_Fmt = False -## 整数,预降低处理源高度 +## 整数,预降低处理源高度(填你的显示器高度) ## <0|1> 是否只使用CPU ## <0|11|12|21> 使用的显卡序号,0为排序一号 ## 是否锁定像素格式为yuv420p8 diff --git a/portable_config/vs/MEMC_SVP_PRO.vpy b/portable_config/vs/MEMC_SVP_PRO.vpy index cdaf517f..acc49429 100644 --- a/portable_config/vs/MEMC_SVP_PRO.vpy +++ b/portable_config/vs/MEMC_SVP_PRO.vpy @@ -18,7 +18,7 @@ Abs = True Nvof = False Gpu = 0 Lk_Fmt = False -## 整数,预降低处理源高度 +## 整数,预降低处理源高度(填你的显示器高度) ## 整数, Fps_Num/Fps_Den 的计算结果即最终帧率或倍率 ## 整数 ## 输出帧率(True)还是倍率(False) diff --git a/portable_config/vs/NR_BM3D_NV.vpy b/portable_config/vs/NR_BM3D_NV.vpy index ba1718b4..0514a205 100644 --- a/portable_config/vs/NR_BM3D_NV.vpy +++ b/portable_config/vs/NR_BM3D_NV.vpy @@ -11,7 +11,7 @@ clip = video_in # 用户选项 # ############ -Nr_Lv = [5,1,1] +Nr_Lv = [5,0,0] Bs_Ref = 8 Bs_Out = 7 Gpu = 0 diff --git a/portable_config/vs/SR_CUGAN_NV.vpy b/portable_config/vs/SR_ESRGAN_DML.vpy similarity index 57% rename from portable_config/vs/SR_CUGAN_NV.vpy rename to portable_config/vs/SR_ESRGAN_DML.vpy index 12736eb7..3cf78190 100644 --- a/portable_config/vs/SR_CUGAN_NV.vpy +++ b/portable_config/vs/SR_ESRGAN_DML.vpy @@ -1,5 +1,5 @@ ### https://github.com/hooke007/MPV_lazy/wiki/3_K7sfunc -### Real-CUGAN (pro) 放大,性能要求超级高,N卡专用 +### Real-ESRGAN 放大或降噪,性能要求极高,仅支持DX12的显卡使用 import vapoursynth as vs from vapoursynth import core @@ -13,25 +13,22 @@ clip = video_in H_Pre = 720 Lt_Hd = False -Nr_Lv = -1 +Model = 5000 Gpu = 0 Gpu_T = 2 -St_Eng = False -Ws_Size = 0 
H_Max = 1440 Lk_Fmt = False ## 整数,预降低处理源高度 ## 是否对超过HD分辨率(720P)的源进行处理 -## <-1|0|3> 降噪等级,-1为不降噪 +## <0|2|5000|5001|5002|5003|5004> 使用的模型 ## 使用的显卡序号,0为排序一号 ## <1|2|3> 使用的显卡线程数 -## 是否使用静态引擎(需要对不同分辨率的源各进行预处理);动态引擎自适应不同分辨率(64²→DCI2K) -## <0~1024> 约束显存(MiB),静态引擎的最小值为128(动态引擎自动双倍),设为低于此数的值即为不限制 ## 整数,输出高度限制(填你的显示器高度) ## 是否锁定像素格式为yuv420p8 +ret = k7f.FMT_CTRL(clip, h_max=1200, h_ret=True) clip = k7f.FMT_CTRL(clip, h_max=H_Pre, fmt_pix=1 if Lk_Fmt else 0) -clip = k7f.CUGAN_NV(clip, lt_hd=Lt_Hd, nr_lv=Nr_Lv, gpu=Gpu, gpu_t=Gpu_T, st_eng=St_Eng, ws_size=Ws_Size) +clip = k7f.ESRGAN_DML(clip, lt_hd=Lt_Hd, model=Model, scale=4 if Model==2 else 2, gpu=Gpu, gpu_t=Gpu_T) clip = k7f.FMT_CTRL(clip, h_max=H_Max, fmt_pix=1 if Lk_Fmt else 0) clip.set_output() diff --git a/portable_config/vs/SR_ESRGAN_NV.vpy b/portable_config/vs/SR_ESRGAN_NV.vpy index aff52b67..da98aaf2 100644 --- a/portable_config/vs/SR_ESRGAN_NV.vpy +++ b/portable_config/vs/SR_ESRGAN_NV.vpy @@ -14,7 +14,6 @@ clip = video_in H_Pre = 720 Lt_Hd = False Model = 5000 -Scale = 2 Gpu = 0 Gpu_T = 2 St_Eng = False @@ -23,8 +22,7 @@ H_Max = 1440 Lk_Fmt = False ## 整数,预降低处理源高度 ## 是否对超过HD分辨率(720P)的源进行处理 -## <2|5000|5001|5002|5003|5004> 使用的模型 -## <2|4> 放大倍率,模型二号应使用4,其它使用2 +## <0|2|5000|5001|5002|5003|5004> 使用的模型 ## 使用的显卡序号,0为排序一号 ## <1|2|3> 使用的显卡线程数 ## 是否使用静态引擎(需要对不同分辨率的源各进行预处理);动态引擎自适应不同分辨率(64²→DCI2K) @@ -32,8 +30,9 @@ Lk_Fmt = False ## 整数,输出高度限制(填你的显示器高度) ## 是否锁定像素格式为yuv420p8 +ret = k7f.FMT_CTRL(clip, h_max=1200, h_ret=True) clip = k7f.FMT_CTRL(clip, h_max=H_Pre, fmt_pix=1 if Lk_Fmt else 0) -clip = k7f.ESRGAN_NV(clip, lt_hd=Lt_Hd, model=Model, scale=Scale, gpu=Gpu, gpu_t=Gpu_T, st_eng=St_Eng, ws_size=Ws_Size) +clip = k7f.ESRGAN_NV(clip, lt_hd=Lt_Hd, model=Model, scale=4 if Model==2 else 2, gpu=Gpu, gpu_t=Gpu_T, st_eng=St_Eng, ws_size=Ws_Size) clip = k7f.FMT_CTRL(clip, h_max=H_Max, fmt_pix=1 if Lk_Fmt else 0) clip.set_output() diff --git a/portable_config/vs/SR_WAIFU_DML.vpy b/portable_config/vs/SR_WAIFU_DML.vpy new file mode 100644 index 00000000..7639e14b --- /dev/null +++ b/portable_config/vs/SR_WAIFU_DML.vpy @@ -0,0 +1,36 @@ +### https://github.com/hooke007/MPV_lazy/wiki/3_K7sfunc +### waifu2x 放大或降噪,性能要求非常高,仅支持DX12的显卡使用 + +import vapoursynth as vs +from vapoursynth import core +import k7sfunc as k7f + +clip = video_in + +############ +# 用户选项 # +############ + +H_Pre = 720 +Lt_Hd = False +Model = 3 +Nr_Lv = 2 +Gpu = 0 +Gpu_T = 2 +H_Max = 1440 +Lk_Fmt = False +## 整数,预降低处理源高度 +## 是否对超过HD分辨率(720P)的源进行处理 +## <3|5|6> 使用的模型 +## <-1|0|1|2|3> 降噪等级,-1为不降噪 +## 使用的显卡序号,0为排序一号 +## <1|2|3> 使用的显卡线程数 +## 整数,输出高度限制(填你的显示器高度) +## 是否锁定像素格式为yuv420p8 + +ret = k7f.FMT_CTRL(clip, h_max=1200, h_ret=True) +clip = k7f.FMT_CTRL(clip, h_max=H_Pre, fmt_pix=1 if Lk_Fmt else 0) +clip = k7f.WAIFU_DML(clip, lt_hd=Lt_Hd, model=Model, nr_lv=Nr_Lv, scale=2, gpu=Gpu, gpu_t=Gpu_T) +clip = k7f.FMT_CTRL(clip, h_max=H_Max, fmt_pix=1 if Lk_Fmt else 0) + +clip.set_output() diff --git a/portable_config/vs/SR_WAIFU_NV.vpy b/portable_config/vs/SR_WAIFU_NV.vpy index 05b8aefa..7335bcda 100644 --- a/portable_config/vs/SR_WAIFU_NV.vpy +++ b/portable_config/vs/SR_WAIFU_NV.vpy @@ -1,5 +1,5 @@ ### https://github.com/hooke007/MPV_lazy/wiki/3_K7sfunc -### waifu2x (upconv_7_anime_style_art_rgb) 放大或降噪,性能要求非常高,N卡专用 +### waifu2x 放大或降噪,性能要求非常高,N卡专用 import vapoursynth as vs from vapoursynth import core @@ -13,6 +13,7 @@ clip = video_in H_Pre = 720 Lt_Hd = False +Model = 3 Nr_Lv = 2 Gpu = 0 Gpu_T = 2 @@ -22,8 +23,8 @@ H_Max = 1440 Lk_Fmt = False ## 整数,预降低处理源高度 ## 是否对超过HD分辨率(720P)的源进行处理 +## <3|5|6> 使用的模型 ## 
diff --git a/portable_config/vs/SR_WAIFU_DML.vpy b/portable_config/vs/SR_WAIFU_DML.vpy
new file mode 100644
index 00000000..7639e14b
--- /dev/null
+++ b/portable_config/vs/SR_WAIFU_DML.vpy
@@ -0,0 +1,36 @@
+### https://github.com/hooke007/MPV_lazy/wiki/3_K7sfunc
+### waifu2x 放大或降噪,性能要求非常高,仅支持DX12的显卡使用
+
+import vapoursynth as vs
+from vapoursynth import core
+import k7sfunc as k7f
+
+clip = video_in
+
+############
+# 用户选项 #
+############
+
+H_Pre = 720
+Lt_Hd = False
+Model = 3
+Nr_Lv = 2
+Gpu = 0
+Gpu_T = 2
+H_Max = 1440
+Lk_Fmt = False
+## 整数,预降低处理源高度
+## 是否对超过HD分辨率(720P)的源进行处理
+## <3|5|6> 使用的模型
+## <-1|0|1|2|3> 降噪等级,-1为不降噪
+## 使用的显卡序号,0为排序一号
+## <1|2|3> 使用的显卡线程数
+## 整数,输出高度限制(填你的显示器高度)
+## 是否锁定像素格式为yuv420p8
+
+ret = k7f.FMT_CTRL(clip, h_max=1200, h_ret=True)
+clip = k7f.FMT_CTRL(clip, h_max=H_Pre, fmt_pix=1 if Lk_Fmt else 0)
+clip = k7f.WAIFU_DML(clip, lt_hd=Lt_Hd, model=Model, nr_lv=Nr_Lv, scale=2, gpu=Gpu, gpu_t=Gpu_T)
+clip = k7f.FMT_CTRL(clip, h_max=H_Max, fmt_pix=1 if Lk_Fmt else 0)
+
+clip.set_output()
diff --git a/portable_config/vs/SR_WAIFU_NV.vpy b/portable_config/vs/SR_WAIFU_NV.vpy
index 05b8aefa..7335bcda 100644
--- a/portable_config/vs/SR_WAIFU_NV.vpy
+++ b/portable_config/vs/SR_WAIFU_NV.vpy
@@ -1,5 +1,5 @@
 ### https://github.com/hooke007/MPV_lazy/wiki/3_K7sfunc
-### waifu2x (upconv_7_anime_style_art_rgb) 放大或降噪,性能要求非常高,N卡专用
+### waifu2x 放大或降噪,性能要求非常高,N卡专用
 
 import vapoursynth as vs
 from vapoursynth import core
@@ -13,6 +13,7 @@ clip = video_in
 
 H_Pre = 720
 Lt_Hd = False
+Model = 3
 Nr_Lv = 2
 Gpu = 0
 Gpu_T = 2
@@ -22,8 +23,8 @@ H_Max = 1440
 Lk_Fmt = False
 ## 整数,预降低处理源高度
 ## 是否对超过HD分辨率(720P)的源进行处理
+## <3|5|6> 使用的模型
 ## <-1|0|1|2|3> 降噪等级,-1为不降噪
-## <1|2> 放大倍率,1为不放大
 ## 使用的显卡序号,0为排序一号
 ## <1|2|3> 使用的显卡线程数
 ## 是否使用静态引擎(需要对不同分辨率的源各进行预处理);动态引擎自适应不同分辨率(64²→DCI2K)
@@ -31,8 +32,9 @@ Lk_Fmt = False
 ## 整数,输出高度限制(填你的显示器高度)
 ## 是否锁定像素格式为yuv420p8
 
+ret = k7f.FMT_CTRL(clip, h_max=1200, h_ret=True)
 clip = k7f.FMT_CTRL(clip, h_max=H_Pre, fmt_pix=1 if Lk_Fmt else 0)
-clip = k7f.WAIFU_NV(clip, lt_hd=Lt_Hd, nr_lv=Nr_Lv, scale=2, gpu=Gpu, gpu_t=Gpu_T, st_eng=St_Eng, ws_size=Ws_Size)
+clip = k7f.WAIFU_NV(clip, lt_hd=Lt_Hd, model=Model, nr_lv=Nr_Lv, scale=2, gpu=Gpu, gpu_t=Gpu_T, st_eng=St_Eng, ws_size=Ws_Size)
 clip = k7f.FMT_CTRL(clip, h_max=H_Max, fmt_pix=1 if Lk_Fmt else 0)
 
 clip.set_output()
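
The two waifu2x scripts now expose the same Model (3|5|6) and Nr_Lv options, the DirectML variant simply dropping the engine controls (St_Eng, Ws_Size) that only the NVIDIA build exposes. A side-by-side sketch of the two calls, assuming k7sfunc plus the respective DirectML/TensorRT backends are installed; the Use_Nv switch and the BlankClip source are purely illustrative:

import vapoursynth as vs
from vapoursynth import core
import k7sfunc as k7f

clip = core.std.BlankClip(width=1280, height=720, format=vs.YUV420P8, length=120)
Use_Nv = False
if Use_Nv:
    # NVIDIA path, with the extra static-engine and workspace-size knobs
    clip = k7f.WAIFU_NV(clip, lt_hd=False, model=3, nr_lv=2, scale=2, gpu=0, gpu_t=2, st_eng=False, ws_size=0)
else:
    # DX12/DirectML path
    clip = k7f.WAIFU_DML(clip, lt_hd=False, model=3, nr_lv=2, scale=2, gpu=0, gpu_t=2)
clip.set_output()
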
diff --git a/qtgmc.py b/qtgmc.py
index 540a64ff..d759113f 100644
--- a/qtgmc.py
+++ b/qtgmc.py
@@ -1,4 +1,7 @@
-### MOD HAvsFunc (0f6a7d9d9712d59b4e74e1e570fc6e3a526917f9)
+
+__version__ = "0.0.3"
+
+__all__ = ["QTGMC", "QTGMC_obs", "QTGMCv2"]
 
 import vapoursynth as vs
 from vapoursynth import core
@@ -8,6 +11,9 @@
 vstools = None
 QTGMC_globals = {}
 
+dfttest2 = None
+
+### MOD HAvsFunc (0f6a7d9d9712d59b4e74e1e570fc6e3a526917f9)
 
 def QTGMC(
     Input: vs.VideoNode,
@@ -361,18 +367,22 @@ def QTGMC(
     import vsexprtools
     import vsrgtools
 
-    def average_frames(
+    global dfttest2
+    if dfttest2 is None :
+        import dfttest2
+
+    def _average_frames(
         clip: vs.VideoNode, weights: float | typing.Sequence[float], scenechange: float | None = None, planes: vstools.PlanesT = None
     ) -> vs.VideoNode:
-        assert vstools.check_variable(clip, average_frames)
+        assert vstools.check_variable(clip, _average_frames)
         planes = vstools.normalize_planes(clip, planes)
 
-        def scdetect(clip: vs.VideoNode, threshold: float = 0.1) -> vs.VideoNode:
+        def _scdetect(clip: vs.VideoNode, threshold: float = 0.1) -> vs.VideoNode:
            def _copy_property(n: int, f: list[vs.VideoFrame]) -> vs.VideoFrame:
                fout = f[0].copy()
                fout.props["_SceneChangePrev"] = f[1].props["_SceneChangePrev"]
                fout.props["_SceneChangeNext"] = f[1].props["_SceneChangeNext"]
                return fout
-           assert vstools.check_variable(clip, scdetect)
+           assert vstools.check_variable(clip, _scdetect)
            sc = clip
            if clip.format.color_family == vs.RGB:
@@ -381,10 +391,10 @@ def _copy_property(n: int, f: list[vs.VideoFrame]) -> vs.VideoFrame:
               sc = clip.std.ModifyFrame([clip, sc], _copy_property)
            return sc
        if scenechange:
-           clip = scdetect(clip, scenechange)
+           clip = _scdetect(clip, scenechange)
        return clip.std.AverageFrames(weights=weights, scenechange=scenechange, planes=planes)
 
-    def mt_clamp(
+    def _mt_clamp(
        clip: vs.VideoNode,
        bright: vs.VideoNode,
        dark: vs.VideoNode,
@@ -393,8 +403,8 @@ def mt_clamp(
        planes: vstools.PlanesT = None,
     ) -> vs.VideoNode:
        ## clamp the value of the clip between bright + overshoot and dark - undershoot
-       vstools.check_ref_clip(clip, bright, mt_clamp)
-       vstools.check_ref_clip(clip, dark, mt_clamp)
+       vstools.check_ref_clip(clip, bright, _mt_clamp)
+       vstools.check_ref_clip(clip, dark, _mt_clamp)
        planes = vstools.normalize_planes(clip, planes)
        if vsexprtools.complexpr_available:
            expr = f"x z {undershoot} - y {overshoot} + clamp"
@@ -503,7 +513,7 @@
    elif pNum < 8:
        SearchParam = 16
 
-    # Noise presets                               Slower      Slow       Medium     Fast      Faster
+    # Noise presets                              Slower      Slow       Medium     Fast      Faster
     Denoiser = vstools.fallback(Denoiser, ['dfttest', 'dfttest', 'dfttest', 'fft3df', 'fft3df'][npNum]).lower()
     DenoiseMC = vstools.fallback(DenoiseMC, [ True, True, False, False, False ][npNum])
     NoiseTR = vstools.fallback(NoiseTR, [ 2, 1, 1, 1, 0 ][npNum])
@@ -688,9 +698,9 @@ def mt_clamp(
     # Create linear weightings of neighbors first                          -2    -1    0     1     2
     if not isinstance(srchClip, vs.VideoNode):
        if TR0 > 0:
-           ts1 = average_frames(bobbed, weights=[1] * 3, scenechange=28 / 255, planes=CMplanes)  # 0.00 0.33 0.33 0.33 0.00
+           ts1 = _average_frames(bobbed, weights=[1] * 3, scenechange=28 / 255, planes=CMplanes)  # 0.00 0.33 0.33 0.33 0.00
        if TR0 > 1:
-           ts2 = average_frames(bobbed, weights=[1] * 5, scenechange=28 / 255, planes=CMplanes)  # 0.20 0.20 0.20 0.20 0.20
+           ts2 = _average_frames(bobbed, weights=[1] * 5, scenechange=28 / 255, planes=CMplanes)  # 0.20 0.20 0.20 0.20 0.20
 
     # Combine linear weightings to give binomial weightings - TR0=0: (1), TR0=1: (1:2:1), TR0=2: (1:4:6:4:1)
     if isinstance(srchClip, vs.VideoNode):
@@ -843,7 +853,7 @@ def mt_clamp(
            import mvsfunc as mvf
            dnWindow = mvf.BM3D(noiseWindow, radius1=NoiseTR, sigma=[Sigma if vstools.plane in CNplanes else 0 for vstools.plane in range(3)])
        elif Denoiser == 'dfttest':
-           dnWindow = noiseWindow.dfttest.DFTTest(sigma=Sigma * 4, tbsize=noiseTD, planes=CNplanes)
+           dnWindow = dfttest2.DFTTest(clip=noiseWindow, sigma=Sigma * 4, tbsize=noiseTD, planes=CNplanes) #TODO:GPU
        elif Denoiser in ['knlm', 'knlmeanscl']:
            dnWindow = vsdenoise.nl_means(noiseWindow, strength=Sigma, tr=NoiseTR, planes=CNplanes)
        else:
@@ -1047,7 +1057,7 @@ def mt_clamp(
        else:
            sharpLimit1 = core.rgvs.Repair(backBlend1, core.rgvs.Repair(backBlend1, edi, mode=12), mode=1)
    elif SLMode == 2:
-       sharpLimit1 = mt_clamp(backBlend1, tMax, tMin, SOvs, SOvs)
+       sharpLimit1 = _mt_clamp(backBlend1, tMax, tMin, SOvs, SOvs)
    else:
        sharpLimit1 = backBlend1
 
@@ -1093,7 +1103,7 @@ def mt_clamp(
        else:
            sharpLimit2 = core.rgvs.Repair(repair2, core.rgvs.Repair(repair2, edi, mode=12), mode=1)
    elif SLMode >= 4:
-       sharpLimit2 = mt_clamp(repair2, tMax, tMin, SOvs, SOvs)
+       sharpLimit2 = _mt_clamp(repair2, tMax, tMin, SOvs, SOvs)
    else:
        sharpLimit2 = repair2
 
@@ -1624,10 +1634,10 @@ def QTGMC_GetUserGlobal(Prefix: str, Name: str) -> typing.Union[vs.VideoNode, No
 # QTGMC( Preset="Slow", ShowSettings=True )
 
 def QTGMC_obs_Scale(value, peak):
-    def cround(x):
+    def _cround(x):
        return math.floor(x + 0.5) if x > 0 else math.ceil(x - 0.5)
 
-    return cround(value * peak / 255) if peak != 1 else value / 255
+    return _cround(value * peak / 255) if peak != 1 else value / 255
 
 def QTGMC_obs_Weave(clip, tff):
     if not isinstance(clip, vs.VideoNode):
@@ -1645,11 +1655,15 @@ def QTGMC_obs(
        StabilizeNoise=None, InputType=0, ProgSADMask=None, FPSDivisor=1, ShutterBlur=0, ShutterAngleSrc=180, ShutterAngleOut=180, SBlurLimit=4, Border=False, Precise=None,
        Tuning='None', ShowSettings=False, ForceTR=0, TFF=None, pscrn=None, int16_prescreener=None, int16_predictor=None, exp=None, alpha=None, beta=None, gamma=None, nrad=None, vcheck=None,
        opencl=False, device=None):
-    def Clamp(clip, bright_limit, dark_limit, overshoot=0, undershoot=0, planes=None):
+    global dfttest2
+    if dfttest2 is None :
+        import dfttest2
+
+    def _Clamp(clip, bright_limit, dark_limit, overshoot=0, undershoot=0, planes=None):
        if not (isinstance(clip, vs.VideoNode) and isinstance(bright_limit, vs.VideoNode) and isinstance(dark_limit, vs.VideoNode)):
-           raise vs.Error('Clamp: this is not a clip')
+           raise vs.Error('_Clamp: this is not a clip')
        if bright_limit.format.id != clip.format.id or dark_limit.format.id != clip.format.id:
-           raise vs.Error('Clamp: clips must have the same format')
+           raise vs.Error('_Clamp: clips must have the same format')
        if planes is None:
            planes = list(range(clip.format.num_planes))
        elif isinstance(planes, int):
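
dfttest2 is pulled in through the same lazy-import idiom in both QTGMC and QTGMC_obs: a module-level name initialised to None, then a global/import pair on first use, so the dependency is only resolved when a denoising path actually needs it. A stripped-down sketch of that idiom together with the replacement call; the helper name _denoise and its default values are illustrative, not part of qtgmc.py:

dfttest2 = None

def _denoise(clip, sigma=8.0, tbsize=1):
    global dfttest2
    if dfttest2 is None:
        import dfttest2   # bound to the module-level global, so later calls skip the import
    # dfttest2.DFTTest replaces the old clip.dfttest.DFTTest plugin call used before this change
    return dfttest2.DFTTest(clip=clip, sigma=sigma, tbsize=tbsize)
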
@@ -1657,11 +1671,11 @@ def Clamp(clip, bright_limit, dark_limit, overshoot=0, undershoot=0, planes=None):
        expr = f'x y {overshoot} + > y {overshoot} + x ? z {undershoot} - < z {undershoot} - x y {overshoot} + > y {overshoot} + x ? ?'
        return core.std.Expr([clip, bright_limit, dark_limit], expr=[expr if i in planes else '' for i in range(clip.format.num_planes)])
 
-    def DitherLumaRebuild(src, s0=2.0, c=0.0625, chroma=True):
+    def _DitherLumaRebuild(src, s0=2.0, c=0.0625, chroma=True):
        if not isinstance(src, vs.VideoNode):
-           raise vs.Error('DitherLumaRebuild: this is not a clip')
+           raise vs.Error('_DitherLumaRebuild: this is not a clip')
        if src.format.color_family == vs.RGB:
-           raise vs.Error('DitherLumaRebuild: RGB format is not supported')
+           raise vs.Error('_DitherLumaRebuild: RGB format is not supported')
        isGray = (src.format.color_family == vs.GRAY)
        isInteger = (src.format.sample_type == vs.INTEGER)
        shift = src.format.bits_per_sample - 8
@@ -1671,15 +1685,15 @@ def DitherLumaRebuild(src, s0=2.0, c=0.0625, chroma=True):
        e = f'{k} {1 + c} {(1 + c) * c} {t} {c} + / - * {t} 1 {k} - * + {256 << shift if isInteger else 256 / 255} *'
        return src.std.Expr(expr=[e] if isGray else [e, f'x {neutral} - 128 * 112 / {neutral} +' if chroma else ''])
 
-    def Gauss(clip, p=None, sigma=None, planes=None):
+    def _Gauss(clip, p=None, sigma=None, planes=None):
        if not isinstance(clip, vs.VideoNode):
-           raise vs.Error('Gauss: this is not a clip')
+           raise vs.Error('_Gauss: this is not a clip')
        if p is None and sigma is None:
-           raise vs.Error('Gauss: must have p or sigma')
+           raise vs.Error('_Gauss: must have p or sigma')
        if p is not None and not 0.385 <= p <= 64.921:
-           raise vs.Error('Gauss: p must be between 0.385 and 64.921 (inclusive)')
+           raise vs.Error('_Gauss: p must be between 0.385 and 64.921 (inclusive)')
        if sigma is not None and not 0.334 <= sigma <= 4.333:
-           raise vs.Error('Gauss: sigma must be between 0.334 and 4.333 (inclusive)')
+           raise vs.Error('_Gauss: sigma must be between 0.334 and 4.333 (inclusive)')
 
        if sigma is None and p is not None: # Translate AviSynth parameter to standard parameter.
            sigma = math.sqrt(1.0 / (2.0 * (p / 10.0) * math.log(2)))
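
The p-to-sigma translation kept from the AviSynth original, sigma = sqrt(1 / (2 * (p / 10) * ln 2)), is easy to sanity-check: the two p values used later in QTGMC_obs and the documented p range all map onto sigma values inside the allowed 0.334..4.333 window. The helper name below is only for illustration:

import math

def p_to_sigma(p):
    # same mapping as _Gauss: translate the AviSynth-style `p` into a Gaussian sigma
    return math.sqrt(1.0 / (2.0 * (p / 10.0) * math.log(2)))

print(round(p_to_sigma(2.35), 2))    # ~1.75, the SrchClipPP spatial blur
print(round(p_to_sigma(5), 2))       # ~1.2, the Sbb back-blend
print(round(p_to_sigma(0.385), 2))   # ~4.33, i.e. the p limits line up with the sigma limits
print(round(p_to_sigma(64.921), 2))  # ~0.33
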
@@ -2010,7 +2024,7 @@ def Gauss(clip, p=None, sigma=None, planes=None):
        if SrchClipPP == 1:
            spatialBlur = repair0.resize.Bilinear(w // 2, h // 2).std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=CMplanes).resize.Bilinear(w, h)
        elif SrchClipPP >= 2:
-           spatialBlur = Gauss(repair0.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=CMplanes), p=2.35)
+           spatialBlur = _Gauss(repair0.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=CMplanes), p=2.35)
        if SrchClipPP > 1:
            spatialBlur = core.std.Merge(spatialBlur, repair0, weight=[0.1] if ChromaMotion or isGray else [0.1, 0])
        if SrchClipPP <= 0:
@@ -2027,7 +2041,7 @@ def Gauss(clip, p=None, sigma=None, planes=None):
    if maxTR > 0:
        analyse_args = dict(blksize=BlockSize, overlap=Overlap, search=Search, searchparam=SearchParam, pelsearch=PelSearch, truemotion=TrueMotion, lambda_=Lambda, lsad=LSAD, pnew=PNew, plevel=PLevel, global_=GlobalMotion, dct=DCT, chroma=ChromaMotion)
-       srchSuper = DitherLumaRebuild(srchClip, s0=1, chroma=ChromaMotion).mv.Super(pel=SubPel, sharp=SubPelInterp, hpad=hpad, vpad=vpad, chroma=ChromaMotion)
+       srchSuper = _DitherLumaRebuild(srchClip, s0=1, chroma=ChromaMotion).mv.Super(pel=SubPel, sharp=SubPelInterp, hpad=hpad, vpad=vpad, chroma=ChromaMotion)
        bVec1 = srchSuper.mv.Analyse(isb=True, delta=1, **analyse_args)
        fVec1 = srchSuper.mv.Analyse(isb=False, delta=1, **analyse_args)
 
    if maxTR > 1:
@@ -2064,7 +2078,7 @@ def Gauss(clip, p=None, sigma=None, planes=None):
                                              core.mv.Compensate(fullClip, fullSuper, bVec1, thscd1=ThSCD1, thscd2=ThSCD2),
                                              core.mv.Compensate(fullClip, fullSuper, bVec2, thscd1=ThSCD1, thscd2=ThSCD2)])
        if Denoiser == 'dfttest':
-           dnWindow = noiseWindow.dfttest.DFTTest(sigma=Sigma * 4, tbsize=noiseTD, planes=CNplanes)
+           dnWindow = dfttest2.DFTTest(clip=noiseWindow, sigma=Sigma * 4, tbsize=noiseTD, planes=CNplanes) #TODO:GPU
        elif Denoiser == 'knlmeanscl':
            if ChromaNoise and not isGray:
                dnWindow = KNLMeansCL(noiseWindow, d=NoiseTR, h=Sigma)
@@ -2227,7 +2241,7 @@ def Gauss(clip, p=None, sigma=None, planes=None):
    if Sbb not in [1, 3]:
        backBlend1 = thin
    else:
-       backBlend1 = core.std.MakeDiff(thin, Gauss(core.std.MakeDiff(thin, lossed1, planes=[0]).std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=[0]), p=5), planes=[0])
+       backBlend1 = core.std.MakeDiff(thin, _Gauss(core.std.MakeDiff(thin, lossed1, planes=[0]).std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=[0]), p=5), planes=[0])
 
    # Limit over-sharpening by clamping to neighboring (spatial or temporal) min/max values in original
    # Occurs here (before final temporal smooth) if SLMode == 1,2. This location will restrict sharpness more, but any artefacts introduced will be smoothed
@@ -2237,7 +2251,7 @@ def Gauss(clip, p=None, sigma=None, planes=None):
        else:
            sharpLimit1 = core.rgvs.Repair(backBlend1, core.rgvs.Repair(backBlend1, edi, mode=[12]), mode=[1])
    elif SLMode == 2:
-       sharpLimit1 = Clamp(backBlend1, tMax, tMin, SOvs, SOvs)
+       sharpLimit1 = _Clamp(backBlend1, tMax, tMin, SOvs, SOvs)
    else:
        sharpLimit1 = backBlend1
 
@@ -2245,7 +2259,7 @@ def Gauss(clip, p=None, sigma=None, planes=None):
    if Sbb < 2:
        backBlend2 = sharpLimit1
    else:
-       backBlend2 = core.std.MakeDiff(sharpLimit1, Gauss(core.std.MakeDiff(sharpLimit1, lossed1, planes=[0]).std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=[0]), p=5), planes=[0])
+       backBlend2 = core.std.MakeDiff(sharpLimit1, _Gauss(core.std.MakeDiff(sharpLimit1, lossed1, planes=[0]).std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=[0]), p=5), planes=[0])
 
    # Add back any extracted noise, prior to final temporal smooth - this will restore detail that was removed as "noise" without restoring the noise itself
    # Average luma of FFT3DFilter extracted noise is 128.5, so deal with that too
@@ -2281,7 +2295,7 @@ def Gauss(clip, p=None, sigma=None, planes=None):
        else:
            sharpLimit2 = core.rgvs.Repair(repair2, core.rgvs.Repair(repair2, edi, mode=[12]), mode=[1])
    elif SLMode >= 4:
-       sharpLimit2 = Clamp(repair2, tMax, tMin, SOvs, SOvs)
+       sharpLimit2 = _Clamp(repair2, tMax, tMin, SOvs, SOvs)
    else:
        sharpLimit2 = repair2
 
@@ -2636,27 +2650,30 @@ def QTGMCv2(
	tff : typing.Literal[0, 1, 2] = 0,
	cpu : bool = True,
	gpu : typing.Literal[-1, 0, 1, 2] = -1,
+	check : bool = True,
 ) -> vs.VideoNode:
 
-	func_name = "QTGMCv2"
-	if not isinstance(input, vs.VideoNode) :
-		raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效")
-	if fps_in <= 0.0 :
-		raise vs.Error(f"模块 {func_name} 的子参数 fps_in 的值无效")
-	if not isinstance(obs, bool) :
-		raise vs.Error(f"模块 {func_name} 的子参数 obs 的值无效")
-	if deint_lv not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] :
-		raise vs.Error(f"模块 {func_name} 的子参数 deint_lv 的值无效")
-	if src_type not in [0, 1, 2, 3] :
-		raise vs.Error(f"模块 {func_name} 的子参数 src_type 的值无效")
-	if deint_den not in [1, 2] :
-		raise vs.Error(f"模块 {func_name} 的子参数 deint_den 的值无效")
-	if tff not in [0, 1, 2] :
-		raise vs.Error(f"模块 {func_name} 的子参数 tff 的值无效")
-	if not isinstance(cpu, bool) :
-		raise vs.Error(f"模块 {func_name} 的子参数 cpu 的值无效")
-	if gpu not in [-1, 0, 1, 2] :
-		raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效")
+	if check :
+		func_name = "QTGMCv2"
+		if not isinstance(input, vs.VideoNode) :
+			raise vs.Error(f"模块 {func_name} 的子参数 input 的值无效")
+		if fps_in <= 0.0 :
+			raise vs.Error(f"模块 {func_name} 的子参数 fps_in 的值无效")
+		if not isinstance(obs, bool) :
+			raise vs.Error(f"模块 {func_name} 的子参数 obs 的值无效")
+		if deint_lv not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] :
+			raise vs.Error(f"模块 {func_name} 的子参数 deint_lv 的值无效")
+		if src_type not in [0, 1, 2, 3] :
+			raise vs.Error(f"模块 {func_name} 的子参数 src_type 的值无效")
+		if deint_den not in [1, 2] :
+			raise vs.Error(f"模块 {func_name} 的子参数 deint_den 的值无效")
+		if tff not in [0, 1, 2] :
+			raise vs.Error(f"模块 {func_name} 的子参数 tff 的值无效")
+		if not isinstance(cpu, bool) :
+			raise vs.Error(f"模块 {func_name} 的子参数 cpu 的值无效")
+		if gpu not in [-1, 0, 1, 2] :
+			raise vs.Error(f"模块 {func_name} 的子参数 gpu 的值无效")
+
	if not tff :
		field_src = getattr(input.get_frame(0).props, "_FieldBased", 1)
		if field_src == 0 :