mirror of https://github.com/k4yt3x/video2x.git

Merge pull request #695 from k4yt3x/feat/decouple

commit 508d6ea4d0
@@ -31,4 +31,4 @@ __version__ = "5.0.0-beta5"
 # generated by the following lines
 from .interpolator import Interpolator
 from .upscaler import Upscaler
-from .video2x import Video2X, main
+from .video2x import Video2X
@@ -22,9 +22,218 @@ Date Created: July 3, 2021
 Last Modified: February 26, 2022
 """
 
+import argparse
+import os
+import pathlib
 import sys
 
-from .video2x import main
+from loguru import logger
+from rich import print as rich_print
+
+from . import __version__
+from .video2x import LOGURU_FORMAT, Video2X
+
+LEGAL_INFO = f"""Video2X\t\t{__version__}
+Author:\t\tK4YT3X
+License:\tGNU AGPL v3
+Github Page:\thttps://github.com/k4yt3x/video2x
+Contact:\ti@k4yt3x.com"""
+
+# algorithms available for upscaling tasks
+UPSCALING_ALGORITHMS = [
+    "waifu2x",
+    "srmd",
+    "realsr",
+    "realcugan",
+]
+
+# algorithms available for frame interpolation tasks
+INTERPOLATION_ALGORITHMS = ["rife"]
+
+
+def parse_arguments() -> argparse.Namespace:
+    """
+    parse command line arguments
+
+    :rtype argparse.Namespace: command parsing results
+    """
+    parser = argparse.ArgumentParser(
+        prog="video2x",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
+    parser.add_argument(
+        "--version", help="show version information and exit", action="store_true"
+    )
+    parser.add_argument(
+        "-i",
+        "--input",
+        type=pathlib.Path,
+        help="input file/directory path",
+        required=True,
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        type=pathlib.Path,
+        help="output file/directory path",
+        required=True,
+    )
+    parser.add_argument(
+        "-p", "--processes", type=int, help="number of processes to launch", default=1
+    )
+    parser.add_argument(
+        "-l",
+        "--loglevel",
+        choices=["trace", "debug", "info", "success", "warning", "error", "critical"],
+        default="info",
+    )
+
+    # upscaler arguments
+    action = parser.add_subparsers(
+        help="action to perform", dest="action", required=True
+    )
+
+    upscale = action.add_parser(
+        "upscale",
+        help="upscale a file",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        add_help=False,
+    )
+    upscale.add_argument(
+        "--help", action="help", help="show this help message and exit"
+    )
+    upscale.add_argument("-w", "--width", type=int, help="output width")
+    upscale.add_argument("-h", "--height", type=int, help="output height")
+    upscale.add_argument("-n", "--noise", type=int, help="denoise level", default=3)
+    upscale.add_argument(
+        "-a",
+        "--algorithm",
+        choices=UPSCALING_ALGORITHMS,
+        help="algorithm to use for upscaling",
+        default=UPSCALING_ALGORITHMS[0],
+    )
+    upscale.add_argument(
+        "-t",
+        "--threshold",
+        type=float,
+        help=(
+            "skip if the percent difference between two adjacent frames is below this"
+            " value; set to 0 to process all frames"
+        ),
+        default=0,
+    )
+
+    # interpolator arguments
+    interpolate = action.add_parser(
+        "interpolate",
+        help="interpolate frames for file",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        add_help=False,
+    )
+    interpolate.add_argument(
+        "--help", action="help", help="show this help message and exit"
+    )
+    interpolate.add_argument(
+        "-a",
+        "--algorithm",
+        choices=INTERPOLATION_ALGORITHMS,
+        help="algorithm to use for interpolation",
+        default=INTERPOLATION_ALGORITHMS[0],
+    )
+    interpolate.add_argument(
+        "-t",
+        "--threshold",
+        type=float,
+        help=(
+            "skip if the percent difference between two adjacent frames exceeds this"
+            " value; set to 100 to interpolate all frames"
+        ),
+        default=10,
+    )
+
+    return parser.parse_args()
+
+
+def main() -> int:
+    """
+    command line entrypoint for direct CLI invocation
+
+    :rtype int: 0 if completed successfully, else other int
+    """
+
+    try:
+        # display version and legal information
+        if "--version" in sys.argv:
+            rich_print(LEGAL_INFO)
+            return 0
+
+        # parse command line arguments
+        args = parse_arguments()
+
+        # check input/output file paths
+        if not args.input.exists():
+            logger.critical(f"Cannot find input file: {args.input}")
+            return 1
+        if not args.input.is_file():
+            logger.critical("Input path is not a file")
+            return 1
+        if not args.output.parent.exists():
+            logger.critical(f"Output directory does not exist: {args.output.parent}")
+            return 1
+
+        # set logger level
+        if os.environ.get("LOGURU_LEVEL") is None:
+            os.environ["LOGURU_LEVEL"] = args.loglevel.upper()
+
+        # remove default handler
+        logger.remove()
+
+        # add new sink with custom handler
+        logger.add(sys.stderr, colorize=True, format=LOGURU_FORMAT)
+
+        # print package version and copyright notice
+        logger.opt(colors=True).info(f"<magenta>Video2X {__version__}</magenta>")
+        logger.opt(colors=True).info(
+            "<magenta>Copyright (C) 2018-2022 K4YT3X and contributors.</magenta>"
+        )
+
+        # initialize video2x object
+        video2x = Video2X()
+
+        if args.action == "upscale":
+            video2x.upscale(
+                args.input,
+                args.output,
+                args.width,
+                args.height,
+                args.noise,
+                args.processes,
+                args.threshold,
+                args.algorithm,
+            )
+
+        elif args.action == "interpolate":
+            video2x.interpolate(
+                args.input,
+                args.output,
+                args.processes,
+                args.threshold,
+                args.algorithm,
+            )
+
+    # don't print the traceback for manual terminations
+    except KeyboardInterrupt:
+        return 2
+
+    except Exception as error:
+        logger.exception(error)
+        return 1
+
+    # if no exceptions were produced
+    else:
+        logger.success("Processing completed successfully")
+        return 0
+
 
 if __name__ == "__main__":
     sys.exit(main())
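With main() moved out of video2x.py and into this entry point, the Video2X class can be driven as a library without going through the CLI at all. A minimal sketch, assuming the upscale() parameter order matches the dispatch in main() above (input, output, width, height, noise, processes, threshold, algorithm) and using illustrative paths:

    import pathlib

    from video2x import Video2X

    video2x = Video2X()

    # arguments mirror the call made by main() above
    video2x.upscale(
        pathlib.Path("input.mp4"),   # illustrative input path
        pathlib.Path("output.mp4"),  # illustrative output path
        3840,                        # output width
        2160,                        # output height
        3,                           # denoise level
        1,                           # number of processes
        0,                           # frame-difference threshold
        "waifu2x",                   # upscaling algorithm
    )

The same options map onto the new subcommand-style CLI, e.g. python -m video2x -i input.mp4 -o output.mp4 upscale -w 3840 -h 2160 -a waifu2x.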
@@ -19,22 +19,19 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
 Name: Video Decoder
 Author: K4YT3X
 Date Created: June 17, 2021
-Last Modified: March 21, 2022
+Last Modified: April 9, 2022
 """
 
 import contextlib
-import multiprocessing
 import os
 import pathlib
-import queue
 import signal
 import subprocess
-import threading
-import time
-from multiprocessing.sharedctypes import Synchronized
+from multiprocessing import Queue
+from queue import Full
+from threading import Thread
 
 import ffmpeg
-from loguru import logger
 from PIL import Image
 
 from .pipe_printer import PipePrinter
@@ -51,36 +48,38 @@ LOGURU_FFMPEG_LOGLEVELS = {
 }
 
 
-class VideoDecoder(threading.Thread):
+class VideoDecoder:
+    """
+    A video decoder that generates frames read from FFmpeg.
+
+    :param input_path pathlib.Path: the input file's path
+    :param input_width int: the input file's width
+    :param input_height int: the input file's height
+    :param frame_rate float: the input file's frame rate
+    :param pil_ignore_max_image_pixels bool: setting this to True
+        disables PIL's "possible DDoS" warning
+    """
+
     def __init__(
         self,
         input_path: pathlib.Path,
         input_width: int,
         input_height: int,
         frame_rate: float,
-        processing_queue: multiprocessing.Queue,
-        processing_settings: tuple,
-        pause: Synchronized,
-        ignore_max_image_pixels=True,
+        pil_ignore_max_image_pixels: bool = True,
     ) -> None:
-        threading.Thread.__init__(self)
-        self.running = False
         self.input_path = input_path
         self.input_width = input_width
         self.input_height = input_height
-        self.processing_queue = processing_queue
-        self.processing_settings = processing_settings
-        self.pause = pause
 
         # this disables the "possible DDoS" warning
-        if ignore_max_image_pixels:
+        if pil_ignore_max_image_pixels is True:
            Image.MAX_IMAGE_PIXELS = None
 
-        self.exception = None
        self.decoder = subprocess.Popen(
            ffmpeg.compile(
                ffmpeg.input(input_path, r=frame_rate)["v"]
-                .output("pipe:1", format="rawvideo", pix_fmt="rgb24", vsync="cfr")
+                .output("pipe:1", format="rawvideo", pix_fmt="rgb24", fps_mode="cfr")
                .global_args("-hide_banner")
                .global_args("-nostats")
                .global_args("-nostdin")
@@ -102,83 +101,33 @@ class VideoDecoder(threading.Thread):
         self.pipe_printer = PipePrinter(self.decoder.stderr)
         self.pipe_printer.start()
 
-    def run(self) -> None:
-        self.running = True
-
-        # the index of the frame
-        frame_index = 0
-
-        # create placeholder for previous frame
-        # used in interpolate mode
-        previous_image = None
-
-        # continue running until an exception occurs
-        # or all frames have been decoded
-        while self.running is True:
-
-            # pause if pause flag is set
-            if self.pause.value is True:
-                time.sleep(0.1)
-                continue
-
-            try:
-                buffer = self.decoder.stdout.read(
-                    3 * self.input_width * self.input_height
-                )
-
-                # source depleted (decoding finished)
-                # after the last frame has been decoded
-                # read will return nothing
-                if len(buffer) == 0:
-                    self.stop()
-                    continue
-
-                # convert raw bytes into image object
-                image = Image.frombytes(
-                    "RGB", (self.input_width, self.input_height), buffer
-                )
-
-                # keep checking if the running flag is set to False
-                # while waiting to put the next image into the queue
-                while self.running is True:
-                    with contextlib.suppress(queue.Full):
-                        self.processing_queue.put(
-                            (
-                                frame_index,
-                                (previous_image, image),
-                                self.processing_settings,
-                            ),
-                            timeout=0.1,
-                        )
-                        break
-
-                previous_image = image
-                frame_index += 1
-
-            # most likely "not enough image data"
-            except ValueError as error:
-                self.exception = error
-
-                # ignore queue closed
-                if "is closed" not in str(error):
-                    logger.exception(error)
-                break
-
-            # send exceptions into the client connection pipe
-            except Exception as error:
-                self.exception = error
-                logger.exception(error)
-                break
-        else:
-            logger.debug("Decoding queue depleted")
-
-        # flush the remaining data in STDOUT and STDERR
-        self.decoder.stdout.flush()
-        self.decoder.stderr.flush()
-
-        # send SIGINT (2) to FFmpeg
-        # this instructs it to finalize and exit
-        self.decoder.send_signal(signal.SIGINT)
+    def __iter__(self):
+
+        # continue yielding while FFmpeg continues to produce output
+        while (
+            len(
+                buffer := self.decoder.stdout.read(
+                    3 * self.input_width * self.input_height
+                )
+            )
+            > 0
+        ):
+
+            # convert raw bytes into image object
+            frame = Image.frombytes(
+                "RGB", (self.input_width, self.input_height), buffer
+            )
+
+            # return this frame
+            yield frame
+
+        # automatically self-join and clean up after iterations are done
+        self.join()
+
+    def kill(self):
+        self.decoder.send_signal(signal.SIGKILL)
+
+    def join(self):
 
         # close PIPEs to prevent process from getting stuck
         self.decoder.stdout.close()
@@ -191,8 +140,38 @@ class VideoDecoder(threading.Thread):
         self.pipe_printer.stop()
         self.pipe_printer.join()
 
-        logger.info("Decoder thread exiting")
-        return super().run()
 
-    def stop(self) -> None:
+class VideoDecoderThread(Thread):
+    def __init__(
+        self, tasks_queue: Queue, decoder: VideoDecoder, processing_settings: tuple
+    ):
+        super().__init__()
+
+        self.tasks_queue = tasks_queue
+        self.decoder = decoder
+        self.processing_settings = processing_settings
+        self.running = False
+
+    def run(self):
+        self.running = True
+        previous_frame = None
+        for frame_index, frame in enumerate(self.decoder):
+
+            while True:
+
+                # check for the stop signal
+                if self.running is False:
+                    self.decoder.join()
+                    return
+
+                with contextlib.suppress(Full):
+                    self.tasks_queue.put(
+                        (frame_index, previous_frame, frame, self.processing_settings),
+                        timeout=0.1,
+                    )
+                    break
+
+            previous_frame = frame
+
+    def stop(self):
         self.running = False
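The decoder is no longer a thread itself: VideoDecoder is now a plain iterable that yields PIL images read from FFmpeg's standard output, while the queue-feeding loop lives in the new VideoDecoderThread. A rough sketch of how the two pieces fit together, assuming a 640x480, 30 fps input with illustrative paths and settings (the settings tuple follows the (output_width, output_height, algorithm, noise, threshold) order unpacked by UpscalerProcessor later in this commit):

    import pathlib
    from multiprocessing import Queue

    from video2x.decoder import VideoDecoder, VideoDecoderThread

    decoder = VideoDecoder(pathlib.Path("input.mp4"), 640, 480, 30.0)

    # option 1: consume frames directly; join() is called automatically
    # once FFmpeg stops producing output
    # for frame in decoder:
    #     ...

    # option 2: feed frames into a tasks queue for worker processes
    tasks_queue = Queue(maxsize=10)
    decoder_thread = VideoDecoderThread(
        tasks_queue, decoder, (3840, 2160, "waifu2x", 3, 0)
    )
    decoder_thread.start()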
@@ -19,20 +19,16 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
 Name: Video Encoder
 Author: K4YT3X
 Date Created: June 17, 2021
-Last Modified: March 20, 2022
+Last Modified: August 28, 2022
 """
 
 import os
 import pathlib
 import signal
 import subprocess
-import threading
-import time
-from multiprocessing.managers import ListProxy
-from multiprocessing.sharedctypes import Synchronized
 
 import ffmpeg
-from loguru import logger
+from PIL import Image
 
 from .pipe_printer import PipePrinter
 
@@ -48,7 +44,7 @@ LOGURU_FFMPEG_LOGLEVELS = {
 }
 
 
-class VideoEncoder(threading.Thread):
+class VideoEncoder:
     def __init__(
         self,
         input_path: pathlib.Path,
@@ -56,36 +52,20 @@ class VideoEncoder(threading.Thread):
         output_path: pathlib.Path,
         output_width: int,
         output_height: int,
-        total_frames: int,
-        processed_frames: ListProxy,
-        processed: Synchronized,
-        pause: Synchronized,
         copy_audio: bool = True,
         copy_subtitle: bool = True,
         copy_data: bool = False,
         copy_attachments: bool = False,
     ) -> None:
-        threading.Thread.__init__(self)
-        self.running = False
-        self.input_path = input_path
-        self.output_path = output_path
-        self.total_frames = total_frames
-        self.processed_frames = processed_frames
-        self.processed = processed
-        self.pause = pause
-
-        # stores exceptions if the thread exits with errors
-        self.exception = None
 
         # create FFmpeg input for the original input video
-        self.original = ffmpeg.input(input_path)
+        original = ffmpeg.input(input_path)
 
         # define frames as input
         frames = ffmpeg.input(
             "pipe:0",
             format="rawvideo",
             pix_fmt="rgb24",
-            vsync="cfr",
             s=f"{output_width}x{output_height}",
             r=frame_rate,
         )
@@ -93,11 +73,11 @@ class VideoEncoder(threading.Thread):
         # copy additional streams from original file
         # https://ffmpeg.org/ffmpeg.html#Stream-specifiers-1
         additional_streams = [
-            # self.original["1:v?"],
-            self.original["a?"] if copy_audio is True else None,
-            self.original["s?"] if copy_subtitle is True else None,
-            self.original["d?"] if copy_data is True else None,
-            self.original["t?"] if copy_attachments is True else None,
+            # original["1:v?"],
+            original["a?"] if copy_audio is True else None,
+            original["s?"] if copy_subtitle is True else None,
+            original["d?"] if copy_data is True else None,
+            original["t?"] if copy_attachments is True else None,
         ]
 
         # run FFmpeg and produce final output
@@ -106,10 +86,10 @@ class VideoEncoder(threading.Thread):
                 ffmpeg.output(
                     frames,
                     *[s for s in additional_streams if s is not None],
-                    str(self.output_path),
+                    str(output_path),
                     vcodec="libx264",
                     scodec="copy",
-                    vsync="cfr",
+                    fps_mode="cfr",
                     pix_fmt="yuv420p",
                     crf=17,
                     preset="veryslow",
@@ -138,49 +118,26 @@ class VideoEncoder(threading.Thread):
         self.pipe_printer = PipePrinter(self.encoder.stderr)
         self.pipe_printer.start()
 
-    def run(self) -> None:
-        self.running = True
-        frame_index = 0
-        while self.running and frame_index < self.total_frames:
-
-            # pause if pause flag is set
-            if self.pause.value is True:
-                time.sleep(0.1)
-                continue
-
-            try:
-                image = self.processed_frames[frame_index]
-                if image is None:
-                    time.sleep(0.1)
-                    continue
-
-                # send the image to FFmpeg for encoding
-                self.encoder.stdin.write(image.tobytes())
-
-                # remove the image from memory
-                self.processed_frames[frame_index] = None
-
-                with self.processed.get_lock():
-                    self.processed.value += 1
-
-                frame_index += 1
-
-            # send exceptions into the client connection pipe
-            except Exception as error:
-                self.exception = error
-                logger.exception(error)
-                break
-        else:
-            logger.debug("Encoding queue depleted")
+    def kill(self):
+        self.encoder.send_signal(signal.SIGKILL)
+
+    def write(self, frame: Image.Image) -> None:
+        """
+        write a frame into FFmpeg encoder's STDIN
+
+        :param frame Image.Image: the Image object to use for writing
+        """
+        self.encoder.stdin.write(frame.tobytes())
+
+    def join(self) -> None:
+        """
+        signal the encoder that all frames have been sent and that FFmpeg
+        should be instructed to wrap up the processing
+        """
 
         # flush the remaining data in STDIN and STDERR
         self.encoder.stdin.flush()
         self.encoder.stderr.flush()
 
-        # send SIGINT (2) to FFmpeg
-        # this instructs it to finalize and exit
-        self.encoder.send_signal(signal.SIGINT)
-
         # close PIPEs to prevent process from getting stuck
         self.encoder.stdin.close()
         self.encoder.stderr.close()
@@ -191,9 +148,3 @@ class VideoEncoder(threading.Thread):
         # wait for PIPE printer to exit
         self.pipe_printer.stop()
         self.pipe_printer.join()
-
-        logger.info("Encoder thread exiting")
-        return super().run()
-
-    def stop(self) -> None:
-        self.running = False
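VideoEncoder likewise stops being a thread that drains a shared frame list: the caller now pushes frames in with write() and finalizes the file with join(). The hunks above do not show the full constructor signature; the sketch below assumes it is (input_path, frame_rate, output_path, output_width, output_height, ...), with frame_rate inferred from the r=frame_rate usage above, and uses illustrative paths and a hypothetical frame source:

    import pathlib

    from video2x.encoder import VideoEncoder

    encoder = VideoEncoder(
        pathlib.Path("input.mp4"),   # original file, used for stream copying
        30.0,                        # frame rate (assumed constructor position)
        pathlib.Path("output.mp4"),
        1280,
        720,
    )

    for frame in processed_frames_source:  # hypothetical iterable of PIL images
        encoder.write(frame)               # raw RGB bytes go to FFmpeg's stdin

    # flush and close the pipes so FFmpeg can finalize the output file
    encoder.join()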
@@ -44,11 +44,13 @@ class Interpolator(multiprocessing.Process):
         pause: Synchronized,
     ) -> None:
         multiprocessing.Process.__init__(self)
-        self.running = False
         self.processing_queue = processing_queue
         self.processed_frames = processed_frames
         self.pause = pause
 
+        self.running = False
+        self.processor_objects = {}
+
         signal.signal(signal.SIGTERM, self._stop)
 
     def run(self) -> None:
@@ -56,7 +58,6 @@ class Interpolator(multiprocessing.Process):
         logger.opt(colors=True).info(
             f"Interpolator process <blue>{self.name}</blue> initiating"
         )
-        processor_objects = {}
         while self.running is True:
             try:
                 # pause if pause flag is set
@@ -80,6 +81,7 @@ class Interpolator(multiprocessing.Process):
                 if image0 is None:
                     continue
 
+                # calculate the %diff between the current frame and the previous frame
                 difference = ImageChops.difference(image0, image1)
                 difference_stat = ImageStat.Stat(difference)
                 difference_ratio = (
@@ -92,10 +94,10 @@ class Interpolator(multiprocessing.Process):
 
                     # select a processor object with the required settings
                     # create a new object if none are available
-                    processor_object = processor_objects.get(algorithm)
+                    processor_object = self.processor_objects.get(algorithm)
                     if processor_object is None:
                         processor_object = ALGORITHM_CLASSES[algorithm](0)
-                        processor_objects[algorithm] = processor_object
+                        self.processor_objects[algorithm] = processor_object
                     interpolated_image = processor_object.process(image0, image1)
 
                 # if the difference is greater than threshold
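The difference ratio computed here (and by Processor.get_image_diff in the new file below) is the mean absolute per-channel difference between two frames, normalized to a 0-100 scale. A small self-contained example of the same computation on two solid-color frames, which yields 100.0 for black versus white:

    from PIL import Image, ImageChops, ImageStat

    image0 = Image.new("RGB", (64, 64), (0, 0, 0))        # solid black
    image1 = Image.new("RGB", (64, 64), (255, 255, 255))  # solid white

    difference_stat = ImageStat.Stat(ImageChops.difference(image0, image1))

    # sum of per-channel means is 765; 765 / (3 * 255) * 100 == 100.0
    difference_ratio = (
        sum(difference_stat.mean) / (len(difference_stat.mean) * 255) * 100
    )
    print(difference_ratio)  # 100.0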
video2x/processor.py (new executable file, 69 lines)
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Copyright (C) 2018-2022 K4YT3X and contributors.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as
+published by the Free Software Foundation, either version 3 of the
+License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Name: Processor Abstract Class
+Author: K4YT3X
+Date Created: April 9, 2022
+Last Modified: April 9, 2022
+"""
+
+from abc import ABC, abstractmethod
+from multiprocessing import Queue
+from multiprocessing.managers import DictProxy
+from multiprocessing.sharedctypes import Synchronized
+
+from PIL import Image, ImageChops, ImageStat
+
+
+class Processor(ABC):
+    def __init__(
+        self, tasks_queue: Queue, processed_frames: DictProxy, pause_flag: Synchronized
+    ) -> None:
+        self.tasks_queue = tasks_queue
+        self.processed_frames = processed_frames
+        self.pause_flag = pause_flag
+
+    @abstractmethod
+    def process(self):
+        raise NotImplementedError
+
+    @staticmethod
+    def get_image_diff(image0: Image.Image, image1: Image.Image) -> float:
+        """
+        get the percentage difference between two images
+
+        :param image0 Image.Image: the image to compare
+        :param image1 Image.Image: the image to compare against
+        :rtype float: percentage difference between two frames
+        """
+        difference_stat = ImageStat.Stat(ImageChops.difference(image0, image1))
+        return sum(difference_stat.mean) / (len(difference_stat.mean) * 255) * 100
+
+    """
+    def run(
+        self,
+    ) -> None:
+        self.running = True
+        while self.running is True:
+            self.process()
+        self.running = False
+        return super().run()
+
+    def stop(self, _signal_number, _frame) -> None:
+        self.running = False
+    """
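Processor gives concrete processors (such as UpscalerProcessor below) a shared constructor and the frame-difference helper; subclasses only need to implement process(). A minimal, hypothetical subclass for illustration (PassthroughProcessor is not part of this commit), constructed with the same queue, dict proxy, and pause flag types used elsewhere in the change:

    import ctypes
    from multiprocessing import Manager, Queue, Value

    from video2x.processor import Processor


    class PassthroughProcessor(Processor):
        def process(self) -> None:
            # store frames unmodified until a None sentinel is received
            task = self.tasks_queue.get()
            while task is not None:
                frame_index, _previous_frame, current_frame, _settings = task
                self.processed_frames[frame_index] = current_frame
                task = self.tasks_queue.get()


    tasks_queue = Queue(maxsize=10)
    processed_frames = Manager().dict()
    pause_flag = Value(ctypes.c_bool, False)
    processor = PassthroughProcessor(tasks_queue, processed_frames, pause_flag)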
@@ -19,24 +19,22 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
 Name: Upscaler
 Author: K4YT3X
 Date Created: May 27, 2021
-Last Modified: March 20, 2022
+Last Modified: April 10, 2022
 """
 
 import math
-import multiprocessing
-import queue
-import signal
 import time
-from multiprocessing.managers import ListProxy
-from multiprocessing.sharedctypes import Synchronized
 
-from loguru import logger
-from PIL import Image, ImageChops, ImageStat
+from PIL import Image
 from realcugan_ncnn_vulkan_python import Realcugan
 from realsr_ncnn_vulkan_python import Realsr
 from srmd_ncnn_vulkan_python import Srmd
 from waifu2x_ncnn_vulkan_python import Waifu2x
 
+from .processor import Processor
+
+
+class Upscaler:
     # fixed scaling ratios supported by the algorithms
     # that only support certain fixed scale ratios
     ALGORITHM_FIXED_SCALING_RATIOS = {
@@ -53,99 +51,46 @@ ALGORITHM_CLASSES = {
         "realcugan": Realcugan,
     }
 
-
-class Upscaler(multiprocessing.Process):
-    def __init__(
-        self,
-        processing_queue: multiprocessing.Queue,
-        processed_frames: ListProxy,
-        pause: Synchronized,
-    ) -> None:
-        multiprocessing.Process.__init__(self)
-        self.running = False
-        self.processing_queue = processing_queue
-        self.processed_frames = processed_frames
-        self.pause = pause
-
-        signal.signal(signal.SIGTERM, self._stop)
-
-    def run(self) -> None:
-        self.running = True
-        logger.opt(colors=True).info(
-            f"Upscaler process <blue>{self.name}</blue> initiating"
-        )
-        processor_objects = {}
-        while self.running is True:
-            try:
-                # pause if pause flag is set
-                if self.pause.value is True:
-                    time.sleep(0.1)
-                    continue
+    processor_objects = {}
 
-                try:
-                    # get new job from queue
-                    (
-                        frame_index,
-                        (image0, image1),
-                        (
-                            output_width,
-                            output_height,
-                            noise,
-                            difference_threshold,
-                            algorithm,
-                        ),
-                    ) = self.processing_queue.get(False)
-
-                    # destructure settings
-                except queue.Empty:
-                    time.sleep(0.1)
-                    continue
-
-                difference_ratio = 0
-                if image0 is not None:
-                    difference = ImageChops.difference(image0, image1)
-                    difference_stat = ImageStat.Stat(difference)
-                    difference_ratio = (
-                        sum(difference_stat.mean)
-                        / (len(difference_stat.mean) * 255)
-                        * 100
-                    )
-
-                # if the difference is lower than threshold
-                # skip this frame
-                if difference_ratio < difference_threshold:
-
-                    # make sure the previous frame has been processed
-                    if frame_index > 0:
-                        while self.processed_frames[frame_index - 1] is None:
-                            time.sleep(0.1)
-
-                    # make the current image the same as the previous result
-                    self.processed_frames[frame_index] = self.processed_frames[
-                        frame_index - 1
-                    ]
-
-                # if the difference is greater than threshold
-                # process this frame
-                else:
-                    width, height = image1.size
+    @staticmethod
+    def _get_scaling_tasks(
+        input_width: int,
+        input_height: int,
+        output_width: int,
+        output_height: int,
+        algorithm: str,
+    ) -> list:
+        """
+        Get the required tasks for upscaling the image until it is larger than
+        or equal to the desired output dimensions. For example, SRMD only supports
+        2x, 3x, and 4x, so upscaling an image from 320x240 to 3840x2160 will
+        require the SRMD to run 3x then 4x. In this case, this function will
+        return [3, 4].
 
+        :param input_width int: input image width
+        :param input_height int: input image height
+        :param output_width int: desired output image width
+        :param output_height int: desired output image height
+        :param algorithm str: upscaling algorithm
+        :rtype list: the list of upscaling tasks required
+        """
         # calculate required minimum scale ratio
-                    output_scale = max(output_width / width, output_height / height)
+        output_scale = max(output_width / input_width, output_height / input_height)
 
         # select the optimal algorithm scaling ratio to use
         supported_scaling_ratios = sorted(
-                        ALGORITHM_FIXED_SCALING_RATIOS[algorithm]
+            Upscaler.ALGORITHM_FIXED_SCALING_RATIOS[algorithm]
         )
 
         remaining_scaling_ratio = math.ceil(output_scale)
-                    scaling_jobs = []
 
         # if the scaling ratio is 1.0
         # apply the smallest scaling ratio available
         if remaining_scaling_ratio == 1:
-                        scaling_jobs.append(supported_scaling_ratios[0])
-                    else:
+            return [supported_scaling_ratios[0]]
+
+        scaling_jobs = []
         while remaining_scaling_ratio > 1:
             for ratio in supported_scaling_ratios:
                 if ratio >= remaining_scaling_ratio:
@@ -167,41 +112,91 @@ class Upscaler(multiprocessing.Process):
 
                 if found is False:
                     scaling_jobs.append(supported_scaling_ratios[-1])
-                                    remaining_scaling_ratio /= supported_scaling_ratios[
-                                        -1
-                                    ]
+                    remaining_scaling_ratio /= supported_scaling_ratios[-1]
+        return scaling_jobs
 
-                    for job in scaling_jobs:
+    def upscale_image(
+        self,
+        image: Image.Image,
+        output_width: int,
+        output_height: int,
+        algorithm: str,
+        noise: int,
+    ) -> Image.Image:
+        """
+        upscale an image
+
+        :param image Image.Image: the image to upscale
+        :param output_width int: the desired output width
+        :param output_height int: the desired output height
+        :param algorithm str: the algorithm to use
+        :param noise int: the noise level (available only for some algorithms)
+        :rtype Image.Image: the upscaled image
+        """
+        width, height = image.size
+
+        for task in self._get_scaling_tasks(
+            width, height, output_width, output_height, algorithm
+        ):
 
             # select a processor object with the required settings
             # create a new object if none are available
-                        processor_object = processor_objects.get((algorithm, job))
+            processor_object = self.processor_objects.get((algorithm, task))
             if processor_object is None:
-                            processor_object = ALGORITHM_CLASSES[algorithm](
-                                noise=noise, scale=job
+                processor_object = self.ALGORITHM_CLASSES[algorithm](
+                    noise=noise, scale=task
                 )
-                            processor_objects[(algorithm, job)] = processor_object
+                self.processor_objects[(algorithm, task)] = processor_object
 
             # process the image with the selected algorithm
-                        image1 = processor_object.process(image1)
+            image = processor_object.process(image)
 
         # downscale the image to the desired output size and
         # save the image to disk
-                    image1 = image1.resize((output_width, output_height), Image.LANCZOS)
-                    self.processed_frames[frame_index] = image1
+        return image.resize((output_width, output_height), Image.Resampling.LANCZOS)
 
-            # send exceptions into the client connection pipe
-            except (SystemExit, KeyboardInterrupt):
-                break
-
-            except Exception as error:
-                logger.exception(error)
-                break
-
-        logger.opt(colors=True).info(
-            f"Upscaler process <blue>{self.name}</blue> terminating"
-        )
-        return super().run()
 
-    def _stop(self, _signal_number, _frame) -> None:
-        self.running = False
+class UpscalerProcessor(Processor, Upscaler):
+    def process(self) -> None:
+
+        task = self.tasks_queue.get()
+        while task is not None:
+
+            try:
+
+                if self.pause_flag.value is True:
+                    time.sleep(0.1)
+                    continue
+
+                # unpack the task's values
+                (
+                    frame_index,
+                    previous_frame,
+                    current_frame,
+                    (output_width, output_height, algorithm, noise, threshold),
+                ) = task
+
+                # calculate the %diff between the current frame and the previous frame
+                difference_ratio = 0
+                if previous_frame is not None:
+                    difference_ratio = self.get_image_diff(
+                        previous_frame, current_frame
+                    )
+
+                # if the difference is lower than threshold, skip this frame
+                if difference_ratio < threshold:
+
+                    # make the current image the same as the previous result
+                    self.processed_frames[frame_index] = True
+
+                # if the difference is greater than threshold
+                # process this frame
+                else:
+                    self.processed_frames[frame_index] = self.upscale_image(
+                        current_frame, output_width, output_height, algorithm, noise
+                    )
+
+                task = self.tasks_queue.get()
+
+            except KeyboardInterrupt:
+                break
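Because the ncnn-Vulkan wrappers are now cached on the Upscaler class itself, a single frame can be upscaled without any of the multiprocessing plumbing. A rough single-image sketch, assuming the ncnn Vulkan wheels are installed and using an illustrative file name:

    from PIL import Image

    from video2x.upscaler import Upscaler

    upscaler = Upscaler()
    frame = Image.open("frame.png")  # illustrative input frame

    # chains fixed-ratio passes as needed, then downscales to the exact target size
    result = upscaler.upscale_image(frame, 3840, 2160, "waifu2x", 3)
    result.save("frame_upscaled.png")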
@@ -27,7 +27,7 @@ __      __  _       _                  ___   __   __
 Name: Video2X
 Creator: K4YT3X
 Date Created: February 24, 2018
-Last Modified: April 5, 2022
+Last Modified: August 28, 2022
 
 Editor: BrianPetkovsek
 Last Modified: June 17, 2019
@@ -39,20 +39,18 @@ Editor: 28598519a
 Last Modified: March 23, 2020
 """
 
-import argparse
 import ctypes
 import math
-import multiprocessing
-import os
-import pathlib
 import signal
 import sys
 import time
+from enum import Enum
+from multiprocessing import Manager, Pool, Queue, Value
+from pathlib import Path
 
 import ffmpeg
 from cv2 import cv2
 from loguru import logger
-from rich import print as rich_print
 from rich.console import Console
 from rich.file_proxy import FileProxy
 from rich.progress import (
@@ -65,41 +63,23 @@ from rich.progress import (
 )
 from rich.text import Text
 
+from video2x.processor import Processor
+
 from . import __version__
-from .decoder import VideoDecoder
+from .decoder import VideoDecoder, VideoDecoderThread
 from .encoder import VideoEncoder
 from .interpolator import Interpolator
-from .upscaler import Upscaler
+from .upscaler import UpscalerProcessor
 
 # for desktop environments only
 # if pynput can be loaded, enable global pause hotkey support
 try:
-    import pynput
+    from pynput.keyboard import HotKey, Listener
 except ImportError:
     ENABLE_HOTKEY = False
 else:
     ENABLE_HOTKEY = True
 
-LEGAL_INFO = f"""Video2X\t\t{__version__}
-Author:\t\tK4YT3X
-License:\tGNU AGPL v3
-Github Page:\thttps://github.com/k4yt3x/video2x
-Contact:\ti@k4yt3x.com"""
-
-# algorithms available for upscaling tasks
-UPSCALING_ALGORITHMS = [
-    "waifu2x",
-    "srmd",
-    "realsr",
-    "realcugan",
-]
-
-# algorithms available for frame interpolation tasks
-INTERPOLATION_ALGORITHMS = ["rife"]
-
-# progress bar labels for different modes
-MODE_LABELS = {"upscale": "Upscaling", "interpolate": "Interpolating"}
-
 # format string for Loguru loggers
 LOGURU_FORMAT = (
     "<green>{time:HH:mm:ss.SSSSSS!UTC}</green> | "
@@ -119,6 +99,11 @@ class ProcessingSpeedColumn(ProgressColumn):
         )
 
 
+class ProcessingMode(Enum):
+    UPSCALE = {"label": "Upscaling", "processor": UpscalerProcessor}
+    INTERPOLATE = {"label": "Interpolating", "processor": Interpolator}
+
+
 class Video2X:
     """
     Video2X class
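The new ProcessingMode enum replaces the removed MODE_LABELS dict by bundling each mode's progress-bar label with its processor class. A small lookup sketch, assuming the module keeps its video2x.video2x import path:

    from video2x.video2x import ProcessingMode

    mode = ProcessingMode.UPSCALE
    print(mode.value["label"])                 # "Upscaling"
    processor_class = mode.value["processor"]  # UpscalerProcessor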
| @ -132,11 +117,11 @@ class Video2X: | |||||||
|         self.version = __version__ |         self.version = __version__ | ||||||
| 
 | 
 | ||||||
|     @staticmethod |     @staticmethod | ||||||
|     def _get_video_info(path: pathlib.Path) -> tuple: |     def _get_video_info(path: Path) -> tuple: | ||||||
|         """ |         """ | ||||||
|         get video file information with FFmpeg |         get video file information with FFmpeg | ||||||
| 
 | 
 | ||||||
|         :param path pathlib.Path: video file path |         :param path Path: video file path | ||||||
|         :raises RuntimeError: raised when video stream isn't found |         :raises RuntimeError: raised when video stream isn't found | ||||||
|         """ |         """ | ||||||
|         # probe video file info |         # probe video file info | ||||||
| @ -160,34 +145,17 @@ class Video2X: | |||||||
| 
 | 
 | ||||||
|         return video_info["width"], video_info["height"], total_frames, frame_rate |         return video_info["width"], video_info["height"], total_frames, frame_rate | ||||||
| 
 | 
 | ||||||
|     def _toggle_pause(self, _signal_number: int = -1, _frame=None): |  | ||||||
|         # print console messages and update the progress bar's status |  | ||||||
|         if self.pause.value is False: |  | ||||||
|             self.progress.update(self.task, description=self.description + " (paused)") |  | ||||||
|             self.progress.stop_task(self.task) |  | ||||||
|             logger.warning("Processing paused, press Ctrl+Alt+V again to resume") |  | ||||||
| 
 |  | ||||||
|         elif self.pause.value is True: |  | ||||||
|             self.progress.update(self.task, description=self.description) |  | ||||||
|             logger.warning("Resuming processing") |  | ||||||
|             self.progress.start_task(self.task) |  | ||||||
| 
 |  | ||||||
|         # invert the value of the pause flag |  | ||||||
|         with self.pause.get_lock(): |  | ||||||
|             self.pause.value = not self.pause.value |  | ||||||
| 
 |  | ||||||
|     def _run( |     def _run( | ||||||
|         self, |         self, | ||||||
|         input_path: pathlib.Path, |         input_path: Path, | ||||||
|         width: int, |         width: int, | ||||||
|         height: int, |         height: int, | ||||||
|         total_frames: int, |         total_frames: int, | ||||||
|         frame_rate: float, |         frame_rate: float, | ||||||
|         output_path: pathlib.Path, |         output_path: Path, | ||||||
|         output_width: int, |         output_width: int, | ||||||
|         output_height: int, |         output_height: int, | ||||||
|         Processor: object, |         mode: ProcessingMode, | ||||||
|         mode: str, |  | ||||||
|         processes: int, |         processes: int, | ||||||
|         processing_settings: tuple, |         processing_settings: tuple, | ||||||
|     ) -> None: |     ) -> None: | ||||||
| @ -207,51 +175,40 @@ class Video2X: | |||||||
|         logger.remove() |         logger.remove() | ||||||
|         logger.add(sys.stderr, colorize=True, format=LOGURU_FORMAT) |         logger.add(sys.stderr, colorize=True, format=LOGURU_FORMAT) | ||||||
| 
 | 
 | ||||||
|         # initialize values |         # TODO: add docs | ||||||
|         self.processor_processes = [] |         tasks_queue = Queue(maxsize=processes * 10) | ||||||
|         self.processing_queue = multiprocessing.Queue(maxsize=processes * 10) |         processed_frames = Manager().dict() | ||||||
|         processed_frames = multiprocessing.Manager().list([None] * total_frames) |         pause_flag = Value(ctypes.c_bool, False) | ||||||
|         self.processed = multiprocessing.Value("I", 0) |  | ||||||
|         self.pause = multiprocessing.Value(ctypes.c_bool, False) |  | ||||||
| 
 | 
 | ||||||
|         # set up and start decoder thread |         # set up and start decoder thread | ||||||
|         logger.info("Starting video decoder") |         logger.info("Starting video decoder") | ||||||
|         self.decoder = VideoDecoder( |         decoder = VideoDecoder( | ||||||
|             input_path, |             input_path, | ||||||
|             width, |             width, | ||||||
|             height, |             height, | ||||||
|             frame_rate, |             frame_rate, | ||||||
|             self.processing_queue, |  | ||||||
|             processing_settings, |  | ||||||
|             self.pause, |  | ||||||
|         ) |         ) | ||||||
|         self.decoder.start() |         decoder_thread = VideoDecoderThread(tasks_queue, decoder, processing_settings) | ||||||
|  |         decoder_thread.start() | ||||||
| 
 | 
 | ||||||
|         # set up and start encoder thread |         # set up and start encoder thread | ||||||
|         logger.info("Starting video encoder") |         logger.info("Starting video encoder") | ||||||
|         self.encoder = VideoEncoder( |         encoder = VideoEncoder( | ||||||
|             input_path, |             input_path, | ||||||
|             frame_rate * 2 if mode == "interpolate" else frame_rate, |             frame_rate * 2 if mode == "interpolate" else frame_rate, | ||||||
|             output_path, |             output_path, | ||||||
|             output_width, |             output_width, | ||||||
|             output_height, |             output_height, | ||||||
|             total_frames, |  | ||||||
|             processed_frames, |  | ||||||
|             self.processed, |  | ||||||
|             self.pause, |  | ||||||
|         ) |         ) | ||||||
|         self.encoder.start() |  | ||||||
| 
 | 
 | ||||||
|         # create processor processes |         # create a pool of processor processes to process the queue | ||||||
|         for process_name in range(processes): |         processor: Processor = mode.value["processor"]( | ||||||
|             process = Processor(self.processing_queue, processed_frames, self.pause) |             tasks_queue, processed_frames, pause_flag | ||||||
|             process.name = str(process_name) |         ) | ||||||
|             process.daemon = True |         processor_pool = Pool(processes, processor.process) | ||||||
|             process.start() |  | ||||||
|             self.processor_processes.append(process) |  | ||||||
| 
 | 
 | ||||||
|         # create progress bar |         # create progress bar | ||||||
|         self.progress = Progress( |         progress = Progress( | ||||||
|             "[progress.description]{task.description}", |             "[progress.description]{task.description}", | ||||||
|             BarColumn(complete_style="blue", finished_style="green"), |             BarColumn(complete_style="blue", finished_style="green"), | ||||||
|             "[progress.percentage]{task.percentage:>3.0f}%", |             "[progress.percentage]{task.percentage:>3.0f}%", | ||||||
| @ -264,23 +221,42 @@ class Video2X: | |||||||
|             speed_estimate_period=300.0, |             speed_estimate_period=300.0, | ||||||
|             disable=True, |             disable=True, | ||||||
|         ) |         ) | ||||||
|  |         task = progress.add_task(f"[cyan]{mode.value['label']}", total=total_frames) | ||||||
| 
 | 
 | ||||||
|         self.description = f"[cyan]{MODE_LABELS.get(mode, 'Unknown')}" |         def _toggle_pause(_signal_number: int = -1, _frame=None): | ||||||
|         self.task = self.progress.add_task(self.description, total=total_frames) | 
 | ||||||
|  |             # allow the closure to modify external immutable flag | ||||||
|  |             nonlocal pause_flag | ||||||
|  | 
 | ||||||
|  |             # print console messages and update the progress bar's status | ||||||
|  |             if pause_flag.value is False: | ||||||
|  |                 progress.update( | ||||||
|  |                     task, description=f"[cyan]{mode.value['label']} (paused)" | ||||||
|  |                 ) | ||||||
|  |                 progress.stop_task(task) | ||||||
|  |                 logger.warning("Processing paused, press Ctrl+Alt+V again to resume") | ||||||
|  | 
 | ||||||
|  |             # otherwise, processing is currently paused: resume it | ||||||
|  |             elif pause_flag.value is True: | ||||||
|  |                 progress.update(task, description=f"[cyan]{mode.value['label']}") | ||||||
|  |                 logger.warning("Resuming processing") | ||||||
|  |                 progress.start_task(task) | ||||||
|  | 
 | ||||||
|  |             # invert the flag | ||||||
|  |             with pause_flag.get_lock(): | ||||||
|  |                 pause_flag.value = not pause_flag.value | ||||||
| 
 | 
 | ||||||
|         # allow sending SIGUSR1 to pause/resume processing |         # allow sending SIGUSR1 to pause/resume processing | ||||||
|         signal.signal(signal.SIGUSR1, self._toggle_pause) |         signal.signal(signal.SIGUSR1, _toggle_pause) | ||||||
| 
 | 
 | ||||||
|         # enable global pause hotkey if it's supported |         # enable global pause hotkey if it's supported | ||||||
|         if ENABLE_HOTKEY is True: |         if ENABLE_HOTKEY is True: | ||||||
| 
 | 
 | ||||||
|             # create global pause hotkey |             # create global pause hotkey | ||||||
|             pause_hotkey = pynput.keyboard.HotKey( |             pause_hotkey = HotKey(HotKey.parse("<ctrl>+<alt>+v"), _toggle_pause) | ||||||
|                 pynput.keyboard.HotKey.parse("<ctrl>+<alt>+v"), self._toggle_pause |  | ||||||
|             ) |  | ||||||
| 
 | 
 | ||||||
|             # create global keyboard input listener |             # create global keyboard input listener | ||||||
|             keyboard_listener = pynput.keyboard.Listener( |             keyboard_listener = Listener( | ||||||
|                 on_press=( |                 on_press=( | ||||||
|                     lambda key: pause_hotkey.press(keyboard_listener.canonical(key)) |                     lambda key: pause_hotkey.press(keyboard_listener.canonical(key)) | ||||||
|                 ), |                 ), | ||||||
| @ -293,51 +269,52 @@ class Video2X: | |||||||
|             keyboard_listener.start() |             keyboard_listener.start() | ||||||
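
Besides the Ctrl+Alt+V hotkey, the signal.signal(signal.SIGUSR1, _toggle_pause) registration above lets another process toggle pausing on POSIX systems; a small illustration with a hypothetical PID:

    import os
    import signal

    # hypothetical: the PID of the running video2x process has to be looked up
    # by the caller (e.g. with pgrep); each SIGUSR1 toggles pause/resume
    video2x_pid = 12345
    os.kill(video2x_pid, signal.SIGUSR1)
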
| 
 | 
 | ||||||
|         # a temporary variable that stores the exception |         # a temporary variable that stores the exception | ||||||
|         exception = [] |         exceptions = [] | ||||||
| 
 | 
 | ||||||
|         try: |         try: | ||||||
| 
 | 
 | ||||||
|             # wait for jobs in queue to deplete |             # let the context manager automatically stop the progress bar | ||||||
|             while self.processed.value < total_frames - 1: |             with progress: | ||||||
|                 time.sleep(1) |  | ||||||
| 
 | 
 | ||||||
|                 # check processor health |                 frame_index = 0 | ||||||
|                 for process in self.processor_processes: |                 while frame_index < total_frames: | ||||||
|                     if not process.is_alive(): |  | ||||||
|                         raise Exception("process died unexpectedly") |  | ||||||
| 
 | 
 | ||||||
|                 # check decoder health |                     current_frame = processed_frames.get(frame_index) | ||||||
|                 if not self.decoder.is_alive() and self.decoder.exception is not None: |  | ||||||
|                     raise Exception("decoder died unexpectedly") |  | ||||||
| 
 | 
 | ||||||
|                 # check encoder health |                     if pause_flag.value is True or current_frame is None: | ||||||
|                 if not self.encoder.is_alive() and self.encoder.exception is not None: |                         time.sleep(0.1) | ||||||
|                     raise Exception("encoder died unexpectedly") |                         continue | ||||||
| 
 | 
 | ||||||
|                 # show progress bar when upscale starts |                     # show the progress bar after the processing starts | ||||||
|                 if self.progress.disable is True and self.processed.value > 0: |                     # reduces speed estimation inaccuracies and print overlaps | ||||||
|                     self.progress.disable = False |                     if frame_index == 0: | ||||||
|                     self.progress.start() |                         progress.disable = False | ||||||
|  |                         progress.start() | ||||||
| 
 | 
 | ||||||
|                 # update progress |                     if current_frame is True: | ||||||
|                 if self.pause.value is False: |                         encoder.write(processed_frames.get(frame_index - 1)) | ||||||
|                     self.progress.update(self.task, completed=self.processed.value) |  | ||||||
| 
 | 
 | ||||||
|             self.progress.update(self.task, completed=total_frames) |                     else: | ||||||
|             self.progress.stop() |                         encoder.write(current_frame) | ||||||
|             logger.info("Processing has completed") | 
 | ||||||
|  |                         if frame_index > 0: | ||||||
|  |                             del processed_frames[frame_index - 1] | ||||||
|  | 
 | ||||||
|  |                     progress.update(task, completed=frame_index + 1) | ||||||
|  |                     frame_index += 1 | ||||||
| 
 | 
 | ||||||
|         # if SIGTERM is received or ^C is pressed |         # if SIGTERM is received or ^C is pressed | ||||||
|         except (SystemExit, KeyboardInterrupt) as error: |         except (SystemExit, KeyboardInterrupt) as error: | ||||||
|             self.progress.stop() |  | ||||||
|             logger.warning("Exit signal received, exiting gracefully") |             logger.warning("Exit signal received, exiting gracefully") | ||||||
|             logger.warning("Press ^C again to force terminate") |             logger.warning("Press ^C again to force terminate") | ||||||
|             exception.append(error) |             exceptions.append(error) | ||||||
| 
 | 
 | ||||||
|         except Exception as error: |         except Exception as error: | ||||||
|             self.progress.stop() |  | ||||||
|             logger.exception(error) |             logger.exception(error) | ||||||
|             exception.append(error) |             exceptions.append(error) | ||||||
|  | 
 | ||||||
|  |         else: | ||||||
|  |             logger.info("Processing has completed") | ||||||
| 
 | 
 | ||||||
|         finally: |         finally: | ||||||
| 
 | 
 | ||||||
| @ -346,31 +323,28 @@ class Video2X: | |||||||
|                 keyboard_listener.stop() |                 keyboard_listener.stop() | ||||||
|                 keyboard_listener.join() |                 keyboard_listener.join() | ||||||
| 
 | 
 | ||||||
|             # stop progress display |             # if errors have occurred, kill the FFmpeg processes | ||||||
|             self.progress.stop() |             if len(exceptions) > 0: | ||||||
|  |                 decoder.kill() | ||||||
|  |                 encoder.kill() | ||||||
| 
 | 
 | ||||||
|             # stop processor processes |             # stop the decoder | ||||||
|             logger.info("Stopping processor processes") |             decoder_thread.stop() | ||||||
|             for process in self.processor_processes: |             decoder_thread.join() | ||||||
|                 process.terminate() |  | ||||||
| 
 | 
 | ||||||
|             # wait for processes to finish |             # clear queue and signal processors to exit | ||||||
|             for process in self.processor_processes: |             # multiprocessing.Queue has no Queue.queue.clear | ||||||
|                 process.join() |             while tasks_queue.empty() is not True: | ||||||
|  |                 tasks_queue.get() | ||||||
|  |             for _ in range(processes): | ||||||
|  |                 tasks_queue.put(None) | ||||||
| 
 | 
 | ||||||
|             # stop encoder and decoder |             # close and join the process pool | ||||||
|             logger.info("Stopping decoder and encoder threads") |             processor_pool.close() | ||||||
|             self.decoder.stop() |             processor_pool.join() | ||||||
|             self.encoder.stop() |  | ||||||
|             self.decoder.join() |  | ||||||
|             self.encoder.join() |  | ||||||
| 
 | 
 | ||||||
|             # mark processing queue as closed |             # stop the encoder | ||||||
|             self.processing_queue.close() |             encoder.join() | ||||||
| 
 |  | ||||||
|             # raise the error if there is any |  | ||||||
|             if len(exception) > 0: |  | ||||||
|                 raise exception[0] |  | ||||||
| 
 | 
 | ||||||
|             # restore original STDOUT and STDERR |             # restore original STDOUT and STDERR | ||||||
|             sys.stdout = original_stdout |             sys.stdout = original_stdout | ||||||
| @ -380,10 +354,14 @@ class Video2X: | |||||||
|             logger.remove() |             logger.remove() | ||||||
|             logger.add(sys.stderr, colorize=True, format=LOGURU_FORMAT) |             logger.add(sys.stderr, colorize=True, format=LOGURU_FORMAT) | ||||||
| 
 | 
 | ||||||
|  |             # raise the first collected error | ||||||
|  |             if len(exceptions) > 0: | ||||||
|  |                 raise exceptions[0] | ||||||
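
The shutdown sequence in the finally block above drains tasks_queue and then enqueues one None per worker, which assumes that each processor's process() loop treats None as an exit sentinel so Pool.close()/Pool.join() can return. A sketch of that assumed consumer loop, written here as a standalone function taking the queue explicitly (the real Processor in video2x/processor.py presumably reads the queue passed to its constructor and may differ):

    from multiprocessing import Queue

    def process(tasks_queue: Queue) -> None:
        # keep pulling tasks until the None sentinel arrives
        while True:
            task = tasks_queue.get()
            if task is None:
                break  # sentinel received: return so the pool worker can exit
            # ... upscale or interpolate the frame carried by `task` ...
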
|  | 
 | ||||||
|     def upscale( |     def upscale( | ||||||
|         self, |         self, | ||||||
|         input_path: pathlib.Path, |         input_path: Path, | ||||||
|         output_path: pathlib.Path, |         output_path: Path, | ||||||
|         output_width: int, |         output_width: int, | ||||||
|         output_height: int, |         output_height: int, | ||||||
|         noise: int, |         noise: int, | ||||||
| @ -416,22 +394,21 @@ class Video2X: | |||||||
|             output_path, |             output_path, | ||||||
|             output_width, |             output_width, | ||||||
|             output_height, |             output_height, | ||||||
|             Upscaler, |             ProcessingMode.UPSCALE, | ||||||
|             "upscale", |  | ||||||
|             processes, |             processes, | ||||||
|             ( |             ( | ||||||
|                 output_width, |                 output_width, | ||||||
|                 output_height, |                 output_height, | ||||||
|  |                 algorithm, | ||||||
|                 noise, |                 noise, | ||||||
|                 threshold, |                 threshold, | ||||||
|                 algorithm, |  | ||||||
|             ), |             ), | ||||||
|         ) |         ) | ||||||
| 
 | 
 | ||||||
|     def interpolate( |     def interpolate( | ||||||
|         self, |         self, | ||||||
|         input_path: pathlib.Path, |         input_path: Path, | ||||||
|         output_path: pathlib.Path, |         output_path: Path, | ||||||
|         processes: int, |         processes: int, | ||||||
|         threshold: float, |         threshold: float, | ||||||
|         algorithm: str, |         algorithm: str, | ||||||
| @ -453,192 +430,7 @@ class Video2X: | |||||||
|             output_path, |             output_path, | ||||||
|             width, |             width, | ||||||
|             height, |             height, | ||||||
|             Interpolator, |             ProcessingMode.INTERPOLATE, | ||||||
|             "interpolate", |  | ||||||
|             processes, |             processes, | ||||||
|             (threshold, algorithm), |             (threshold, algorithm), | ||||||
|         ) |         ) | ||||||
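
With the mode now passed as a ProcessingMode member, the public upscale() and interpolate() wrappers keep their previous signatures; a minimal programmatic usage sketch with placeholder file names, following the argument order used by the (removed) main() further down:

    import pathlib

    from video2x.video2x import Video2X

    video2x = Video2X()

    # hypothetical paths: upscale to 3840x2160 with waifu2x using two worker processes
    video2x.upscale(
        pathlib.Path("input.mp4"),   # input_path
        pathlib.Path("output.mp4"),  # output_path
        3840,                        # output_width
        2160,                        # output_height
        3,                           # noise
        2,                           # processes
        0,                           # threshold: 0 processes every frame
        "waifu2x",                   # algorithm
    )
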
| 
 |  | ||||||
| 
 |  | ||||||
| def parse_arguments() -> argparse.Namespace: |  | ||||||
|     """ |  | ||||||
|     parse command line arguments |  | ||||||
| 
 |  | ||||||
|     :rtype argparse.Namespace: command parsing results |  | ||||||
|     """ |  | ||||||
|     parser = argparse.ArgumentParser( |  | ||||||
|         prog="video2x", |  | ||||||
|         formatter_class=argparse.ArgumentDefaultsHelpFormatter, |  | ||||||
|     ) |  | ||||||
|     parser.add_argument( |  | ||||||
|         "--version", help="show version information and exit", action="store_true" |  | ||||||
|     ) |  | ||||||
|     parser.add_argument( |  | ||||||
|         "-i", |  | ||||||
|         "--input", |  | ||||||
|         type=pathlib.Path, |  | ||||||
|         help="input file/directory path", |  | ||||||
|         required=True, |  | ||||||
|     ) |  | ||||||
|     parser.add_argument( |  | ||||||
|         "-o", |  | ||||||
|         "--output", |  | ||||||
|         type=pathlib.Path, |  | ||||||
|         help="output file/directory path", |  | ||||||
|         required=True, |  | ||||||
|     ) |  | ||||||
|     parser.add_argument( |  | ||||||
|         "-p", "--processes", type=int, help="number of processes to launch", default=1 |  | ||||||
|     ) |  | ||||||
|     parser.add_argument( |  | ||||||
|         "-l", |  | ||||||
|         "--loglevel", |  | ||||||
|         choices=["trace", "debug", "info", "success", "warning", "error", "critical"], |  | ||||||
|         default="info", |  | ||||||
|     ) |  | ||||||
| 
 |  | ||||||
|     # upscaler arguments |  | ||||||
|     action = parser.add_subparsers( |  | ||||||
|         help="action to perform", dest="action", required=True |  | ||||||
|     ) |  | ||||||
| 
 |  | ||||||
|     upscale = action.add_parser( |  | ||||||
|         "upscale", |  | ||||||
|         help="upscale a file", |  | ||||||
|         formatter_class=argparse.ArgumentDefaultsHelpFormatter, |  | ||||||
|         add_help=False, |  | ||||||
|     ) |  | ||||||
|     upscale.add_argument( |  | ||||||
|         "--help", action="help", help="show this help message and exit" |  | ||||||
|     ) |  | ||||||
|     upscale.add_argument("-w", "--width", type=int, help="output width") |  | ||||||
|     upscale.add_argument("-h", "--height", type=int, help="output height") |  | ||||||
|     upscale.add_argument("-n", "--noise", type=int, help="denoise level", default=3) |  | ||||||
|     upscale.add_argument( |  | ||||||
|         "-a", |  | ||||||
|         "--algorithm", |  | ||||||
|         choices=UPSCALING_ALGORITHMS, |  | ||||||
|         help="algorithm to use for upscaling", |  | ||||||
|         default=UPSCALING_ALGORITHMS[0], |  | ||||||
|     ) |  | ||||||
|     upscale.add_argument( |  | ||||||
|         "-t", |  | ||||||
|         "--threshold", |  | ||||||
|         type=float, |  | ||||||
|         help=( |  | ||||||
|             "skip if the percent difference between two adjacent frames is below this" |  | ||||||
|             " value; set to 0 to process all frames" |  | ||||||
|         ), |  | ||||||
|         default=0, |  | ||||||
|     ) |  | ||||||
| 
 |  | ||||||
|     # interpolator arguments |  | ||||||
|     interpolate = action.add_parser( |  | ||||||
|         "interpolate", |  | ||||||
|         help="interpolate frames for file", |  | ||||||
|         formatter_class=argparse.ArgumentDefaultsHelpFormatter, |  | ||||||
|         add_help=False, |  | ||||||
|     ) |  | ||||||
|     interpolate.add_argument( |  | ||||||
|         "--help", action="help", help="show this help message and exit" |  | ||||||
|     ) |  | ||||||
|     interpolate.add_argument( |  | ||||||
|         "-a", |  | ||||||
|         "--algorithm", |  | ||||||
|         choices=INTERPOLATION_ALGORITHMS, |  | ||||||
|         help="algorithm to use for upscaling", |  | ||||||
|         default=INTERPOLATION_ALGORITHMS[0], |  | ||||||
|     ) |  | ||||||
|     interpolate.add_argument( |  | ||||||
|         "-t", |  | ||||||
|         "--threshold", |  | ||||||
|         type=float, |  | ||||||
|         help=( |  | ||||||
|             "skip if the percent difference between two adjacent frames exceeds this" |  | ||||||
|             " value; set to 100 to interpolate all frames" |  | ||||||
|         ), |  | ||||||
|         default=10, |  | ||||||
|     ) |  | ||||||
| 
 |  | ||||||
|     return parser.parse_args() |  | ||||||
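
Although this pull request moves the parser out of this module, the resulting command line is invoked as sketched below (file names are placeholders, and the call assumes the package is installed so that python -m video2x resolves; note that the upscale subparser repurposes -h for height and exposes --help instead):

    import subprocess

    # hypothetical invocation: upscale input.mp4 to a height of 1440 with waifu2x;
    # -i/-o belong to the top-level parser and must precede the subcommand
    subprocess.run(
        [
            "python", "-m", "video2x",
            "-i", "input.mp4",
            "-o", "output.mp4",
            "upscale",
            "-h", "1440",
            "-a", "waifu2x",
        ],
        check=True,
    )
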
| 
 |  | ||||||
| 
 |  | ||||||
| def main() -> int: |  | ||||||
|     """ |  | ||||||
|     command line entrypoint for direct CLI invocation |  | ||||||
| 
 |  | ||||||
|     :rtype int: 0 if completed successfully, else other int |  | ||||||
|     """ |  | ||||||
| 
 |  | ||||||
|     try: |  | ||||||
|         # display version and legal information |  | ||||||
|         if "--version" in sys.argv: |  | ||||||
|             rich_print(LEGAL_INFO) |  | ||||||
|             return 0 |  | ||||||
| 
 |  | ||||||
|         # parse command line arguments |  | ||||||
|         args = parse_arguments() |  | ||||||
| 
 |  | ||||||
|         # check input/output file paths |  | ||||||
|         if not args.input.exists(): |  | ||||||
|             logger.critical(f"Cannot find input file: {args.input}") |  | ||||||
|             return 1 |  | ||||||
|         if not args.input.is_file(): |  | ||||||
|             logger.critical("Input path is not a file") |  | ||||||
|             return 1 |  | ||||||
|         if not args.output.parent.exists(): |  | ||||||
|             logger.critical(f"Output directory does not exist: {args.output.parent}") |  | ||||||
|             return 1 |  | ||||||
| 
 |  | ||||||
|         # set logger level |  | ||||||
|         if os.environ.get("LOGURU_LEVEL") is None: |  | ||||||
|             os.environ["LOGURU_LEVEL"] = args.loglevel.upper() |  | ||||||
| 
 |  | ||||||
|         # remove default handler |  | ||||||
|         logger.remove() |  | ||||||
| 
 |  | ||||||
|         # add new sink with custom handler |  | ||||||
|         logger.add(sys.stderr, colorize=True, format=LOGURU_FORMAT) |  | ||||||
| 
 |  | ||||||
|         # print package version and copyright notice |  | ||||||
|         logger.opt(colors=True).info(f"<magenta>Video2X {__version__}</magenta>") |  | ||||||
|         logger.opt(colors=True).info( |  | ||||||
|             "<magenta>Copyright (C) 2018-2022 K4YT3X and contributors.</magenta>" |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|         # initialize video2x object |  | ||||||
|         video2x = Video2X() |  | ||||||
| 
 |  | ||||||
|         if args.action == "upscale": |  | ||||||
|             video2x.upscale( |  | ||||||
|                 args.input, |  | ||||||
|                 args.output, |  | ||||||
|                 args.width, |  | ||||||
|                 args.height, |  | ||||||
|                 args.noise, |  | ||||||
|                 args.processes, |  | ||||||
|                 args.threshold, |  | ||||||
|                 args.algorithm, |  | ||||||
|             ) |  | ||||||
| 
 |  | ||||||
|         elif args.action == "interpolate": |  | ||||||
|             video2x.interpolate( |  | ||||||
|                 args.input, |  | ||||||
|                 args.output, |  | ||||||
|                 args.processes, |  | ||||||
|                 args.threshold, |  | ||||||
|                 args.algorithm, |  | ||||||
|             ) |  | ||||||
| 
 |  | ||||||
|     # don't print the traceback for manual terminations |  | ||||||
|     except KeyboardInterrupt: |  | ||||||
|         return 2 |  | ||||||
| 
 |  | ||||||
|     except Exception as error: |  | ||||||
|         logger.exception(error) |  | ||||||
|         return 1 |  | ||||||
| 
 |  | ||||||
|     # if no exceptions were produced |  | ||||||
|     else: |  | ||||||
|         logger.success("Processing completed successfully") |  | ||||||
|         return 0 |  | ||||||
|  | |||||||