feat: allow users to specify the number of threads when running on CPU (#1601)

* CPU_NUM_THREADS

* refactor: optimize code, type is already strict

---------

Co-authored-by: Manuel Schmid <manuel.schmid@odt.net>
This commit is contained in:
Maxim Saplin 2024-02-25 19:14:17 +03:00 committed by GitHub
parent ef1999c52c
commit 4d34f31a72
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 5 additions and 2 deletions

View File

@@ -100,7 +100,7 @@ vram_group.add_argument("--always-high-vram", action="store_true")
vram_group.add_argument("--always-normal-vram", action="store_true")
vram_group.add_argument("--always-low-vram", action="store_true")
vram_group.add_argument("--always-no-vram", action="store_true")
vram_group.add_argument("--always-cpu", action="store_true")
vram_group.add_argument("--always-cpu", type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1)
parser.add_argument("--always-offload-from-vram", action="store_true")

View File

@@ -60,6 +60,9 @@ except:
pass
if args.always_cpu:
if args.always_cpu > 0:
torch.set_num_threads(args.always_cpu)
print(f"Running on {torch.get_num_threads()} CPU threads")
cpu_state = CPUState.CPU
def is_intel_xpu():

View File

@@ -370,7 +370,7 @@ entry_with_update.py [-h] [--listen [IP]] [--port PORT]
[--attention-split | --attention-quad | --attention-pytorch]
[--disable-xformers]
[--always-gpu | --always-high-vram | --always-normal-vram |
--always-low-vram | --always-no-vram | --always-cpu]
--always-low-vram | --always-no-vram | --always-cpu [CPU_NUM_THREADS]]
[--always-offload-from-vram] [--disable-server-log]
[--debug-mode] [--is-windows-embedded-python]
[--disable-server-info] [--share] [--preset PRESET]