try fix #849
parent b95c7378c9
commit 49f47a6f5e
@@ -10,11 +10,20 @@ fcbh_cli.parser.add_argument("--language", type=str, default='default',
                              help="Translate UI using json files in [language] folder. "
                                   "For example, [--language example] will use [language/example.json] for translation.")
 
-fcbh_cli.args = fcbh_cli.parser.parse_args()
-fcbh_cli.args.disable_cuda_malloc = True
-fcbh_cli.args.auto_launch = True
+# For example, https://github.com/lllyasviel/Fooocus/issues/849
+fcbh_cli.parser.add_argument("--enable-smart-memory", action="store_true",
+                             help="Force loading models to vram when the unload can be avoided. "
+                                  "Some Mac users may need this.")
 
-if getattr(fcbh_cli.args, 'port', 8188) == 8188:
-    fcbh_cli.args.port = None
+fcbh_cli.parser.set_defaults(
+    disable_cuda_malloc=True,
+    auto_launch=True,
+    port=None
+)
+
+fcbh_cli.args = fcbh_cli.parser.parse_args()
+
+# Disable by default because of issues like https://github.com/lllyasviel/Fooocus/issues/724
+fcbh_cli.args.disable_smart_memory = not fcbh_cli.args.enable_smart_memory
 
 args = fcbh_cli.args
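The hunk above replaces the post-parse attribute overwrites with argparse's set_defaults() and derives the internal disable_smart_memory switch from the new user-facing --enable-smart-memory flag. A minimal standalone sketch of that pattern (a hypothetical script, not Fooocus code; the flag and default names are copied from the hunk):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--enable-smart-memory", action="store_true",
                    help="Keep models loaded when unloading can be avoided.")
parser.add_argument("--port", type=int, default=8188)

# Defaults are declared on the parser instead of being patched onto the
# parsed namespace afterwards; set_defaults() also creates attributes
# (e.g. auto_launch) that have no matching add_argument() call.
parser.set_defaults(
    disable_cuda_malloc=True,
    auto_launch=True,
    port=None,
)

args = parser.parse_args()

# Smart memory stays disabled unless the user opts in with --enable-smart-memory.
args.disable_smart_memory = not args.enable_smart_memory

print(args.disable_smart_memory, args.auto_launch, args.port)
```

A side effect of this restructuring is that an explicit `--port 8188` is now kept: the old code reset the port to None whenever it was still at the 8188 default, whereas set_defaults(port=None) only changes the default value.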
@@ -1 +1 @@
-version = '2.1.774'
+version = '2.1.775'
@@ -493,20 +493,7 @@ def build_loaded(module, loader_name):
     return
 
 
-def disable_smart_memory():
-    print(f'[Fooocus] Disabling smart memory')
-    fcbh.model_management.DISABLE_SMART_MEMORY = True
-    args_manager.args.disable_smart_memory = True
-    fcbh.cli_args.args.disable_smart_memory = True
-    return
-
-
 def patch_all():
-    # Many recent reports show that Comfyanonymous's method is still not robust enough and many 4090s are broken
-    # We will not use it until this method is really usable
-    # For example https://github.com/lllyasviel/Fooocus/issues/724
-    disable_smart_memory()
-
     if not hasattr(fcbh.model_management, 'load_models_gpu_origin'):
         fcbh.model_management.load_models_gpu_origin = fcbh.model_management.load_models_gpu
 
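With disable_smart_memory() gone, the smart-memory policy is meant to be driven by the parsed flag alone rather than by a monkey-patch inside patch_all(). A rough sketch of how startup code could consume that flag, assuming fcbh.model_management still exposes the module-level DISABLE_SMART_MEMORY switch referenced in the removed helper (the exact wiring inside fcbh is an assumption here, not taken from this commit):

```python
import args_manager
import fcbh.model_management


def apply_smart_memory_policy():
    # args.disable_smart_memory was already derived from --enable-smart-memory
    # at parse time, so no post-hoc patching of the args object is needed.
    fcbh.model_management.DISABLE_SMART_MEMORY = args_manager.args.disable_smart_memory
    if args_manager.args.disable_smart_memory:
        print('[Fooocus] Smart memory disabled (default); pass --enable-smart-memory to opt in.')
```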
@@ -220,7 +220,7 @@ You can install Fooocus on Apple Mac silicon (M1 or M2) with macOS 'Catalina' or
 1. Activate your new conda environment, `conda activate fooocus`.
 1. Install the pygit2, `pip install pygit2==1.12.2`.
 1. Install the packages required by Fooocus, `pip install -r requirements_versions.txt`.
-1. Launch Fooocus by running `python entry_with_update.py`. The first time you run Fooocus, it will automatically download the Stable Diffusion SDXL models and will take a significant time, depending on your internet connection.
+1. Launch Fooocus by running `python entry_with_update.py`. (Some Mac M2 users may need `python entry_with_update.py --enable-smart-memory` to speed up model loading/unloading.) The first time you run Fooocus, it will automatically download the Stable Diffusion SDXL models and will take a significant time, depending on your internet connection.
 
 Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
 