daily maintain
Parent: e564ba46a6
Commit: 034367ab48
@@ -285,15 +285,14 @@ def attention_pytorch(q, k, v, heads, mask=None):
     )

     out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)

-    if exists(mask):
-        raise NotImplementedError
     out = (
         out.transpose(1, 2).reshape(b, -1, heads * dim_head)
     )
     return out


 optimized_attention = attention_basic
+optimized_attention_masked = attention_basic

 if model_management.xformers_enabled():
     print("Using xformers cross attention")
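The mask guard removed above is safe to drop because torch.nn.functional.scaled_dot_product_attention already consumes the mask through its attn_mask argument, so attention_pytorch no longer needs to reject masked calls. A minimal sketch of that call, assuming PyTorch 2.x; the shapes and values below are illustrative only, not taken from the diff:

    # Sketch: SDPA accepts a boolean attn_mask natively (True = attend, False = ignore).
    import torch

    b, heads, seq, dim_head = 1, 8, 16, 64
    q = torch.randn(b, heads, seq, dim_head)
    k = torch.randn(b, heads, seq, dim_head)
    v = torch.randn(b, heads, seq, dim_head)
    mask = torch.ones(b, heads, seq, seq, dtype=torch.bool)

    out = torch.nn.functional.scaled_dot_product_attention(
        q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
    print(out.shape)  # torch.Size([1, 8, 16, 64])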
@@ -309,6 +308,9 @@ else:
         print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
         optimized_attention = attention_sub_quad

+
+if model_management.pytorch_attention_enabled():
+    optimized_attention_masked = attention_pytorch

 class CrossAttention(nn.Module):
     def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=fcbh.ops):
         super().__init__()
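This hunk extends the import-time backend selection: optimized_attention_masked starts as attention_basic (set in the previous hunk) and is upgraded to attention_pytorch only when the PyTorch attention backend is enabled. A stripped-down sketch of that selection pattern, with made-up names standing in for the real backends:

    # Hypothetical stand-ins; the real module assigns attention_* functions instead.
    def basic_impl(*args):
        return "basic"

    def pytorch_impl(*args):
        return "pytorch"

    PYTORCH_ATTENTION_ENABLED = True  # stand-in for model_management.pytorch_attention_enabled()

    optimized = basic_impl
    optimized_masked = basic_impl
    if PYTORCH_ATTENTION_ENABLED:
        optimized_masked = pytorch_impl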
@@ -334,7 +336,10 @@ class CrossAttention(nn.Module):
         else:
             v = self.to_v(context)

-        out = optimized_attention(q, k, v, self.heads, mask)
+        if mask is None:
+            out = optimized_attention(q, k, v, self.heads)
+        else:
+            out = optimized_attention_masked(q, k, v, self.heads, mask)
         return self.to_out(out)


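With the change above, CrossAttention.forward routes unmasked calls through optimized_attention and masked calls through optimized_attention_masked. A self-contained sketch of that dispatch with placeholder bodies (the real functions also reshape between flattened and per-head layouts, which is omitted here):

    # Sketch of the mask-based dispatch; the bodies are placeholders, not the real kernels.
    import torch

    def attention_unmasked(q, k, v, heads):
        return torch.nn.functional.scaled_dot_product_attention(q, k, v)

    def attention_masked(q, k, v, heads, mask):
        return torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask)

    def forward_attention(q, k, v, heads, mask=None):
        if mask is None:
            return attention_unmasked(q, k, v, heads)
        return attention_masked(q, k, v, heads, mask)

    q = k = v = torch.randn(1, 8, 16, 64)
    print(forward_attention(q, k, v, heads=8).shape)          # unmasked branch
    m = torch.ones(1, 8, 16, 16, dtype=torch.bool)
    print(forward_attention(q, k, v, heads=8, mask=m).shape)  # masked branch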
@@ -667,7 +667,7 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
         return False

     #FP16 is just broken on these cards
-    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450", "CMP 30HX"]
+    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450", "CMP 30HX", "T2000", "T1000", "T1200"]
     for x in nvidia_16_series:
         if x in props.name:
             return False
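The blacklist works by plain substring matching against the reported GPU name, so this change makes should_use_fp16 return False on T2000, T1000 and T1200 cards as well. A small self-contained sketch of the check; the device-name strings are made-up examples:

    # Mirrors the substring loop in should_use_fp16.
    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600",
                        "MX550", "MX450", "CMP 30HX", "T2000", "T1000", "T1200"]

    def fp16_blacklisted(device_name):
        return any(x in device_name for x in nvidia_16_series)

    print(fp16_blacklisted("NVIDIA GeForce GTX 1660 SUPER"))  # True
    print(fp16_blacklisted("NVIDIA T1000 8GB"))               # True (newly added entry)
    print(fp16_blacklisted("NVIDIA GeForce RTX 3090"))        # False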
@@ -1 +1 @@
-version = '2.1.682'
+version = '2.1.683'
@@ -6,7 +6,7 @@
     },
     {
         "name": "Fooocus Anime",
-        "prompt": "{prompt}, (masterpiece, best quality, ultra-detailed:1.1), illustration, disheveled hair, detailed eyes, perfect composition, moist skin, intricate details, earrings, by wlop",
+        "prompt": "(masterpiece:1.1), (best quality, ultra-detailed:1.2), {prompt}, illustration, disheveled hair, detailed eyes, perfect composition, moist skin, intricate details, earrings, by wlop",
         "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair,extra digit, fewer digits, cropped, worst quality, low quality"
     },
     {
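The "Fooocus Anime" style moves the {prompt} placeholder after the quality tags and rebalances their weights. The placeholder marks where the user's text is substituted into the style template; a hedged sketch of that substitution (the exact style-application code is not part of this diff):

    # Hedged sketch: "{prompt}" is a placeholder for the user's text.
    style_prompt = "(masterpiece:1.1), (best quality, ultra-detailed:1.2), {prompt}, illustration, disheveled hair, detailed eyes, perfect composition, moist skin, intricate details, earrings, by wlop"
    user_prompt = "a girl reading under cherry blossoms"
    print(style_prompt.replace("{prompt}", user_prompt))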
webui.py (10 lines changed)
@@ -2,6 +2,7 @@ from python_hijack import *

 import gradio as gr
 import random
+import os
 import time
 import shared
 import modules.path
@@ -16,7 +17,6 @@ import args_manager
 from modules.sdxl_styles import legal_style_names, aspect_ratios
 from modules.private_logger import get_current_html_path
 from modules.ui_gradio_extensions import reload_javascript
-from os.path import exists


 def generate_clicked(*args):
@@ -348,9 +348,11 @@ with shared.gradio_root:
         .then(lambda: (gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)), outputs=[generate_button, stop_button, skip_button]) \
         .then(fn=None, _js='playNotification')

-    notification_file = 'notification.ogg' if exists('notification.ogg') else 'notification.mp3' if exists('notification.mp3') else None
-    if notification_file != None:
-        gr.Audio(interactive=False, value=notification_file, elem_id='audio_notification', visible=False)
+    for notification_file in ['notification.ogg', 'notification.mp3']:
+        if os.path.exists(notification_file):
+            gr.Audio(interactive=False, value=notification_file, elem_id='audio_notification', visible=False)
+            break


 shared.gradio_root.launch(
     inbrowser=args_manager.args.auto_launch,
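The rewritten block, together with the new import os and the removed from os.path import exists, scans the candidate files in order, registers the first one that exists as a hidden audio element, and stops. A self-contained sketch of the first-match selection logic, using the file names from the diff:

    import os

    # First existing candidate wins, mirroring the new for/break loop in webui.py.
    candidates = ['notification.ogg', 'notification.mp3']
    notification_file = next((f for f in candidates if os.path.exists(f)), None)
    print(notification_file)  # None when neither file is present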