Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -85,13 +85,13 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
|
| 85 |
|
| 86 |
text_encoder = CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder', token=True)#.to(device=device, dtype=torch.bfloat16)
|
| 87 |
text_encoder_2 = CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
|
| 88 |
-
tokenizer_1 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='tokenizer', token=True)
|
| 89 |
-
tokenizer_2 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='tokenizer_2', token=True)
|
| 90 |
-
scheduler = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler', token=True)
|
| 91 |
-
vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
| 92 |
-
unet = UNet2DConditionModel.from_pretrained("ford442/RealVisXL_V5.0_BF16", low_cpu_mem_usage=False, subfolder='unet', upcast_attention=True, attention_type='gated-text-image', token=True)
|
| 93 |
|
| 94 |
def load_and_prepare_model():
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 95 |
pipe = StableDiffusionXLPipeline.from_pretrained(
|
| 96 |
'ford442/RealVisXL_V5.0_BF16',
|
| 97 |
#torch_dtype=torch.bfloat16,
|
|
@@ -99,16 +99,16 @@ def load_and_prepare_model():
|
|
| 99 |
add_watermarker=False,
|
| 100 |
text_encoder=None,
|
| 101 |
text_encoder_2=None,
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
unet=unet,
|
| 106 |
vae=None,
|
| 107 |
)
|
| 108 |
#pipe.scheduler=scheduler
|
| 109 |
#pipe.tokenizer=tokenizer_1
|
| 110 |
#pipe.tokenizer_2=tokenizer_2
|
| 111 |
-
pipe.unet=unet
|
| 112 |
#pipe.vae.do_resize=False
|
| 113 |
#pipe.vae.vae_scale_factor=8
|
| 114 |
#pipe.to(device)
|
|
|
|
| 85 |
|
| 86 |
text_encoder = CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder', token=True)#.to(device=device, dtype=torch.bfloat16)
|
| 87 |
text_encoder_2 = CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 88 |
|
| 89 |
def load_and_prepare_model():
|
| 90 |
+
tokenizer_1 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer', token=True)
|
| 91 |
+
tokenizer_2 = CLIPTokenizer.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='tokenizer_2', token=True)
|
| 92 |
+
scheduler = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', low_cpu_mem_usage=False, subfolder='scheduler', token=True)
|
| 93 |
+
vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
| 94 |
+
unet = UNet2DConditionModel.from_pretrained("ford442/RealVisXL_V5.0_BF16", low_cpu_mem_usage=False, subfolder='unet', upcast_attention=True, attention_type='gated-text-image', token=True)
|
| 95 |
pipe = StableDiffusionXLPipeline.from_pretrained(
|
| 96 |
'ford442/RealVisXL_V5.0_BF16',
|
| 97 |
#torch_dtype=torch.bfloat16,
|
|
|
|
| 99 |
add_watermarker=False,
|
| 100 |
text_encoder=None,
|
| 101 |
text_encoder_2=None,
|
| 102 |
+
tokenizer=None,
|
| 103 |
+
tokenizer_2=None,
|
| 104 |
+
scheduler=None,
|
| 105 |
unet=unet,
|
| 106 |
vae=None,
|
| 107 |
)
|
| 108 |
#pipe.scheduler=scheduler
|
| 109 |
#pipe.tokenizer=tokenizer_1
|
| 110 |
#pipe.tokenizer_2=tokenizer_2
|
| 111 |
+
#pipe.unet=unet
|
| 112 |
#pipe.vae.do_resize=False
|
| 113 |
#pipe.vae.vae_scale_factor=8
|
| 114 |
#pipe.to(device)
|