ford442 committed on
Commit
0cd7870
·
verified ·
1 Parent(s): 63fb90c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -9
app.py CHANGED
@@ -112,15 +112,16 @@ def load_and_prepare_model():
112
  #pipe.unet.to(memory_format=torch.channels_last)
113
  #pipe.enable_vae_tiling()
114
  pipe.to(device=device, dtype=torch.bfloat16)
115
- pipe.vae = vaeXL #.to('cpu') #.to(torch.bfloat16)
 
116
  pipe.unet.set_attn_processor(AttnProcessor2_0())
117
  pipe.vae.set_default_attn_processor()
118
  return pipe
119
 
120
  pipe = load_and_prepare_model()
121
 
122
- text_encoder=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder',token=True).to(device=device, dtype=torch.bfloat16)
123
- text_encoder_2=CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
124
 
125
  MAX_SEED = np.iinfo(np.int32).max
126
 
@@ -185,8 +186,8 @@ def generate_30(
185
  ):
186
  seed = random.randint(0, MAX_SEED)
187
  generator = torch.Generator(device='cuda').manual_seed(seed)
188
- pipe.text_encoder=text_encoder
189
- pipe.text_encoder_2=text_encoder_2
190
  options = {
191
  "prompt": [prompt],
192
  "negative_prompt": [negative_prompt],
@@ -227,8 +228,8 @@ def generate_60(
227
  ):
228
  seed = random.randint(0, MAX_SEED)
229
  generator = torch.Generator(device='cuda').manual_seed(seed)
230
- pipe.text_encoder=text_encoder
231
- pipe.text_encoder_2=text_encoder_2
232
  options = {
233
  "prompt": [prompt],
234
  "negative_prompt": [negative_prompt],
@@ -269,8 +270,8 @@ def generate_90(
269
  ):
270
  seed = random.randint(0, MAX_SEED)
271
  generator = torch.Generator(device='cuda').manual_seed(seed)
272
- pipe.text_encoder=text_encoder
273
- pipe.text_encoder_2=text_encoder_2
274
  options = {
275
  "prompt": [prompt],
276
  "negative_prompt": [negative_prompt],
 
112
  #pipe.unet.to(memory_format=torch.channels_last)
113
  #pipe.enable_vae_tiling()
114
  pipe.to(device=device, dtype=torch.bfloat16)
115
+ pipe.vae = vaeXL.to(device) #.to('cpu') #.to(torch.bfloat16)
116
+
117
  pipe.unet.set_attn_processor(AttnProcessor2_0())
118
  pipe.vae.set_default_attn_processor()
119
  return pipe
120
 
121
  pipe = load_and_prepare_model()
122
 
123
+ text_encoder=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder',token=True)#.to(device=device, dtype=torch.bfloat16)
124
+ text_encoder_2=CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
125
 
126
  MAX_SEED = np.iinfo(np.int32).max
127
 
 
186
  ):
187
  seed = random.randint(0, MAX_SEED)
188
  generator = torch.Generator(device='cuda').manual_seed(seed)
189
+ pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
190
+ pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
191
  options = {
192
  "prompt": [prompt],
193
  "negative_prompt": [negative_prompt],
 
228
  ):
229
  seed = random.randint(0, MAX_SEED)
230
  generator = torch.Generator(device='cuda').manual_seed(seed)
231
+ pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
232
+ pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
233
  options = {
234
  "prompt": [prompt],
235
  "negative_prompt": [negative_prompt],
 
270
  ):
271
  seed = random.randint(0, MAX_SEED)
272
  generator = torch.Generator(device='cuda').manual_seed(seed)
273
+ pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
274
+ pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
275
  options = {
276
  "prompt": [prompt],
277
  "negative_prompt": [negative_prompt],