AlekseyCalvin committed
Commit ee3a281 · verified · Parent: 3f59028

Update app.py

Files changed (1): app.py (+5 -7)
app.py CHANGED
@@ -23,6 +23,11 @@ os.environ["TRANSFORMERS_CACHE"] = cache_path
os.environ["HF_HUB_CACHE"] = cache_path
os.environ["HF_HOME"] = cache_path

+
+torch.backends.cuda.matmul.allow_tf32 = True
+
+pipe = FluxPipeline.from_pretrained("AlekseyCalvin/HistoricColorSoonr_v2_FluxSchnell_Diffusers", ignore_mismatched_sizes=True, torch_dtype=torch.bfloat16)
+pipe.to(device="cuda", dtype=torch.bfloat16)
clipmodel = 'long' # 'norm', 'long' (my fine-tunes) - 'oai', 'orgL' (OpenAI / BeichenZhang original)
selectedprompt = 'long' # 'tiny' (51 tokens), 'short' (75), 'med' (116), 'long' (203)

@@ -30,17 +35,10 @@ if clipmodel == "long":
model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
config = CLIPConfig.from_pretrained(model_id)
maxtokens = 248
-
-torch.backends.cuda.matmul.allow_tf32 = True
-
clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to(device)
clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
config.text_config.max_position_embeddings = 248

-
-pipe = FluxPipeline.from_pretrained("AlekseyCalvin/HistoricColorSoonr_v2_FluxSchnell_Diffusers", ignore_mismatched_sizes=True, torch_dtype=torch.bfloat16)
-pipe.to(device="cuda", dtype=torch.bfloat16)
-
pipe.tokenizer = clip_processor.tokenizer
pipe.text_encoder = clip_model.text_model
pipe.tokenizer_max_length = maxtokens
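For context, here is a minimal sketch of what this section of app.py looks like after the commit. The imports, the `device` definition, and the surrounding indentation are assumptions filled in for the sketch (they live in parts of the file not shown in this diff); the model IDs, parameters, and assignments are taken from the diff above.

```python
import torch
from transformers import CLIPConfig, CLIPModel, CLIPProcessor
from diffusers import FluxPipeline

# Assumption: `device` is defined earlier in app.py; a typical definition.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Moved up by this commit: enable TF32 matmuls and load the Flux pipeline
# before the CLIP selection block, so they run regardless of `clipmodel`.
torch.backends.cuda.matmul.allow_tf32 = True
pipe = FluxPipeline.from_pretrained(
    "AlekseyCalvin/HistoricColorSoonr_v2_FluxSchnell_Diffusers",
    ignore_mismatched_sizes=True,
    torch_dtype=torch.bfloat16,
)
pipe.to(device="cuda", dtype=torch.bfloat16)

clipmodel = 'long'       # 'norm', 'long' (my fine-tunes) - 'oai', 'orgL' (OpenAI / BeichenZhang original)
selectedprompt = 'long'  # 'tiny' (51 tokens), 'short' (75), 'med' (116), 'long' (203)

if clipmodel == "long":
    model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
    config = CLIPConfig.from_pretrained(model_id)
    maxtokens = 248
    clip_model = CLIPModel.from_pretrained(
        model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True
    ).to(device)
    clip_processor = CLIPProcessor.from_pretrained(
        model_id, padding="max_length", max_length=maxtokens,
        ignore_mismatched_sizes=True, return_tensors="pt", truncation=True
    )
    config.text_config.max_position_embeddings = 248

# Swap the pipeline's stock CLIP-L tokenizer/text encoder for the LongCLIP ones
# and raise the tokenizer limit to match (248 tokens instead of the usual 77).
pipe.tokenizer = clip_processor.tokenizer
pipe.text_encoder = clip_model.text_model
pipe.tokenizer_max_length = maxtokens
```

The change appears to hoist `torch.backends.cuda.matmul.allow_tf32 = True` and the `FluxPipeline` load out of the `if clipmodel == "long":` block (the hunk context in the diff), so they now execute before, and independently of, the CLIP variant selection.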