Disty0 committed
Commit ce5e479 · verified · 1 Parent(s): 8d46ee0

Update README.md

Files changed (1)
  1. README.md +27 -7
README.md CHANGED
@@ -62,13 +62,33 @@ pip install diffusers
 
 ```python
 import torch
-from diffusers import StableCascadeCombinedPipeline
+from diffusers import StableCascadeCombinedPipeline, StableCascadePriorPipeline, StableCascadeDecoderPipeline
+
+
 
 device = "cuda"
 dtype = torch.bfloat16
-model = "Disty0/sotediffusion-wuerstchen3-alpha1-decoder"
 
-pipe = StableCascadeCombinedPipeline.from_pretrained(model, torch_dtype=dtype)
+prior_model = "Disty0/sotediffusion-wuerstchen3-alpha1"
+decoder_model = "Disty0/sotediffusion-wuerstchen3-alpha1-decoder"
+
+
+prior = diffusers.StableCascadePriorPipeline.from_pretrained(prior_model, torch_dtype=dtype)
+decoder = diffusers.StableCascadeDecoderPipeline.from_pretrained(decoder_model, torch_dtype=dtype)
+
+pipe = diffusers.StableCascadeCombinedPipeline(
+    tokenizer=decoder.tokenizer,
+    text_encoder=decoder.text_encoder,
+    decoder=decoder.decoder,
+    scheduler=decoder.scheduler,
+    vqgan=decoder.vqgan,
+    prior_prior=prior.prior,
+    prior_text_encoder=prior.text_encoder,
+    prior_tokenizer=prior.tokenizer,
+    prior_scheduler=prior.scheduler,
+    prior_feature_extractor=prior.feature_extractor,
+    prior_image_encoder=prior.image_encoder)
+
 
 # send everything to the gpu:
 pipe = pipe.to(device, dtype=dtype)
@@ -79,16 +99,16 @@ pipe.prior_pipe = pipe.prior_pipe.to(device, dtype=dtype)
 
 
 
-prompt = "extremely aesthetic, best quality, newest, general, 1girl, solo, looking at viewer, blush, slight smile, cat ears, long hair, dress, bare shoulders, cherry blossoms, flowers, petals, vegetation, wind,"
-negative_prompt = "very displeasing, worst quality, oldest, monochrome, sketch, loli, child,"
+prompt = "1girl, solo, cowboy shot, straight hair, looking at viewer, hoodie, indoors, slight smile, casual, furniture, doorway, very aesthetic, best quality, newest,"
+negative_prompt = "very displeasing, worst quality, oldest, monochrome, sketch, realistic,"
 
 output = pipe(
     width=1024,
     height=1536,
     prompt=prompt,
     negative_prompt=negative_prompt,
-    decoder_guidance_scale=1.2,
-    prior_guidance_scale=8.0,
+    decoder_guidance_scale=1.0,
+    prior_guidance_scale=12.0,
     prior_num_inference_steps=40,
     output_type="pil",
     num_inference_steps=10
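
The updated snippet calls `diffusers.StableCascadePriorPipeline`, `diffusers.StableCascadeDecoderPipeline`, and `diffusers.StableCascadeCombinedPipeline` through a `diffusers.` prefix, while the new import line only brings in the class names; as shown in the diff it would need an extra `import diffusers` elsewhere in the README to run. Below is a minimal, self-contained sketch of the updated usage that relies only on the imports visible in this diff; the final `save` call is an illustrative addition, not part of the committed README.

```python
import torch
from diffusers import (
    StableCascadeCombinedPipeline,
    StableCascadeDecoderPipeline,
    StableCascadePriorPipeline,
)

device = "cuda"
dtype = torch.bfloat16

prior_model = "Disty0/sotediffusion-wuerstchen3-alpha1"
decoder_model = "Disty0/sotediffusion-wuerstchen3-alpha1-decoder"

# Load the prior (stage C) and decoder (stage B) as separate pipelines,
# then stitch them into a single combined pipeline from their components.
prior = StableCascadePriorPipeline.from_pretrained(prior_model, torch_dtype=dtype)
decoder = StableCascadeDecoderPipeline.from_pretrained(decoder_model, torch_dtype=dtype)

pipe = StableCascadeCombinedPipeline(
    tokenizer=decoder.tokenizer,
    text_encoder=decoder.text_encoder,
    decoder=decoder.decoder,
    scheduler=decoder.scheduler,
    vqgan=decoder.vqgan,
    prior_prior=prior.prior,
    prior_text_encoder=prior.text_encoder,
    prior_tokenizer=prior.tokenizer,
    prior_scheduler=prior.scheduler,
    prior_feature_extractor=prior.feature_extractor,
    prior_image_encoder=prior.image_encoder,
)

# send everything to the gpu:
pipe = pipe.to(device, dtype=dtype)
pipe.prior_pipe = pipe.prior_pipe.to(device, dtype=dtype)

prompt = "1girl, solo, cowboy shot, straight hair, looking at viewer, hoodie, indoors, slight smile, casual, furniture, doorway, very aesthetic, best quality, newest,"
negative_prompt = "very displeasing, worst quality, oldest, monochrome, sketch, realistic,"

output = pipe(
    width=1024,
    height=1536,
    prompt=prompt,
    negative_prompt=negative_prompt,
    decoder_guidance_scale=1.0,
    prior_guidance_scale=12.0,
    prior_num_inference_steps=40,
    output_type="pil",
    num_inference_steps=10,
)

# output.images holds PIL images when output_type="pil"; the filename is illustrative.
output.images[0].save("output.png")
```

Relative to the previous example, the commit also retunes the sampler settings: `decoder_guidance_scale` goes from 1.2 to 1.0 and `prior_guidance_scale` from 8.0 to 12.0, while the step counts stay at 40 prior steps and 10 decoder steps.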