Update README.md
README.md
@@ -34,33 +34,13 @@ pip install diffusers
 
 ```python
 import torch
-from diffusers import StableCascadeCombinedPipeline
-from diffusers import StableCascadePriorPipeline
-from diffusers import StableCascadeDecoderPipeline
+from diffusers import StableCascadeCombinedPipeline
 
 device = "cuda"
 dtype = torch.bfloat16
+model = "Disty0/sotediffusion-wuerstchen3-alpha1-decoder"
 
-prior_model = "Disty0/sotediffusion-wuerstchen3-alpha1"
-decoder_model = "Disty0/sotediffusion-wuerstchen3-alpha1-decoder"
-
-
-prior = StableCascadePriorPipeline.from_pretrained(prior_model, torch_dtype=dtype)
-decoder = StableCascadeDecoderPipeline.from_pretrained(decoder_model, torch_dtype=dtype)
-
-pipe = StableCascadeCombinedPipeline(
-    tokenizer=decoder.tokenizer,
-    text_encoder=decoder.text_encoder,
-    decoder=decoder.decoder,
-    scheduler=decoder.scheduler,
-    vqgan=decoder.vqgan,
-    prior_prior=prior.prior,
-    prior_text_encoder=prior.text_encoder,
-    prior_tokenizer=prior.tokenizer,
-    prior_scheduler=prior.scheduler,
-    prior_feature_extractor=prior.feature_extractor,
-    prior_image_encoder=prior.image_encoder)
-
+pipe = StableCascadeCombinedPipeline.from_pretrained(model, torch_dtype=dtype)
 
 # send everything to the gpu:
 pipe = pipe.to(device, dtype=dtype)
@@ -87,6 +67,7 @@ output = pipe(
 ).images[0]
 
 ## do something with the output image
+
 ```
 
 
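Taken together, the two hunks reduce the example to a single combined-pipeline load. The sketch below assembles the post-change code into one runnable script, assuming the diffusers `StableCascadeCombinedPipeline` API; the prompt, resolution, step counts, guidance scales, and output filename are illustrative placeholders, not values from this README.

```python
import torch
from diffusers import StableCascadeCombinedPipeline

device = "cuda"
dtype = torch.bfloat16
model = "Disty0/sotediffusion-wuerstchen3-alpha1-decoder"

# One repo now provides both stages, so a single from_pretrained call
# replaces the old manual assembly of prior + decoder pipelines.
pipe = StableCascadeCombinedPipeline.from_pretrained(model, torch_dtype=dtype)

# send everything to the gpu:
pipe = pipe.to(device, dtype=dtype)

# Placeholder prompt and sampling settings (assumptions, not the README's values).
output = pipe(
    prompt="1girl, solo, looking at viewer, outdoors",
    negative_prompt="worst quality, low quality",
    width=1024,
    height=1536,
    prior_num_inference_steps=30,  # stage C (prior) sampling steps
    prior_guidance_scale=7.0,      # stage C CFG scale
    num_inference_steps=10,        # stage B (decoder) sampling steps
    decoder_guidance_scale=1.0,    # stage B CFG scale
).images[0]

## do something with the output image
output.save("output.png")
```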