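# Gradio text-to-image demo: Disty0/LCM_SoteMix running on CPU via
# optimum-intel's OpenVINO Stable Diffusion pipeline, with the VAE decoder
# swapped for the lightweight TAESD decoder (deinferno/taesd-openvino)
# to speed up image decoding.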
import gradio as gr
import torch
from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder, OVBaseModel, OVStableDiffusionPipeline
from huggingface_hub import snapshot_download
import openvino.runtime as ov
from typing import Optional, Dict



model_id = "Disty0/LCM_SoteMix"
#model_id = "Disty0/sotediffusion-v2" # does not work

# 1024x512 runs out of memory
HEIGHT = 768
WIDTH = 512

batch_size = -1  # -1 keeps the batch dimension dynamic when reshaping the model
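# OVModelVaeDecoder's own __init__ expects to be built during a full pipeline
# load, so this subclass skips it (note the super() call targets its parent)
# and wires a standalone ov.Model straight in; used below to attach the
# separately downloaded TAESD decoder.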
class CustomOVModelVaeDecoder(OVModelVaeDecoder):
    def __init__(
        self, model: ov.Model, parent_model: OVBaseModel, ov_config: Optional[Dict[str, str]] = None, model_dir: Optional[str] = None,
    ):
        super(OVModelVaeDecoder, self).__init__(model, parent_model, ov_config, "vae_decoder", model_dir)


# Load without compiling so the VAE decoder can be swapped and the model
# reshaped to static dimensions first.
pipe = OVStableDiffusionPipeline.from_pretrained(
        model_id,
        compile=False,
        ov_config={"CACHE_DIR": ""},
        #torch_dtype=torch.int8,     # fast
        #torch_dtype=torch.bfloat16, # medium
        #variant="fp16",
        torch_dtype=torch.float32,   # slow (full precision)
        use_safetensors=False,
        )

# Swap in the lightweight TAESD VAE decoder for faster decoding.
taesd_dir = snapshot_download(repo_id="deinferno/taesd-openvino")
pipe.vae_decoder = CustomOVModelVaeDecoder(model = OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"), 
                                           parent_model = pipe, 
                                           model_dir = taesd_dir
                                          )
pipe.reshape(batch_size=batch_size, height=HEIGHT, width=WIDTH, num_images_per_prompt=1)
#pipe.load_textual_inversion("./badhandv4.pt", "badhandv4")
#pipe.load_textual_inversion("./Konpeto.pt", "Konpeto")
#<shigure-ui-style>
#pipe.load_textual_inversion("sd-concepts-library/shigure-ui-style")
#pipe.load_textual_inversion("sd-concepts-library/ruan-jia")
#pipe.load_textual_inversion("sd-concepts-library/agm-style-nao")


# Compile the reshaped model for the target device (CPU).
pipe.compile()

prompt=""
negative_prompt="(worst quality, low quality, lowres), zombie, interlocked fingers,"

def infer(prompt, negative_prompt=negative_prompt):
    # LCM checkpoints need only a few steps and a guidance scale near 1.0.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=WIDTH,
        height=HEIGHT,
        guidance_scale=1.0,
        num_inference_steps=8,
        num_images_per_prompt=1,
    ).images[0]

    return image


examples = [
    "Sailor Chibi Moon, Katsura Masakazu style",
    "1girl, silver hair, symbol-shaped pupils, yellow eyes, smiling, light particles, light rays, wallpaper, star guardian, serious face, red inner hair, power aura, grandmaster1, golden and white clothes",
    "A cute kitten, Tinkle style.",
    "(illustration, 8k CG, extremely detailed),(whimsical),catgirl,teenage girl,playing in the snow,winter wonderland,snow-covered trees,soft pastel colors,gentle lighting,sparkling snow,joyful,magical atmosphere,highly detailed,fluffy cat ears and tail,intricate winter clothing,shallow depth of field,watercolor techniques,close-up shot,slightly tilted angle,fairy tale architecture,nostalgic,playful,winter magic,(masterpiece:2),best quality,ultra highres,original,extremely detailed,perfect lighting,",
]

css="""
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""


power_device = "CPU"

with gr.Blocks(css=css) as demo:
    
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Disty0/LCM_SoteMix {WIDTH}x{HEIGHT}
        Currently running on {power_device}.
        """)
        
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )         
            run_button = gr.Button("Run", scale=0)
        
        result = gr.Image(label="Result", show_label=False)

        gr.Examples(
            examples = examples,
            fn = infer,
            inputs = [prompt],
            outputs = [result]
        )

    run_button.click(
        fn = infer,
        inputs = [prompt],
        outputs = [result]
    )

demo.queue().launch()
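
# Quick smoke test without the UI, assuming the model downloads above succeeded:
#   image = infer("A cute kitten, Tinkle style.")
#   image.save("out.png")  # infer() returns a PIL.Image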