openfree committed
Commit 7fb2129 · verified · 1 Parent(s): a0e7bff

Update app.py

Files changed (1):
  1. app.py  +3 -12
app.py CHANGED
@@ -103,14 +103,8 @@ try:
 
     print(f"Loading LoRA weights from: {lora_path}")
 
-    # Fix how the LoRA weights are loaded
-    pipe.load_lora_weights(
-        lora_path,
-        adapter_name="fantasy"
-    )
-
-    # Activate the LoRA weights
-    pipe.set_adapters(["fantasy"])
+    # Load the LoRA weights
+    pipe.load_lora_weights(lora_path)
     pipe.fuse_lora(lora_scale=0.75)  # adjust the lora_scale value
 
     # Clean up memory
@@ -119,7 +113,6 @@ try:
 
     print("LoRA weights loaded and fused successfully")
     print(f"Current device: {pipe.device}")
-    print(f"Active adapters: {pipe.active_adapters}")
 
 except Exception as e:
     print(f"Error loading LoRA weights: {str(e)}")
@@ -143,16 +136,14 @@ def generate_image(
 
     translated_prompt = translate_to_english(prompt)
     print(f"Processing prompt: {translated_prompt}")
-    print(f"Active adapters before generation: {pipe.active_adapters}")
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator(device=device).manual_seed(seed)
 
-    # Check the LoRA settings
-    print(f"LoRA scale: {pipe.lora_scale}")
     print(f"Current device: {pipe.device}")
+    print(f"Starting image generation...")
 
     with torch.inference_mode(), torch.cuda.amp.autocast(enabled=True):
         image = pipe(
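
For reference, the LoRA-loading block after this commit boils down to loading the weights directly and fusing them, with no named adapter or set_adapters call. Below is a minimal, self-contained sketch of that flow; the base model ID and the LoRA path are placeholders, not the values app.py actually uses:

import gc
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Placeholder base model; app.py builds its own pipeline elsewhere.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

lora_path = "path/to/lora.safetensors"  # placeholder path

try:
    print(f"Loading LoRA weights from: {lora_path}")

    # Load the LoRA weights directly (no adapter_name / set_adapters step)
    pipe.load_lora_weights(lora_path)
    # Fuse them into the base weights at reduced strength
    pipe.fuse_lora(lora_scale=0.75)

    # Clean up memory left over from loading
    gc.collect()
    if device == "cuda":
        torch.cuda.empty_cache()

    print("LoRA weights loaded and fused successfully")
    print(f"Current device: {pipe.device}")
except Exception as e:
    print(f"Error loading LoRA weights: {str(e)}")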