Spaces:
Running
Running
Update multit2i.py
Browse files — multit2i.py (+1, −1)
multit2i.py
CHANGED
@@ -157,7 +157,7 @@ def load_from_model(model_name: str, hf_token: str | Literal[False] | None = Non
|
|
157 |
raise ModelNotFoundError(
|
158 |
f"Could not find model: {model_name}. If it is a private or gated model, please provide your Hugging Face access token (https://huggingface.co/settings/tokens) as the argument for the `hf_token` parameter."
|
159 |
)
|
160 |
-
p = response.json().get("pipeline_tag")
|
161 |
#if p != "text-to-image": raise ModelNotFoundError(f"This model isn't for text-to-image or unsupported: {model_name}.")
|
162 |
headers["X-Wait-For-Model"] = "true"
|
163 |
client = huggingface_hub.InferenceClient(model=model_name, headers=headers,
|
|
|
157 |
raise ModelNotFoundError(
|
158 |
f"Could not find model: {model_name}. If it is a private or gated model, please provide your Hugging Face access token (https://huggingface.co/settings/tokens) as the argument for the `hf_token` parameter."
|
159 |
)
|
160 |
+
#p = response.json().get("pipeline_tag")
|
161 |
#if p != "text-to-image": raise ModelNotFoundError(f"This model isn't for text-to-image or unsupported: {model_name}.")
|
162 |
headers["X-Wait-For-Model"] = "true"
|
163 |
client = huggingface_hub.InferenceClient(model=model_name, headers=headers,
|