tianleliphoebe committed
Commit 52026b9 · Parent: 45c2aa5

add secret

Files changed (1):
  model/model_manager.py (+3 -3)
model/model_manager.py CHANGED
@@ -2,7 +2,7 @@ import concurrent.futures
 import random
 import gradio as gr
 import requests
-import io, base64, json
+import io, base64, json, os
 import spaces
 from PIL import Image
 from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, MUSEUM_UNSUPPORTED_MODELS, DESIRED_APPEAR_MODEL, load_pipeline
@@ -33,8 +33,8 @@ class ModelManager:
         model_id = "meta-llama/Meta-Llama-Guard-2-8B"
         device = "cuda"
         dtype = torch.bfloat16
-        tokenizer = AutoTokenizer.from_pretrained(model_id)
-        model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype, device_map=device)
+        tokenizer = AutoTokenizer.from_pretrained(model_id, token=os.environ['HF_GUARD'])
+        model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype, device_map=device, token=os.environ['HF_GUARD'])
         chat = [{"role": "user", "content": prompt}]
         input_ids = tokenizer.apply_chat_template(chat, return_tensors="pt").to(device)
         output = model.generate(input_ids=input_ids, max_new_tokens=100, pad_token_id=0)
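
For context: the change passes a Hugging Face access token, stored in the HF_GUARD secret, when loading the gated meta-llama/Meta-Llama-Guard-2-8B repo, instead of relying on ambient credentials. Below is a minimal sketch of the same pattern, assuming a recent transformers release (one that accepts the token= keyword in from_pretrained) and a configured HF_GUARD secret. The load_guard helper is hypothetical, not part of this repo, and it uses os.environ.get with an explicit error rather than the bare os.environ['HF_GUARD'] lookup in the diff:

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_guard(model_id: str = "meta-llama/Meta-Llama-Guard-2-8B"):
    # Hypothetical helper (not in the repo): load the gated Llama Guard 2
    # model, authenticating with the token stored in the HF_GUARD secret.
    token = os.environ.get("HF_GUARD")
    if token is None:
        # Fail with a clear message instead of the KeyError that
        # os.environ['HF_GUARD'] would raise if the secret is missing.
        raise RuntimeError(
            "HF_GUARD is not set; add a Hugging Face access token with "
            "access to the gated repo as a Space secret."
        )
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
    model = AutoModelForCausalLM.from_pretrained(
        model_id, torch_dtype=torch.bfloat16, device_map="cuda", token=token
    )
    return tokenizer, model

Reading the token from the environment keeps it out of the source tree, which is the point of the commit; failing fast with a descriptive error makes a missing secret easier to diagnose than a raw KeyError at import time.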