liang.zhao committed
Commit 1cc0980 · Parent: 9c879ee

update model and config

Files changed (1): README.md +9 -6
README.md CHANGED
@@ -84,16 +84,19 @@ user_message = prompt_template.format(input=prompt, response_a=responseA, respon
 
 conversation = [{"role": "user", "content": user_message}]
 
-device = "cuda:0"
-model_name = "Skywork/Skywork-Critic-Llama3.1-8B"
+model_name = "Skywork/Skywork-Critic-Llama3.1-70B"
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+    torch_dtype="auto",
+    device_map="auto"
+)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map=device,)
 
 input_ids = tokenizer.apply_chat_template(
     conversation,
     tokenize=True,
-    add_generation_prompt=True,
-    return_tensors="pt").to(device)
+    add_generation_prompt=True,
+    return_tensors="pt").to(model.device)
 
 generation = model.generate(
     input_ids=input_ids,
@@ -142,4 +145,4 @@ If you find our work helpful, please feel free to cite us using the following Bi
     howpublished={\url{https://huggingface.co/Skywork}},
     url={https://huggingface.co/Skywork},
 }
-```
+```
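
For context, here is a minimal self-contained sketch of the snippet as it reads after this commit. Only the model loading, chat-template, and device-placement lines come from the diff; the prompt_template text, the example inputs, and the generation and decoding settings (max_new_tokens, do_sample, the slice-and-decode step) are illustrative assumptions, since the hunk truncates before them.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical pairwise-judging prompt; the real template is defined
# earlier in the README and is not part of this hunk.
prompt_template = (
    "Review the two responses to the user's question and state "
    "which is better.\n\nQuestion: {input}\n\n"
    "Response A: {response_a}\n\nResponse B: {response_b}"
)
prompt = "What is the capital of France?"
responseA = "Paris."
responseB = "Lyon is the capital of France."

user_message = prompt_template.format(
    input=prompt, response_a=responseA, response_b=responseB
)
conversation = [{"role": "user", "content": user_message}]

model_name = "Skywork/Skywork-Critic-Llama3.1-70B"
# device_map="auto" lets accelerate shard the 70B weights across all
# visible GPUs; torch_dtype="auto" keeps the checkpoint's native dtype.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

input_ids = tokenizer.apply_chat_template(
    conversation,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt").to(model.device)

# Generation settings below are assumptions; the diff cuts off after
# input_ids.
generation = model.generate(
    input_ids=input_ids,
    max_new_tokens=512,
    do_sample=False,
)
# Decode only the newly generated tokens, not the prompt.
completion = tokenizer.decode(
    generation[0][input_ids.shape[-1]:], skip_special_tokens=True
)
print(completion)
```

The switch from an explicit device = "cuda:0" to device_map="auto" matters for the new checkpoint: the 8B model fit on a single GPU, while the 70B model generally needs to be sharded across several. Calling .to(model.device) then places the inputs on the device holding the model's first parameters, which is the usual pattern for sharded models.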