andrewzamai
committed on
Update README.md
README.md
CHANGED
@@ -323,17 +323,32 @@ from vllm import LLM, SamplingParams
 vllm_model = LLM(model="expertai/SLIMER-PARALLEL-LLaMA3")
 tokenizer = vllm_model.get_tokenizer()
 
+# suggested temperature 0; set the max_tokens hyperparameter as needed
+cutoff_len = 4096
 sampling_params = SamplingParams(temperature=0, max_tokens=1000, stop=tokenizer.eos_token)
 
-#
-
+# given the list of NE types and a dictionary of Definition and Guidelines for each --> returns the instruction
+slimer_prompter = SLIMER_PARALLEL_instruction_prompter("SLIMER_PARALLEL_instruction_template", './src/SFT_finetuning/templates')
+
+# create a dictionary of dictionaries: each NE type as key should have a {Definition: str, Guidelines: str} value
+ne_types_list = ['ORGANIZATION', 'UNIVERSITY', 'LOCATION', 'PERSON', 'CONFERENCE']
+def_guidelines_per_NE_dict = {'ORGANIZATION': {'Definition': "'organization' refers to structured groups, institutions, companies, or associations.", 'Guidelines': "Avoid labeling generic terms like 'team' or 'group'. Exercise caution with ambiguous entities like 'Apple' (company vs. fruit) and 'Manchester United' (sports team vs. fan club)."}, 'UNIVERSITY': {'Definition': 'UNIVERSITY represents educational institutions that offer higher education and academic research programs.', 'Guidelines': "Avoid labeling general concepts such as 'education' or 'academia' as UNIVERSITY. Exercise caution with ambiguous terms like 'Cambridge' (can refer to different institutions) and 'Harvard' (can refer to a person)."}, 'LOCATION': {'Definition': 'LOCATION refers to specific geographic entities such as venues, facilities, and institutions that represent physical places with distinct addresses or functions.', 'Guidelines': "Exercise caution with ambiguous terms, e.g., 'Amazon' (company, river, and region) and 'Cambridge' (U.S. city, UK city, and university). Consider the context and specificity to accurately classify locations."}, 'PERSON': {'Definition': 'PERSON refers to individuals, including public figures, celebrities, and notable personalities.', 'Guidelines': 'If a person is working on research (including professor, Ph.D. student, researcher in companies, and etc) avoid labeling it as PERSON entity.'}, 'CONFERENCE': {'Definition': 'CONFERENCE refers to specific events or gatherings where experts, researchers, and professionals convene to present and discuss their work in a particular field or discipline.', 'Guidelines': "Exercise caution when labeling entities that could refer to institutions, organizations, or associations rather than specific events. Take care with ambiguous terms like 'International Journal of Computer Vision', which may refer to a publication rather than a conference."}}
+
+instruction = slimer_prompter.generate_prompt(
+    ne_tags=", ".join(ne_types_list),
+    def_and_guidelines=json.dumps(def_guidelines_per_NE_dict, indent=2),
+    expected_json_format=json.dumps({k: [] for k in def_guidelines_per_NE_dict.keys()}, indent=2)
+)
+
+input_text = 'Typical generative model approaches include naive Bayes classifier s , Gaussian mixture model s , variational autoencoders and others .'
+
 # this prompter formats the input text to analyze together with the SLIMER instruction
 input_instruction_prompter = Prompter('LLaMA3-chat-NOheaders', template_path='./src/SFT_finetuning/templates')
 
 system_message = "You are a helpful NER assistant designed to output JSON."
 conversation = [
     {"role": "system", "content": system_message},
-    {"role": "user", "content": input_instruction_prompter.generate_prompt(input=
+    {"role": "user", "content": input_instruction_prompter.generate_prompt(input=input_text, instruction=instruction)},  # the input_text + instruction
 ]
 prompt = tokenizer.apply_chat_template(conversation, tokenize=False, truncation=True, max_length=cutoff_len, add_generation_prompt=True)
 
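For context, a minimal sketch (not part of this commit) of how the formatted prompt would typically be run with vLLM and the JSON answer parsed; it reuses the variables from the snippet above, and the fallback on malformed output is an assumption:

import json

# greedy decoding of the single formatted prompt (vLLM's generate() accepts a list of prompts)
outputs = vllm_model.generate([prompt], sampling_params)
generated_text = outputs[0].outputs[0].text

# SLIMER-PARALLEL is instructed to answer with a JSON object mapping each NE type to a list of extracted spans
try:
    predictions_per_NE_type = json.loads(generated_text)
except json.JSONDecodeError:
    # assumption: fall back to empty predictions if the model output is not valid JSON
    predictions_per_NE_type = {ne: [] for ne in ne_types_list}
print(predictions_per_NE_type)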