Update README.md
Browse files
README.md
CHANGED
@@ -57,7 +57,7 @@ pipeline = transformers.pipeline(
|
|
57 |
model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
|
58 |
)
|
59 |
|
60 |
-
messages = [{"role": "user", "content": "Explain what a Mixture of Experts is in less than 100 words."}]
|
61 |
prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
62 |
outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
|
63 |
print(outputs[0]["generated_text"])
|
|
|
57 |
model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
|
58 |
)
|
59 |
|
60 |
+
messages = [{"role": "user", "content": "explain what a mixture of experts is in less than 100 words."}]
|
61 |
prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
62 |
outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
|
63 |
print(outputs[0]["generated_text"])
|