Update README
README.md (changed)
````diff
@@ -30,22 +30,25 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 # Load the tokenizer and model
-model_path =
-tokenizer
+model_path = 'nvidia/Minitron-4B-Base'
+tokenizer = AutoTokenizer.from_pretrained(model_path)
 
-device='cuda'
-dtype=torch.bfloat16
-model
+device = 'cuda'
+dtype = torch.bfloat16
+model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=dtype, device_map=device)
 
 # Prepare the input text
-prompt =
-
+prompt = 'Complete the paragraph: our solar system is'
+inputs = tokenizer.encode(prompt, return_tensors='pt').to(model.device)
 
 # Generate the output
-
+outputs = model.generate(inputs,
+                         max_length=20,
+                         num_return_sequences=1,
+                         pad_token_id=tokenizer.eos_token_id)
 
 # Decode and print the output
-output_text = tokenizer.decode(
+output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
 print(output_text)
 ```
````
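For quick experiments, the same generation can also be driven through the transformers `pipeline` helper. This is a minimal sketch, not part of the README diff above; it assumes the same `nvidia/Minitron-4B-Base` checkpoint and an available CUDA device.

```python
import torch
from transformers import pipeline

# Wrap the checkpoint in a text-generation pipeline
# (torch_dtype and device_map mirror the README's settings)
generator = pipeline(
    'text-generation',
    model='nvidia/Minitron-4B-Base',
    torch_dtype=torch.bfloat16,
    device_map='cuda',
)

# max_new_tokens counts only generated tokens, unlike max_length in the
# README snippet, which also counts the prompt tokens
result = generator(
    'Complete the paragraph: our solar system is',
    max_new_tokens=20,
    num_return_sequences=1,
)
print(result[0]['generated_text'])
```

Two details worth noting in the updated snippet: `max_length=20` includes the prompt tokens, so only a handful of new tokens are produced after this prompt, and passing `pad_token_id=tokenizer.eos_token_id` is the usual way to suppress the missing-pad-token warning for base models that define no dedicated padding token.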