Update app.py
app.py CHANGED
@@ -52,7 +52,7 @@ multi_br_tag_pattern = re.compile(re.compile(r'<br>\s*(<br>\s*)*'))
 repl_linebreak = "\n"
 repl_empty_str = ""
 
-TITLE = "
+TITLE = "Galileo"
 
 ABSTRACT = """
 Stambecco is a Italian Instruction-following model based on the [LLaMA](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) model. It comes in two versions: 7b and 13b parameters. It is trained on an Italian version of the [GPT-4-LLM](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM) dataset, a dataset of `GPT-4` generated instruction-following data.
@@ -126,7 +126,8 @@ def load_model(
     model = LlamaForCausalLM.from_pretrained(
         base,
         load_in_8bit=True,
-        device_map="
+        device_map="from_pretrained",
+        load_in_8bit_fp32_cpu_offload=True
     )
     # model = PeftModel.from_pretrained(model, finetuned, device_map={'': 0})
 
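For context on the second hunk: recent transformers releases usually route 8-bit loading and fp32 CPU offload through a BitsAndBytesConfig object rather than bare keyword arguments to from_pretrained. The sketch below is only a rough, assumption-based illustration of that pattern, not the code in this repository; the checkpoint path, the device_map="auto" choice, and the dtype are placeholders.

import torch
from transformers import BitsAndBytesConfig, LlamaForCausalLM

# Hypothetical equivalent of the 8-bit load touched by this commit, expressed
# via BitsAndBytesConfig (requires the bitsandbytes and accelerate packages).
quant_config = BitsAndBytesConfig(
    load_in_8bit=True,
    llm_int8_enable_fp32_cpu_offload=True,  # keep modules that spill to the CPU in fp32
)

model = LlamaForCausalLM.from_pretrained(
    "path/to/llama-base",               # placeholder; the app passes its own base checkpoint
    quantization_config=quant_config,
    device_map="auto",                  # let accelerate place layers across GPU and CPU
    torch_dtype=torch.float16,
)

With device_map="auto", accelerate decides the layer placement, and the offload flag is what bitsandbytes expects when some modules end up on the CPU during an 8-bit load.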