robinhad committed
Commit d92dc0a · verified · 1 Parent(s): 4685ad0

Upload 2 files

Files changed (2)
  1. app.py +62 -3
  2. requirements.txt +4 -1
app.py CHANGED
@@ -1,12 +1,34 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+from datetime import datetime
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+#client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+lora_name = "robinhad/UAlpaca-1.1-Mistral-7B"
 
+from peft import PeftModel
+from transformers import LlamaTokenizer, LlamaForCausalLM, BitsAndBytesConfig
+from torch import bfloat16
+model_name = "mistralai/Mistral-7B-v0.1"
 
+quant_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_compute_dtype=bfloat16
+)
+tokenizer = LlamaTokenizer.from_pretrained(model_name)
+model = LlamaForCausalLM.from_pretrained(
+    model_name,
+    quantization_config=quant_config,
+    device_map="auto",
+)
+model = PeftModel.from_pretrained(model, lora_name)
+
+
+# will be used with normal template
 def respond(
     message,
     history: list[tuple[str, str]],
@@ -39,10 +61,23 @@ def respond(
         response += token
         yield response
 
+
+def ask(instruction: str, context: str = None):
+    print(datetime.now(), instruction, context)
+    full_question = ""
+    if context is None:
+ prepend = "Below is an instruction that describes a task. Write a response that appropriately completes the request."
+        full_question = prepend + f"### Instruction:\n{instruction}\n\n### Response:\n"
+    else:
+        prepend = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
+        full_question = prepend + f"### Instruction:\n{instruction}\n\n### Input:\n{context}\n\n### Response:\n"
+    full_question = tokenizer.encode(full_question, return_tensors="pt")
+    return tokenizer.batch_decode(model.generate(full_question, max_new_tokens=300))[0].split("### Response:")[1].strip().replace("</s>", "")
+
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
-demo = gr.ChatInterface(
+"""demo = gr.ChatInterface(
     respond,
     additional_inputs=[
         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
@@ -56,8 +91,32 @@ demo = gr.ChatInterface(
             label="Top-p (nucleus sampling)",
         ),
     ],
+)"""
+
+model_name = "robinhad/UAlpaca-1.1-Mistral-7B"
+
+
+def image_classifier(inp):
+    return {"cat": 0.3, "dog": 0.7}
+
+
+demo = gr.Interface(
+    title=f"Inference demo for '{model_name}' model, instruction-tuned for Ukrainian",
+    fn=ask,
+    inputs=[gr.Textbox(label="Input"), gr.Textbox(label="Context")],
+    outputs="label",
+    examples=[
+        ["Як звали батька Тараса Григоровича Шевченка?", None],
+        ["Як можна заробити нелегально швидко гроші?", None],
+        ["Яка найвища гора в Україні?", None],
+        ["Розкажи історію про Івасика-Телесика", None],
+        ["Яка з цих гір не знаходиться у Європі?", "Говерла, Монблан, Гран-Парадізо, Еверест"],
+        [
+            "Дай відповідь на питання", "Чому у качки жовті ноги?"
+        ]],
 )
+demo.launch()
 
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
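
The new ask helper wraps each request in the Alpaca instruction template before tokenizing. For a call without extra context, e.g. ask("Яка найвища гора в Україні?") ("What is the highest mountain in Ukraine?"), the string handed to the tokenizer renders as:

Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
Яка найвища гора в Україні?

### Response:

The model's continuation after the final "### Response:" marker is the answer, which is why the return line splits on that marker, strips whitespace, and drops the trailing </s> end-of-sequence token.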
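For trying the checkpoint outside this Space, below is a minimal standalone sketch of the same loading-and-generation path. It reuses the model and adapter IDs from the commit; the explicit .to(model.device) is an addition here, since with device_map="auto" the encoded prompt otherwise stays on the CPU while the weights may sit on a GPU:

# Minimal sketch, not part of the commit: 4-bit base model + LoRA adapter, one prompt.
from peft import PeftModel
from torch import bfloat16
from transformers import BitsAndBytesConfig, LlamaForCausalLM, LlamaTokenizer

base_model = "mistralai/Mistral-7B-v0.1"
adapter = "robinhad/UAlpaca-1.1-Mistral-7B"

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,               # store weights in 4 bits
    bnb_4bit_quant_type="nf4",       # NormalFloat4 quantization
    bnb_4bit_use_double_quant=True,  # also quantize the quantization constants
    bnb_4bit_compute_dtype=bfloat16, # run matmuls in bfloat16
)
tokenizer = LlamaTokenizer.from_pretrained(base_model)
model = LlamaForCausalLM.from_pretrained(
    base_model, quantization_config=quant_config, device_map="auto"
)
model = PeftModel.from_pretrained(model, adapter)

prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nЯка найвища гора в Україні?\n\n### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)  # keep inputs on the model's device
output = model.generate(**inputs, max_new_tokens=300)
print(tokenizer.batch_decode(output)[0].split("### Response:")[1].strip())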
requirements.txt CHANGED
@@ -2,4 +2,7 @@ huggingface_hub==0.22.2
2
  numpy<2
3
  transformers
4
  bitsandbytes
5
- torch
 
 
 
 
2
  numpy<2
3
  transformers
4
  bitsandbytes
5
+ torch
6
+ peft
7
+ sentencepiece
8
+ protobuf
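
The added dependencies map onto the new code path: peft provides PeftModel for the LoRA adapter, while sentencepiece and protobuf back the slow LlamaTokenizer used above (torch is re-listed unchanged). A quick import check, a sketch rather than part of the commit:

# Sketch: confirm the newly added packages resolve before the Space boots.
import peft              # PeftModel for the LoRA adapter
import sentencepiece     # SentencePiece model behind LlamaTokenizer
import google.protobuf   # protobuf runtime used by tokenizer conversion
print("peft", peft.__version__)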