Spaces:
Running
on
T4
Update app.py
Browse files
app.py
CHANGED
@@ -13,7 +13,7 @@ os.environ["RWKV_CUDA_ON"] = '1' # if '1' then use CUDA kernel for seq mode (muc
|
|
13 |
|
14 |
from rwkv.model import RWKV
|
15 |
model_path = hf_hub_download(repo_id="BlinkDL/rwkv-4-pile-7b", filename=f"{title}.pth")
|
16 |
-
model = RWKV(model=model_path, strategy='cuda fp16i8 *…')  [NOTE: removed line truncated in page capture — original strategy string not fully recoverable]
|
17 |
from rwkv.utils import PIPELINE, PIPELINE_ARGS
|
18 |
pipeline = PIPELINE(model, "20B_tokenizer.json")
|
19 |
|
@@ -55,6 +55,8 @@ def evaluate(
|
|
55 |
|
56 |
instruction = instruction.strip()
|
57 |
input = input.strip()
|
|
|
|
|
58 |
ctx = generate_prompt(instruction, input)
|
59 |
|
60 |
gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
|
@@ -88,10 +90,16 @@ def evaluate(
|
|
88 |
torch.cuda.empty_cache()
|
89 |
yield out_str.strip()
|
90 |
|
|
|
|
|
|
|
|
|
|
|
|
|
91 |
g = gr.Interface(
|
92 |
fn=evaluate,
|
93 |
inputs=[
|
94 |
-
gr.components.Textbox(lines=2, label="Instruction", value="Tell me about…")  [NOTE: removed line truncated in page capture — original default value not fully recoverable]
|
95 |
gr.components.Textbox(lines=2, label="Input", placeholder="none"),
|
96 |
gr.components.Slider(minimum=10, maximum=250, step=10, value=200), # token_count
|
97 |
gr.components.Slider(minimum=0.2, maximum=2.0, step=0.1, value=1.0), # temperature
|
@@ -107,6 +115,8 @@ g = gr.Interface(
|
|
107 |
],
|
108 |
title=f"🐦Raven {title}",
|
109 |
description="Raven is [RWKV 7B](https://github.com/BlinkDL/ChatRWKV) finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and more.",
|
|
|
|
|
110 |
)
|
111 |
g.queue(concurrency_count=1, max_size=10)
|
112 |
g.launch(share=False)
|
|
|
13 |
|
14 |
from rwkv.model import RWKV
|
15 |
model_path = hf_hub_download(repo_id="BlinkDL/rwkv-4-pile-7b", filename=f"{title}.pth")
|
16 |
+
model = RWKV(model=model_path, strategy='cuda fp16i8 *10 -> cuda fp16')
|
17 |
from rwkv.utils import PIPELINE, PIPELINE_ARGS
|
18 |
pipeline = PIPELINE(model, "20B_tokenizer.json")
|
19 |
|
|
|
55 |
|
56 |
instruction = instruction.strip()
|
57 |
input = input.strip()
|
58 |
+
if len(instruction) == 0:
|
59 |
+
return 'Error: please enter some instruction'
|
60 |
ctx = generate_prompt(instruction, input)
|
61 |
|
62 |
gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
|
|
|
90 |
torch.cuda.empty_cache()
|
91 |
yield out_str.strip()
|
92 |
|
93 |
+
examples = [
|
94 |
+
["Tell me about ravens.", 200, 1.0, 0.7, 0.2, 0.2],
|
95 |
+
["Explain the following metaphor: Life is like cats.", 200, 1.0, 0.7, 0.2, 0.2],
|
96 |
+
["Write a python function to read data from an excel file.", 200, 1.0, 0.7, 0.2, 0.2],
|
97 |
+
]
|
98 |
+
|
99 |
g = gr.Interface(
|
100 |
fn=evaluate,
|
101 |
inputs=[
|
102 |
+
gr.components.Textbox(lines=2, label="Instruction", value="Tell me about ravens."),
|
103 |
gr.components.Textbox(lines=2, label="Input", placeholder="none"),
|
104 |
gr.components.Slider(minimum=10, maximum=250, step=10, value=200), # token_count
|
105 |
gr.components.Slider(minimum=0.2, maximum=2.0, step=0.1, value=1.0), # temperature
|
|
|
115 |
],
|
116 |
title=f"🐦Raven {title}",
|
117 |
description="Raven is [RWKV 7B](https://github.com/BlinkDL/ChatRWKV) finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and more.",
|
118 |
+
examples=examples,
|
119 |
+
cache_examples=False,
|
120 |
)
|
121 |
g.queue(concurrency_count=1, max_size=10)
|
122 |
g.launch(share=False)
|