davanstrien HF staff committed on
Commit
4e8ec3f
Β·
verified Β·
1 Parent(s): 66f9010

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -45
app.py CHANGED
@@ -1,76 +1,66 @@
1
  import gradio as gr
2
  import json
3
- import random
4
  from datetime import datetime
5
  from theme import TufteInspired
6
- from transformers import AutoTokenizer, AutoModelForCausalLM
7
- from huggingface_hub import login
8
- import torch
9
  import os
 
 
10
  import spaces
 
 
 
 
 
 
11
 
12
# Authenticate with the Hugging Face Hub so gated checkpoints (e.g. Llama)
# can be downloaded. The token is injected via the Space's secrets.
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN:
    # Guard: calling login(None) when the secret is missing would fail
    # (or fall back to an interactive prompt, which a Space cannot answer).
    login(HF_TOKEN)

# Pool of candidate models; one is picked at random per generation.
model_list = [
    "meta-llama/Llama-2-7b-chat-hf",
]
19
-
20
# Pick a model at random so successive blurbs may come from different LLMs.
@spaces.GPU(duration=120)  # extended GPU window: checkpoint loading is slow
def load_random_model():
    """Choose a random entry from ``model_list`` and load it onto the GPU.

    Returns:
        tuple: ``(model_id, model, tokenizer)`` for the chosen checkpoint.
    """
    chosen_id = random.choice(model_list)
    tok = AutoTokenizer.from_pretrained(chosen_id, add_special_tokens=True)
    lm = AutoModelForCausalLM.from_pretrained(
        chosen_id, torch_dtype=torch.bfloat16, device_map="auto"
    )
    return chosen_id, lm, tok
27
 
28
 
29
@spaces.GPU
def generate_blurb():
    """Generate a blurb for a made-up book with a randomly chosen model.

    Returns:
        str: "Model used: <id>" followed by the generated blurb text.
    """
    model_id, model, tokenizer = load_random_model()
    prompt = "Write a blurb for a made-up book:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    with torch.no_grad():
        # FIX: max_new_tokens instead of max_length — max_length counts the
        # prompt tokens too, silently shrinking the generation budget.
        outputs = model.generate(**inputs, max_new_tokens=200, num_return_sequences=1)

    # FIX: decode only the newly generated tokens so the blurb does not
    # start with an echo of the prompt.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    blurb = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return f"Model used: {model_id}\n\nBlurb: {blurb}"
40
 
41
# Append one JSON record per vote so the log can be replayed later.
def log_blurb_and_vote(blurb, vote):
    """Persist a (timestamp, blurb, vote) record to blurb_log.jsonl.

    Returns:
        str: confirmation message shown in the UI.
    """
    record = {
        "timestamp": datetime.now().isoformat(),
        "blurb": blurb,
        "vote": vote,
    }
    with open("blurb_log.jsonl", "a") as log_file:
        log_file.write(json.dumps(record) + "\n")
    return f"Logged: {vote}"
51
 
 
52
# Build the custom Tufte-style theme for the demo.
tufte_theme = TufteInspired()


def _log_upvote(blurb):
    # Named wrappers (instead of inline lambdas) keep the click wiring readable.
    return log_blurb_and_vote(blurb, "upvote")


def _log_downvote(blurb):
    return log_blurb_and_vote(blurb, "downvote")


# Assemble the Gradio UI: a generate button, the blurb box, and two vote buttons.
with gr.Blocks(theme=tufte_theme) as demo:
    gr.Markdown("<h1 style='text-align: center;'>Would you read it?</h1>")
    gr.Markdown("Click the button to generate a blurb for a made-up book using a random model, then vote on its quality.")

    with gr.Row():
        write_button = gr.Button("Write a Blurb", variant="primary")

    blurb_box = gr.Textbox(label="Generated Blurb", lines=8, interactive=False)

    with gr.Row():
        like_button = gr.Button("👍 would read")
        dislike_button = gr.Button("👎 wouldn't read")

    vote_status = gr.Textbox(label="Vote Status", interactive=False)

    write_button.click(generate_blurb, outputs=blurb_box)
    like_button.click(_log_upvote, inputs=blurb_box, outputs=vote_status)
    dislike_button.click(_log_downvote, inputs=blurb_box, outputs=vote_status)
 
 
 
 
 
 
 
 
74
 
75
# Entry point: start the Gradio server only when executed directly.
if __name__ == "__main__":
    demo.launch()
 
1
  import gradio as gr
2
  import json
 
3
  from datetime import datetime
4
  from theme import TufteInspired
5
+ import glob
 
 
6
  import os
7
+ import uuid
8
+ from pathlib import Path
9
  import spaces
10
+ import torch
11
+ import transformers
12
+ from huggingface_hub import CommitScheduler, hf_hub_download, login
13
+ from transformers import AutoTokenizer, AutoModelForCausalLM
14
+ from outlines import models, generate
15
+ from gradio import update
16
 
17
# Single fixed model used for all blurbs (gated repo: requires HF auth to download).
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
# NOTE(review): `add_special_tokens` is an encode-time argument; passing it to
# from_pretrained likely has no effect — confirm and consider removing.
# NOTE(review): this tokenizer is not used by any code visible in this file;
# verify whether it is needed before keeping the eager download at import time.
tokenizer = AutoTokenizer.from_pretrained(model_id, add_special_tokens=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
 
21
# Cache for the outlines generator so the 8B model is loaded once, not
# rebuilt from scratch on every button click.
_cached_generator = None


@spaces.GPU(duration=120)  # extended GPU window: first call loads the model
def generate_blurb(history):
    """Generate a book blurb.

    Args:
        history: unused; kept so the signature stays compatible with the
            Gradio callback that registers this function.

    Returns:
        str: the generated blurb text.
    """
    global _cached_generator
    if _cached_generator is None:
        model = models.transformers(model_id)
        _cached_generator = generate.text(model)
    return _cached_generator("Write a blurb for a book")
 
 
 
 
 
27
 
28
# Function to log blurb and vote
def log_blurb_and_vote(blurb, vote):
    """Append a JSON line ``{timestamp, blurb, vote}`` to blurb_log.jsonl.

    Args:
        blurb: the generated blurb text being voted on.
        vote: "upvote" or "downvote".

    Returns:
        str: confirmation message shown in the UI.
    """
    log_entry = {"timestamp": datetime.now().isoformat(), "blurb": blurb, "vote": vote}
    # Explicit UTF-8 so non-ASCII blurb text is logged correctly on any locale.
    with open("blurb_log.jsonl", "a", encoding="utf-8") as f:
        f.write(json.dumps(log_entry) + "\n")
    return f"Logged: {vote}"
34
 
35
+
36
# Create custom theme
tufte_theme = TufteInspired()

# Create Gradio interface
with gr.Blocks(theme=tufte_theme) as demo:
    gr.Markdown("<h1 style='text-align: center;'>Would you read it?</h1>")
    gr.Markdown(
        "Click the button to generate a blurb for a made-up book, then vote on its quality."
    )
    with gr.Row():
        generate_btn = gr.Button("Write a Blurb", variant="primary")
    blurb_output = gr.Textbox(label="Generated Blurb", lines=5, interactive=False)

    with gr.Row():
        upvote_btn = gr.Button("👍 would read")
        downvote_btn = gr.Button("👎 wouldn't read")

    vote_output = gr.Textbox(label="Vote Status", interactive=False)

    # BUG FIX: generate_blurb takes a `history` argument, but the click was
    # registered with no `inputs`, so Gradio would invoke it with zero
    # arguments and raise TypeError. Supply the unused argument explicitly.
    generate_btn.click(lambda: generate_blurb(None), outputs=blurb_output)
    upvote_btn.click(
        lambda x: log_blurb_and_vote(x, "upvote"),
        inputs=blurb_output,
        outputs=vote_output,
    )
    downvote_btn.click(
        lambda x: log_blurb_and_vote(x, "downvote"),
        inputs=blurb_output,
        outputs=vote_output,
    )
64
 
65
# Entry point: start the Gradio server only when executed directly.
if __name__ == "__main__":
    demo.launch()