Hzqhssn committed on
Commit 4c33ae7 • 1 Parent(s): 1de9ea7

initial push

Files changed (3)
  1. .gitignore +2 -0
  2. app.py +35 -0
  3. requirements.txt +10 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ # Virtual environments
+ venv
app.py ADDED
@@ -0,0 +1,35 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForMaskedLM
+ import torch._dynamo
+ torch._dynamo.config.suppress_errors = True
+
+ # Load the model and tokenizer
+ model_id = "answerdotai/ModernBERT-base"  # Replace with your conversational model if needed
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForMaskedLM.from_pretrained(model_id)
+
+ # Function for conversation
+ def conversation(input_text):
+     # Prepare the input text with a [MASK] token for a masked language model
+     inputs = tokenizer(input_text, return_tensors="pt")
+
+     # Generate predictions
+     outputs = model(**inputs)
+
+     masked_index = inputs["input_ids"][0].tolist().index(tokenizer.mask_token_id)
+     predicted_token_id = outputs.logits[0, masked_index].argmax(axis=-1)
+     predicted_token = tokenizer.decode(predicted_token_id)
+
+     return f"Predicted response: {predicted_token}"
+
+ # Define the Gradio interface
+ interface = gr.Interface(
+     fn=conversation,
+     inputs=gr.Textbox(label="Enter your text (include [MASK]):"),
+     outputs=gr.Textbox(label="Predicted Response"),
+     title="Masked Language Model Conversation",
+     description="Type a sentence with [MASK] to predict the masked word using ModernBERT."
+ )
+
+ # Launch the interface
+ interface.launch()
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ transformers
+ torch
+ requests
+ Pillow
+ open_clip_torch
+ diffusers
+ transformers
+ bloom
+ # This is only needed for local deployment
+ gradio
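
A quick way to sanity-check the masked-prediction logic from app.py locally, without starting the Gradio server (app.py calls interface.launch() at import time), is to reproduce the core steps inline. This is only an illustrative sketch: the example sentence is made up, and it assumes the transformers and torch packages from requirements.txt are installed.

# Minimal smoke test for the masked-prediction steps used in app.py's conversation()
# (reproduced inline rather than imported, so the Gradio UI is not launched).
from transformers import AutoTokenizer, AutoModelForMaskedLM
import torch

model_id = "answerdotai/ModernBERT-base"  # same checkpoint as app.py
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMaskedLM.from_pretrained(model_id)

text = "The capital of France is [MASK]."  # illustrative input, not part of the commit
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Locate the [MASK] position and decode the highest-scoring token, as app.py does
masked_index = inputs["input_ids"][0].tolist().index(tokenizer.mask_token_id)
predicted_token_id = outputs.logits[0, masked_index].argmax(dim=-1)
print(tokenizer.decode(predicted_token_id))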