EITD committed
Commit 68cb88c · 1 Parent(s): 4edc451
Files changed (2)
  1. app.py +2 -3
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,12 +1,11 @@
-from peft import AutoPeftModelForCausalLM
-from transformers import AutoTokenizer, TextStreamer
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
 # client = InferenceClient("EITD/lora_model", token=os.getenv("HF_TOKEN"))
 
-model = AutoPeftModelForCausalLM.from_pretrained(
+model = AutoModelForCausalLM.from_pretrained(
     "EITD/orpo_llama", # YOUR MODEL YOU USED FOR TRAINING
 )
 tokenizer = AutoTokenizer.from_pretrained("EITD/orpo_llama")
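The app.py change swaps the PEFT adapter loader (AutoPeftModelForCausalLM) for a direct load through plain transformers (AutoModelForCausalLM). The sketch below is illustrative only and not part of the commit: it assumes "EITD/orpo_llama" hosts full merged weights and a chat template, and the prompt text and generation settings are placeholders.

# Illustrative sketch: load the model with plain transformers and generate one
# reply. Assumes "EITD/orpo_llama" contains full (merged) weights and a chat
# template; the prompt and max_new_tokens are made-up placeholders.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("EITD/orpo_llama")
tokenizer = AutoTokenizer.from_pretrained("EITD/orpo_llama")

messages = [{"role": "user", "content": "Hello, who are you?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
output_ids = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))

If the repo held only LoRA adapter weights rather than merged weights, this direct load would not work; the earlier AutoPeftModelForCausalLM path (with peft installed) would still be needed in that case.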
requirements.txt CHANGED
@@ -1,5 +1,5 @@
 # huggingface_hub==0.25.2
-peft==0.13.2
+# peft==0.13.2
 transformers==4.46.3
 # bitsandbytes==0.42.0
 # torch==2.5.1