import spaces  # must be imported at the very top
import gradio as gr
import os

# Get the Hugging Face access token
hf_token = os.getenv("HF_API_TOKEN")

# Base model name
base_model_name = "larry1129/meta-llama-3.1-8b-bnb-4bit"

# Adapter model name
adapter_model_name = "larry1129/WooWoof_AI"

# Global variables used to cache the model and tokenizer between calls
model = None
tokenizer = None

# Build the prompt from an instruction and optional input text
def generate_prompt(instruction, input_text=""):
    if input_text:
        prompt = f"""### Instruction:
{instruction}

### Input:
{input_text}

### Response:
"""
    else:
        prompt = f"""### Instruction:
{instruction}

### Response:
"""
    return prompt

# Response generation function, decorated with @spaces.GPU so it runs on a GPU worker
@spaces.GPU(duration=120)
def generate_response(instruction, input_text):
    global model, tokenizer

    if model is None:
        # Check whether bitsandbytes is installed
        import importlib.util
        if importlib.util.find_spec("bitsandbytes") is None:
            import subprocess
            subprocess.call(["pip", "install", "--upgrade", "bitsandbytes"])

        # Import GPU-dependent libraries inside the function
        import torch
        from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
        from peft import PeftModel

        # Create the 4-bit quantization config
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.float16
        )

        # Load the tokenizer
        tokenizer = AutoTokenizer.from_pretrained(base_model_name, use_auth_token=hf_token)

        # Load the base model
        base_model = AutoModelForCausalLM.from_pretrained(
            base_model_name,
            quantization_config=bnb_config,
            device_map="auto",
            use_auth_token=hf_token,
            trust_remote_code=True
        )

        # Load the adapter and apply it to the base model,
        # ignoring unknown arguments in the adapter config if necessary
        try:
            model = PeftModel.from_pretrained(
                base_model,
                adapter_model_name,
                torch_dtype=torch.float16,
                use_auth_token=hf_token
            )
        except TypeError as e:
            if "got an unexpected keyword argument 'model_type'" in str(e):
                # Patch the config by removing the 'model_type' key
                # (assumes the adapter files are available at this local path)
                import json
                config_path = os.path.join(adapter_model_name, 'adapter_config.json')
                with open(config_path, 'r') as f:
                    config = json.load(f)
                config.pop('model_type', None)
                with open(config_path, 'w') as f:
                    json.dump(config, f)

                # Reload the adapter with the patched config
                model = PeftModel.from_pretrained(
                    base_model,
                    adapter_model_name,
                    torch_dtype=torch.float16,
                    use_auth_token=hf_token
                )
            else:
                raise e

        # Set the pad token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = tokenizer.pad_token_id

        # Switch to evaluation mode
        model.eval()
    else:
        # Import the libraries needed for generation
        import torch

    # Build the prompt and run generation
    prompt = generate_prompt(instruction, input_text)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs.get("attention_mask"),
            max_new_tokens=128,
            temperature=0.7,
            top_p=0.95,
            do_sample=True,
        )

    # Decode and keep only the text after the "### Response:" marker
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    response = response.split("### Response:")[-1].strip()
    return response

# Create the Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter an instruction...", label="Instruction"),
        gr.Textbox(lines=2, placeholder="Optional additional input...", label="Input (optional)")
    ],
    outputs="text",
    title="WooWoof AI Interactive Chat",
    description="A large language model based on LLAMA 3.1 that takes an instruction and optional input.",
    allow_flagging="never"
)

# Launch the Gradio interface
iface.launch()