import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import json

# Load the pre-trained model and tokenizer (replace with your preferred model)
model_name = "microsoft/DialoGPT-large"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Process user input and generate a response
def chat(message, history):
    # DialoGPT expects each turn to end with the EOS token
    input_ids = tokenizer(message + tokenizer.eos_token, return_tensors="pt")["input_ids"]

    # Generate a response with beam search to improve fluency
    generated_outputs = model.generate(
        input_ids,
        max_length=512,          # Adjust as needed for response length
        num_beams=5,             # Experiment with num_beams for better phrasing
        no_repeat_ngram_size=2,  # Prevent repeated phrases in responses
        early_stopping=True,     # Stop beam search once enough complete candidates exist
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 tokenizers have no pad token
    )

    # Decode only the newly generated tokens, skipping the prompt
    response = tokenizer.decode(
        generated_outputs[0, input_ids.shape[-1]:], skip_special_tokens=True
    )

    # Map each JSON file to the keyword that selects it
    json_files = {
        "fileone.json": "your_key_in_fileone",
        "filesecond.json": "your_key_in_filesecond",
        "filethird.json": "your_key_in_filethird",
        "filefourth.json": "your_key_in_filefourth",
        "filefifth.json": "your_key_in_filefifth",
    }

    if any(word in message.lower() for word in ["file", "data", "information"]):
        try:
            # Find the relevant JSON file based on keywords in the message
            relevant_file, relevant_key = next(
                (file, key)
                for file, key in json_files.items()
                if key.lower() in message.lower()
            )
            with open(relevant_file, "r") as f:
                data = json.load(f)
            relevant_info = data.get(relevant_key, "No relevant information found")
            response += f"\nHere's some information I found in {relevant_file}: {relevant_info}"
        except (FileNotFoundError, StopIteration):
            response += "\nCouldn't find the requested file or information."
        except json.JSONDecodeError:
            response += "\nError processing the JSON data."

    # history is managed by Gradio's chat components; it is unused here
    return response

# Wrap chat so errors surface in the UI instead of crashing the app
def chat_with_error_handling(message, history):
    try:
        return chat(message, history)
    except Exception as e:
        return f"An error occurred: {e}"

# chat(message, history) matches the signature gr.ChatInterface expects
interface = gr.ChatInterface(chat_with_error_handling)

# Launch the Gradio app with a public share link
interface.launch(share=True)
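
# --- Optional: multi-turn context (a sketch, not wired into chat() above) ---
# Per the DialoGPT model card, a multi-turn prompt is built by joining every
# previous turn with the EOS token. The helper below is hypothetical and
# assumes `history` is the list of [user, bot] pairs that gr.ChatInterface
# passes to its function by default.
def build_dialogpt_prompt(message, history):
    turns = []
    for user_turn, bot_turn in history:
        turns.extend([user_turn, bot_turn])
    turns.append(message)
    # Every turn, including the new message, ends with the EOS token
    return tokenizer.eos_token.join(turns) + tokenizer.eos_token

# Usage inside chat(): replace the input encoding with
#   prompt = build_dialogpt_prompt(message, history)
#   input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"]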