import os

from openai import OpenAI

model = "gpt-3.5-turbo"

# Assumes the API key is stored in the "openai_key" environment variable.
client = OpenAI(
    api_key=os.environ.get("openai_key"),
)


def generate(text):
    # Wrap the user prompt in the single-turn chat format expected by the API.
    message = [{"role": "user", "content": text}]
    response = client.chat.completions.create(
        model=model,
        messages=message,
        temperature=0.2,
        max_tokens=800,
        frequency_penalty=0.0,
    )
    return response
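# --- Usage sketch (an assumption, not part of the original listing) ---
# generate() returns the full ChatCompletion object; the reply text lives at
# response.choices[0].message.content. The prompt below is only an example.
if __name__ == "__main__":
    reply = generate("Summarize the plot of Hamlet in two sentences.")
    print(reply.choices[0].message.content)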