from dotenv import load_dotenv
from genai.client import Client
from genai.credentials import Credentials
from genai.schema import (
    DecodingMethod,
    HumanMessage,
    ModerationHAP,
    ModerationHAPInput,
    ModerationHAPOutput,
    ModerationParameters,
    SystemMessage,
    TextGenerationParameters,
)

# make sure you have a .env file under genai root with
# GENAI_KEY=
# GENAI_API=
load_dotenv()


def heading(text: str) -> str:
    """Helper function for centering text."""
    return "\n" + f" {text} ".center(80, "=") + "\n"


def chat_llm(model_id: str, prompt: str, sys_prompt: str) -> str:
    """Send a single-turn chat request to a BAM-hosted model and return its reply.

    Usage::

        model_id = "mistralai/mixtral-8x7b-instruct-v01"
        # mistralai/mistral-7b-instruct-v0-2
        prompt = "What is NLP and how it has evolved over the years?"
        sys_prompt = (
            "You are a helpful, respectful and honest assistant. Always answer "
            "as helpfully as possible, while being safe. Your answers should "
            "not include any harmful, unethical, racist, sexist, toxic, "
            "dangerous, or illegal content. Please ensure that your responses "
            "are socially unbiased and positive in nature. If a question does "
            "not make any sense, or is not factually coherent, explain why "
            "instead of answering something incorrectly. If you don't know "
            "the answer to a question, please don't share false information."
        )
        response = chat_llm(model_id, prompt, sys_prompt)

    :param model_id: bam model_id
    :param prompt: input text
    :param sys_prompt: system prompt
    :return: the llm response (generated text of the first result)
    """
    # Sampling decoding with bounded output length; temperature/top_k/top_p
    # keep responses varied but reasonably focused.
    parameters = TextGenerationParameters(
        decoding_method=DecodingMethod.SAMPLE,
        max_new_tokens=128,
        min_new_tokens=30,
        temperature=0.7,
        top_k=50,
        top_p=1,
    )

    # Credentials come from the environment populated by load_dotenv() above.
    client = Client(credentials=Credentials.from_env())

    print(heading("Generating a chat response"))
    response = client.text.chat.create(
        model_id=model_id,
        messages=[
            SystemMessage(
                content=sys_prompt,
            ),
            HumanMessage(content=prompt),
        ],
        parameters=parameters,
    )

    conversation_id = response.conversation_id
    print(f"Conversation ID: {conversation_id}")
    print(f"Request: {prompt}")
    print(f"Response: {response.results[0].generated_text}")

    return response.results[0].generated_text