{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "55ec4c0c-65cc-4816-86df-c40b55f9c2d5",
   "metadata": {},
   "source": [
    "### Tracing\n",
    "\n",
    "Optionally, use [LangSmith](https://docs.smith.langchain.com/) for tracing (shown at bottom)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "68fed362-871a-46df-8ba0-579797ff2e9c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# import os\n",
    "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
    "# os.environ[\"LANGCHAIN_ENDPOINT\"] = \"https://api.smith.langchain.com\"\n",
    "# os.environ[\"LANGCHAIN_API_KEY\"] = \"\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c059c3a3-7f01-4d46-8289-fde4c1b4155f",
   "metadata": {},
   "source": [
    "## Configuration\n",
    "\n",
    "Decide whether to run locally and select the LLM to use with Ollama."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "2f4db331-c4d0-4c7c-a9a5-0bebc8a89c6c",
   "metadata": {},
   "outputs": [],
   "source": [
    "run_local = \"Yes\"\n",
    "local_llm = \"llama2:13b\"  # Ollama model tags use a colon, e.g. llama2:13b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "632ae5bb-8b63-43a8-bfb8-da05d1c1bde4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# !pip install langchain_nomic"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6e2b6eed-3b3f-44b5-a34a-4ade1e94caf0",
   "metadata": {},
   "source": [
    "## Index\n",
    "\n",
    "Let's index a PDF document (an open human nutrition textbook)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "d3f4d43f-eb93-4f7d-9cab-1ab3c7de6c6a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Download PDF file\n",
    "import os\n",
    "import requests\n",
    "\n",
    "# Local path to save the PDF (assumed filename; change as needed)\n",
    "pdf_path = \"human-nutrition-text.pdf\"\n",
    "\n",
    "# Download PDF if it doesn't already exist\n",
    "if not os.path.exists(pdf_path):\n",
    "    print(\"File doesn't exist, downloading...\")\n",
    "\n",
    "    # The URL of the PDF you want to download\n",
    "    url = \"https://pressbooks.oer.hawaii.edu/humannutrition2/open/download?type=pdf\"\n",
    "\n",
    "    # Send a GET request to the URL\n",
    "    response = requests.get(url)\n",
    "\n",
    "    # Check if the request was successful\n",
    "    if response.status_code == 200:\n",
    "        # Open a file in binary write mode and save the content to it\n",
    "        with open(pdf_path, \"wb\") as file:\n",
    "            file.write(response.content)\n",
    "        print(f\"The file has been downloaded and saved as {pdf_path}\")\n",
    "    else:\n",
    "        print(f\"Failed to download the file. Status code: {response.status_code}\")\n",
    "else:\n",
    "    print(f\"File {pdf_path} exists.\")"
   ]
  },
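  {
   "cell_type": "markdown",
   "id": "1f3a8c2e-9b44-4d10-a7c1-5e2d8f0b6a31",
   "metadata": {},
   "source": [
    "As a quick sanity check, we can open the downloaded PDF with PyMuPDF and confirm it parses. A minimal sketch, assuming `pdf_path` from the cell above."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2c7e5d19-4a02-4b8f-bd63-90f1a2c4e857",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: open the PDF and report basic stats (assumes pdf_path above)\n",
    "import fitz  # PyMuPDF\n",
    "\n",
    "doc = fitz.open(pdf_path)\n",
    "print(f\"Pages: {len(doc)}\")\n",
    "print(doc[0].get_text()[:100])  # first 100 characters of the first page\n",
    "doc.close()"
   ]
  },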
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a5debec4-983b-462e-b871-81b8cf3dd33b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# # Requires !pip install PyMuPDF, see: https://github.com/pymupdf/pymupdf\n",
    "# import fitz  # (pymupdf, found this is better than pypdf for our use case, note: licence is AGPL-3.0, keep that in mind if you want to use any code commercially)\n",
    "# from tqdm.auto import tqdm  # for progress bars, requires !pip install tqdm\n",
    "\n",
    "# def text_formatter(text: str) -> str:\n",
    "#     \"\"\"Performs minor formatting on text.\"\"\"\n",
    "#     cleaned_text = text.replace(\"\\n\", \" \").strip()  # note: this might be different for each doc (best to experiment)\n",
    "\n",
    "#     # Other potential text formatting functions can go here\n",
    "#     return cleaned_text\n",
    "\n",
    "# # Open PDF and get lines/pages\n",
    "# # Note: this only focuses on text, rather than images/figures etc\n",
    "# def open_and_read_pdf(pdf_path: str) -> str:\n",
    "#     \"\"\"\n",
    "#     Opens a PDF file and reads its text content page by page.\n",
    "\n",
    "#     Parameters:\n",
    "#         pdf_path (str): The file path to the PDF document to be opened and read.\n",
    "\n",
    "#     Returns:\n",
    "#         str: The cleaned text of all pages concatenated into a single string.\n",
    "#     \"\"\"\n",
    "#     doc = fitz.open(pdf_path)  # open a document\n",
    "#     pages_and_texts = \"\"\n",
    "#     for page_number, page in tqdm(enumerate(doc)):  # iterate the document pages\n",
    "#         text = page.get_text()  # get plain text encoded as UTF-8\n",
    "#         text = text_formatter(text)\n",
    "#         pages_and_texts += text\n",
    "#         # Per-page statistics (kept for reference; a runnable sketch follows below):\n",
    "#         # pages_and_texts.append({\"page_number\": page_number - 41,  # adjust page numbers since our PDF starts on page 42\n",
    "#         #                         \"page_char_count\": len(text),\n",
    "#         #                         \"page_word_count\": len(text.split(\" \")),\n",
    "#         #                         \"page_sentence_count_raw\": len(text.split(\". \")),\n",
    "#         #                         \"page_token_count\": len(text) / 4,  # 1 token = ~4 chars, see: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them\n",
    "#         #                         \"text\": text})\n",
    "#     return pages_and_texts\n",
    "\n",
    "# pages_and_texts = open_and_read_pdf(pdf_path=pdf_path)\n",
    "# pages_and_texts[:100]  # first 100 characters"
   ]
  },
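  {
   "cell_type": "markdown",
   "id": "4e8b1a63-2d07-49c5-b9f4-7a3c6e0d5128",
   "metadata": {},
   "source": [
    "The commented-out cell above sketches per-page statistics. Below is a runnable version of that idea, a minimal sketch assuming `pdf_path` from the download cell (page numbers are left unadjusted)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5d92c7f0-8e11-4a36-9c85-b4f6a1e23d79",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Runnable sketch of the per-page statistics idea from the commented cell above.\n",
    "# Assumes pdf_path points at the downloaded PDF; page numbers are unadjusted.\n",
    "import fitz  # PyMuPDF\n",
    "from tqdm.auto import tqdm\n",
    "\n",
    "doc = fitz.open(pdf_path)\n",
    "pages_and_stats = []\n",
    "for page_number, page in tqdm(enumerate(doc), total=len(doc)):\n",
    "    text = page.get_text().replace(\"\\n\", \" \").strip()\n",
    "    pages_and_stats.append({\"page_number\": page_number,\n",
    "                            \"page_char_count\": len(text),\n",
    "                            \"page_word_count\": len(text.split(\" \")),\n",
    "                            \"page_sentence_count_raw\": len(text.split(\". \")),\n",
    "                            \"page_token_count\": len(text) / 4,  # rough heuristic: 1 token ~= 4 chars\n",
    "                            \"text\": text})\n",
    "\n",
    "pages_and_stats[:2]"
   ]
  },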
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c19560ff-2808-406a-aa70-b8c4d303121e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# !pip install fastembed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bb8b789b-475b-4e1b-9c66-03504c837830",
   "metadata": {},
   "outputs": [],
   "source": [
    "# from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter\n",
    "# from langchain_community.document_loaders import WebBaseLoader\n",
    "# from langchain_community.vectorstores import Chroma\n",
    "# from langchain_mistralai import MistralAIEmbeddings\n",
    "# # from langchain_nomic.embeddings import NomicEmbeddings\n",
    "# from langchain_community.embeddings import OllamaEmbeddings\n",
    "# from langchain_community.embeddings.fastembed import FastEmbedEmbeddings\n",
    "\n",
    "# # Split\n",
    "# # text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(\n",
    "# #     chunk_size=500, chunk_overlap=100\n",
    "# # )\n",
    "\n",
    "# text_splitter = CharacterTextSplitter(\n",
    "#     chunk_size=1000,\n",
    "#     chunk_overlap=200,\n",
    "#     separator=\"\\n\"\n",
    "# )\n",
    "\n",
    "# all_splits = text_splitter.create_documents(pages_and_texts)\n",
    "\n",
    "# # Embed and index\n",
    "# if run_local == \"Yes\":\n",
    "#     embedding = FastEmbedEmbeddings(model_name=\"BAAI/bge-base-en-v1.5\")\n",
    "# else:\n",
    "#     embedding = MistralAIEmbeddings(mistral_api_key=mistral_api_key)  # requires mistral_api_key to be set\n",
    "\n",
    "# # Index\n",
    "# vectorstore = Chroma.from_documents(\n",
    "#     documents=all_splits,\n",
    "#     collection_name=\"rag-chroma\",\n",
    "#     embedding=embedding,\n",
    "# )\n",
    "# retriever = vectorstore.as_retriever()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bc1efd13-576f-4bae-996b-81dd8f8863df",
   "metadata": {},
   "outputs": [],
   "source": [
    "import fitz  # PyMuPDF\n",
    "\n",
    "def text_formatter(text: str) -> str:\n",
    "    \"\"\"Performs minor formatting on text.\"\"\"\n",
    "    cleaned_text = text.replace(\"\\n\", \" \").strip()  # note: this might be different for each doc (best to experiment)\n",
    "\n",
    "    # Other potential text formatting functions can go here\n",
    "    return cleaned_text\n",
    "\n",
    "def extract_text_from_pdf(pdf_path: str) -> list[str]:\n",
    "    \"\"\"Extracts and cleans the text of each page, returning one string per page.\"\"\"\n",
    "    document = fitz.open(pdf_path)\n",
    "    pages_and_texts = []\n",
    "    for page_num in range(len(document)):\n",
    "        page = document.load_page(page_num)\n",
    "        text = page.get_text(\"text\")\n",
    "        text = text_formatter(text)\n",
    "        pages_and_texts.append(text)\n",
    "    return pages_and_texts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "829ad72b-d8ac-4c59-b1fe-9524692bdf80",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Function to read a text file and split it into a list of chunks\n",
    "def read_and_split_text(file_path, delimiter=\"\\t\\n\\n\", max_len=10000):\n",
    "    with open(file_path, 'r') as file:\n",
    "        text = file.read()\n",
    "    # Split the text based on the provided delimiter\n",
    "    text_list = text.split(delimiter)\n",
    "    res = []\n",
    "    for te in text_list:\n",
    "        if len(te) < max_len:\n",
    "            res.append(te)\n",
    "        else:\n",
    "            # Assumed completion (the original cell was truncated here):\n",
    "            # break over-long chunks into max_len-sized pieces\n",
    "            res.extend(te[i:i + max_len] for i in range(0, len(te), max_len))\n",
    "    return res"
   ]
  },
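  {
   "cell_type": "markdown",
   "id": "6a04d9b2-3f58-4c71-a2e6-8d19c5f0b364",
   "metadata": {},
   "source": [
    "Putting the pieces together: a minimal indexing sketch that mirrors the commented-out cell above. Assumptions: the local path is taken (`run_local == \"Yes\"`), the `BAAI/bge-base-en-v1.5` FastEmbed model is used, and `pdf_path` and `extract_text_from_pdf` come from the cells above; the sample query is purely illustrative."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7b15e8c4-0d26-4f93-b571-c2a8d4e96f10",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal end-to-end indexing sketch (mirrors the commented-out cell above).\n",
    "# Assumptions: local run, pdf_path and extract_text_from_pdf from earlier cells.\n",
    "from langchain.text_splitter import CharacterTextSplitter\n",
    "from langchain_community.embeddings.fastembed import FastEmbedEmbeddings\n",
    "from langchain_community.vectorstores import Chroma\n",
    "\n",
    "pages_and_texts = extract_text_from_pdf(pdf_path)\n",
    "\n",
    "# Split page texts into overlapping chunks\n",
    "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200, separator=\"\\n\")\n",
    "all_splits = text_splitter.create_documents(pages_and_texts)\n",
    "\n",
    "# Embed locally with FastEmbed and index into Chroma\n",
    "embedding = FastEmbedEmbeddings(model_name=\"BAAI/bge-base-en-v1.5\")\n",
    "vectorstore = Chroma.from_documents(\n",
    "    documents=all_splits,\n",
    "    collection_name=\"rag-chroma\",\n",
    "    embedding=embedding,\n",
    ")\n",
    "retriever = vectorstore.as_retriever()\n",
    "\n",
    "# Quick retrieval check (hypothetical query)\n",
    "docs = retriever.get_relevant_documents(\"What are macronutrients?\")\n",
    "print(docs[0].page_content[:200])"
   ]
  },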