Dataset preview schema (the column metadata was flattened in the original dump; ⌀ marks a nullable column). Each record below is one code-completion sample with four fields:

    inputs      string   lengths 312–52k    FIM prompt (file name, prefix, suffix, retrieved repo snippets)
    targets     string   lengths 1–3.1k ⌀   ground-truth completion
    block_type  string   11 classes         e.g. LINE_COMMENT, STATEMENT, FOR
    scenario    string   7 classes          e.g. prefix_suffix_full_complete_current_block_with_repo_rag_oracle
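Each `inputs` value below is a single flat string assembled from sentinel tokens that are visible in the samples: `<filename>`, `<fim_prefix>`, `<fim_suffix>`, and `<fim_middle>`, with retrieved repository snippets inlined at the top of the prefix. A minimal sketch of how such a record could be assembled and scored; `build_fim_prompt` and `exact_match` are hypothetical helpers, not part of the dataset:

```python
# Sketch: assembling one record in this dataset's FIM format. Sentinel
# tokens are copied from the samples; the helper names are assumptions.

def build_fim_prompt(filename: str, prefix: str, suffix: str) -> str:
    # The model is expected to generate the missing middle after
    # <fim_middle>; the `targets` field holds the ground-truth completion.
    return (
        f"<filename>{filename}"
        f"<fim_prefix>{prefix}"
        f"<fim_suffix>{suffix}"
        f"<fim_middle>"
    )

def exact_match(prediction: str, target: str) -> bool:
    # Whitespace-insensitive comparison; the benchmark's actual metric
    # is not specified in this preview.
    return " ".join(prediction.split()) == " ".join(target.split())

record = {
    "inputs": build_fim_prompt(
        "microsearch/src/microsearch/engine.py",
        "def idf(self, kw: str) -> float:\n    N = self.number_of_documents\n",
        "\n    return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)",
    ),
    "targets": "n_kw = len(self.get_urls(kw))",
    "block_type": "STATEMENT",
    "scenario": "prefix_suffix_full_complete_current_block_with_repo_rag_oracle",
}
```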
<filename>microagents/agents/microagent.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microagents/ui/format.py def print_agent_statistics(agent): """ Print statistics for a given agent. """ print(f"📊 Stats for {agent.purpose}:") stats = [ f"🔍 Evolve Count: {agent.evolve_count}", f"💻 Code Executions: {agent.number_of_code_executions}", f"👥 Active Agents: {agent.active_agents}", f"📈 Usage Count: {agent.usage_count}", f"🏔️ Max Depth: {agent.max_depth}", f"🌟 Depth: {agent.depth}", f"🛠️ Working Agent: {agent.working_agent}", f"📝 Last Input: {agent.last_input}", f"🚦 Status: {agent.current_status}", f"{Fore.MAGENTA}\nPrompt for {agent.purpose}:{Style.RESET_ALL}", f"{Fore.LIGHTMAGENTA_EX}{agent.dynamic_prompt}\n{Style.RESET_ALL}" ] print('\n'.join(stats)) # microagents/ui/logic.py def output_results(self): self.app.rlog.write("\n\nFinal Results:\n") for agent in self.manager.get_agents(): self.app.rlog.write(f"📊 Stats for {agent.purpose} :") self.app.rlog.write(f"🔍 Evolve Count: {agent.evolve_count}") self.app.rlog.write(f"💻 Code Executions: {agent.number_of_code_executions}") self.app.rlog.write(f"👥 Active Agents: {agent.active_agents}") self.app.rlog.write(f"📈 Usage Count: {agent.usage_count}") self.app.rlog.write(f"🔍 Max Depth: {agent.max_depth}") self.app.rlog.write(f"🌟 Depth: {agent.depth}") self.app.rlog.write(f"🛠 Working Agent::{agent.working_agent}") self.app.rlog.write(f"📝 Last Input: {agent.last_input}") self.app.rlog.write(f"🚦 Status: {agent.current_status}") self.app.rlog.write(f"\nPrompt for {agent.purpose}:") self.app.rlog.write(f"{agent.dynamic_prompt}\n") # microagents/gradio_ui/agent_manager.py def format_agent_info_details(self, agent: MicroAgent) -> dict: """ Format the information of a MicroAgent for display. """ return { "Purpose": agent.purpose, "System Prompt": agent.dynamic_prompt, "Last Input": agent.last_input, "Last Output": agent.last_output, "Last Conversation": agent.last_conversation, } """ import logging import uuid from integrations.openaiwrapper import OpenAIAPIWrapper from agents.agent_evaluation import AgentEvaluator from agents.agent_response import AgentResponse from agents.agent_similarity import AgentSimilarity from agents.response_extraction import ResponseExtraction from agents.agent_stopped_exception import AgentStoppedException from agents.response_handler import ResponseHandler from runtime.code_execution import CodeExecution from prompt_management.prompt_evolution import PromptEvolution from utils.utility import get_env_variable, time_function, log_exception logger = logging.getLogger() class MicroAgent: """ The MicroAgent class encapsulates the behavior of a small, purpose-driven agent that interacts with the OpenAI API. 
""" def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) : self.dynamic_prompt = initial_prompt self.purpose = purpose self.purpose_embedding = purpose_embedding self.depth = depth self.max_depth = max_depth self.usage_count = 0 self.working_agent = bootstrap_agent self.agent_lifecycle = agent_lifecycle self.openai_wrapper = openai_wrapper self.evolve_count = 0 self.number_of_code_executions = 0 self.current_status = None self.active_agents = {} self.last_input = "" self.last_output = "" self.last_conversation = "" self.stopped = False self.is_prime = is_prime self.stop_execution = False if parent: self.parent_id = parent.id if parent else None else: self.parent_id = None if parent_id: self.parent_id = parent_id if is_prime: self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b" else: if id: self.id = id else: self.id = str(uuid.uuid4()) <fim_suffix> self.agent_evaluator = AgentEvaluator(self.openai_wrapper) self.code_executor = CodeExecution() self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth) self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents) self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle) self.response_extractor = ResponseExtraction(self.openai_wrapper) self.response_handler = ResponseHandler(self) def update_status(self, status): """Update the agent's current status.""" self.check_for_stopped() self.current_status = status logger.info(f"Agent {self.purpose} status updated to: {status}") def update_active_agents(self, calling_agent, called_agent=None): """Update the tree view of active agents.""" if called_agent: self.active_agents[calling_agent] = called_agent else: self.active_agents.pop(calling_agent, None) logger.info(f"Active agents updated: {self.active_agents}") def set_agent_as_working(self): """Set the agent as a working agent.""" self.working_agent = True self.agent_lifecycle.save_agent(self) logger.info(f"Agent {self.purpose} set as working agent.") def get_children(self): """Get the children of the agent.""" return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id] def is_working_agent(self): return self.working_agent def set_agent_deleted(self): """Set the agent as deleted.""" self.working_agent = False self.current_status = "❌ Deleted" self.stopped = True self.stop_execution = True self.agent_lifecycle.remove_agent(self) logger.info(f"Agent {self.purpose} set as deleted.") def check_for_stopped(self): """Check if the agent has been stopped.""" if self.stop_execution: self.current_status = "❌ Stopped" if self.is_prime: self.agent_lifecycle.reset_all_agents() raise AgentStoppedException("Agent stopped.") def respond(self, input_text, evolve_count=0): """ Generate a response to the given input text. 
""" return self.response_handler.respond(input_text, evolve_count) def stop(self): """Stop the agent.""" self.stop_execution = True if not self.is_working_agent(): self.stopped = True def reset(self): """Reset the agent's stopped status.""" self.current_status = "" self.stop_execution = False def __eq__(self, other): if not isinstance(other, MicroAgent): return NotImplemented return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose) def __hash__(self): return hash((self.dynamic_prompt, self.purpose)) <fim_middle># Initialize components used by the agent
# Initialize components used by the agent
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py def get_top_urls(scores_dict: dict, n: int): sorted_urls = sorted(scores_dict.items(), key=lambda x: x[1], reverse=True) top_n_urls = sorted_urls[:n] top_n_dict = dict(top_n_urls) return top_n_dict # microsearch/download_content.py def parse_feed(feed_url): try: feed = feedparser.parse(feed_url) return [entry.link for entry in feed.entries] except Exception as e: print(f"Error parsing feed {feed_url}: {e}") return [] # microsearch/download_content.py def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--feed-path") return parser.parse_args() """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) <fim_suffix> return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>result[url] = idf_score * numerator / denominator
result[url] = idf_score * numerator / denominator
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
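The statement completed in this sample is the core of BM25 scoring. In the notation of the surrounding code (freq is the count of kw in the document at url, avdl the average document length, k1 and b the usual BM25 parameters), the loop computes:

```latex
% BM25 per-keyword, per-document score as implemented in bm25()/idf():
%   N    = number of indexed documents
%   n_kw = number of documents containing kw
%   |d|  = len(self._documents[url])
\mathrm{score}(kw,\,url) \;=\;
  \underbrace{\ln\!\Bigl(\tfrac{N - n_{kw} + 0.5}{n_{kw} + 0.5} + 1\Bigr)}_{\mathrm{idf}(kw)}
  \cdot
  \frac{freq \cdot (k_1 + 1)}{freq + k_1\bigl(1 - b + b\,\tfrac{|d|}{avdl}\bigr)}
```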
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py def get_top_urls(scores_dict: dict, n: int): sorted_urls = sorted(scores_dict.items(), key=lambda x: x[1], reverse=True) top_n_urls = sorted_urls[:n] top_n_dict = dict(top_n_urls) return top_n_dict # microsearch/download_content.py def clean_content(html_content): soup = BeautifulSoup(html_content, "html.parser") for script in soup(["script", "style"]): script.extract() text = soup.get_text() lines = (line.strip() for line in text.splitlines()) chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) cleaned_text = " ".join(chunk for chunk in chunks if chunk) return cleaned_text # microsearch/download_content.py async def process_feed(feed_url, session, loop): try: post_urls = await loop.run_in_executor(None, parse_feed, feed_url) tasks = [fetch_content(session, post_url) for post_url in post_urls] post_contents = await asyncio.gather(*tasks) cleaned_contents = [clean_content(content) for content in post_contents] return list(zip(post_urls, cleaned_contents)) except Exception as e: print(f"Error processing feed {feed_url}: {e}") return [] """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: <fim_suffix> else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>old[url] += 
score
old[url] += score
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py def get_top_urls(scores_dict: dict, n: int): sorted_urls = sorted(scores_dict.items(), key=lambda x: x[1], reverse=True) top_n_urls = sorted_urls[:n] top_n_dict = dict(top_n_urls) return top_n_dict # microsearch/download_content.py def parse_feed(feed_url): try: feed = feedparser.parse(feed_url) return [entry.link for entry in feed.entries] except Exception as e: print(f"Error parsing feed {feed_url}: {e}") return [] # microsearch/download_content.py def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--feed-path") return parser.parse_args() """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} <fim_suffix> avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>idf_score = self.idf(kw)
idf_score = self.idf(kw)
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py def get_top_urls(scores_dict: dict, n: int): sorted_urls = sorted(scores_dict.items(), key=lambda x: x[1], reverse=True) top_n_urls = sorted_urls[:n] top_n_dict = dict(top_n_urls) return top_n_dict # microsearch/download_content.py def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--feed-path") return parser.parse_args() # microsearch/app/app.py def read_about(request: Request): return templates.TemplateResponse("about.html", {"request": request}) """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): <fim_suffix> self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/download_content.py def clean_content(html_content): soup = BeautifulSoup(html_content, "html.parser") for script in soup(["script", "style"]): script.extract() text = soup.get_text() lines = (line.strip() for line in text.splitlines()) chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) cleaned_text = " ".join(chunk for chunk in chunks if chunk) return cleaned_text # microsearch/download_content.py def parse_feed(feed_url): try: feed = feedparser.parse(feed_url) return [entry.link for entry in feed.entries] except Exception as e: print(f"Error parsing feed {feed_url}: {e}") return [] # microsearch/download_content.py async def process_feed(feed_url, session, loop): try: post_urls = await loop.run_in_executor(None, parse_feed, feed_url) tasks = [fetch_content(session, post_url) for post_url in post_urls] post_contents = await asyncio.gather(*tasks) cleaned_contents = [clean_content(content) for content in post_contents] return list(zip(post_urls, cleaned_contents)) except Exception as e: print(f"Error processing feed {feed_url}: {e}") return [] """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: <fim_suffix> words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>self._documents[url] = 
content
self._documents[url] = content
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py def get_top_urls(scores_dict: dict, n: int): sorted_urls = sorted(scores_dict.items(), key=lambda x: x[1], reverse=True) top_n_urls = sorted_urls[:n] top_n_dict = dict(top_n_urls) return top_n_dict # microsearch/download_content.py def parse_feed(feed_url): try: feed = feedparser.parse(feed_url) return [entry.link for entry in feed.entries] except Exception as e: print(f"Error parsing feed {feed_url}: {e}") return [] # microsearch/download_content.py def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--feed-path") return parser.parse_args() """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) <fim_suffix> result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl )
denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl )
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py def get_top_urls(scores_dict: dict, n: int): sorted_urls = sorted(scores_dict.items(), key=lambda x: x[1], reverse=True) top_n_urls = sorted_urls[:n] top_n_dict = dict(top_n_urls) return top_n_dict # microsearch/download_content.py def parse_feed(feed_url): try: feed = feedparser.parse(feed_url) return [entry.link for entry in feed.entries] except Exception as e: print(f"Error parsing feed {feed_url}: {e}") return [] # microsearch/download_content.py def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--feed-path") return parser.parse_args() """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: <fim_suffix> idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>result = {}
result = {}
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/download_content.py def clean_content(html_content): soup = BeautifulSoup(html_content, "html.parser") for script in soup(["script", "style"]): script.extract() text = soup.get_text() lines = (line.strip() for line in text.splitlines()) chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) cleaned_text = " ".join(chunk for chunk in chunks if chunk) return cleaned_text # microsearch/download_content.py def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--feed-path") return parser.parse_args() # microsearch/app/app.py def read_about(request: Request): return templates.TemplateResponse("about.html", {"request": request}) """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) <fim_suffix> return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>string_without_double_spaces = " ".join(string_without_punc.split())
string_without_double_spaces = " ".join(string_without_punc.split())
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
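For reference, the `normalize_string` helper completed in this sample maps every punctuation character to a space, collapses runs of whitespace, and lowercases; for example:

```python
>>> normalize_string("Hello,   World... (BM25!)")
'hello world bm25'
```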
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py async def search_results(request: Request, query: str = Path(...)): results = engine.search(query) results = get_top_urls(results, n=5) return templates.TemplateResponse( "results.html", {"request": request, "results": results, "query": query} ) # microsearch/app/app.py def get_top_urls(scores_dict: dict, n: int): sorted_urls = sorted(scores_dict.items(), key=lambda x: x[1], reverse=True) top_n_urls = sorted_urls[:n] top_n_dict = dict(top_n_urls) return top_n_dict # microsearch/download_content.py def clean_content(html_content): soup = BeautifulSoup(html_content, "html.parser") for script in soup(["script", "style"]): script.extract() text = soup.get_text() lines = (line.strip() for line in text.splitlines()) chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) cleaned_text = " ".join(chunk for chunk in chunks if chunk) return cleaned_text """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) <fim_suffix> def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>return url_scores
return url_scores
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py async def search_results(request: Request, query: str = Path(...)): results = engine.search(query) results = get_top_urls(results, n=5) return templates.TemplateResponse( "results.html", {"request": request, "results": results, "query": query} ) # microsearch/download_content.py def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--feed-path") return parser.parse_args() # microsearch/app/app.py def read_about(request: Request): return templates.TemplateResponse("about.html", {"request": request}) """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents <fim_suffix> return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>n_kw = len(self.get_urls(kw))
n_kw = len(self.get_urls(kw))
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/download_content.py def clean_content(html_content): soup = BeautifulSoup(html_content, "html.parser") for script in soup(["script", "style"]): script.extract() text = soup.get_text() lines = (line.strip() for line in text.splitlines()) chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) cleaned_text = " ".join(chunk for chunk in chunks if chunk) return cleaned_text # microsearch/download_content.py def parse_feed(feed_url): try: feed = feedparser.parse(feed_url) return [entry.link for entry in feed.entries] except Exception as e: print(f"Error parsing feed {feed_url}: {e}") return [] # microsearch/download_content.py async def process_feed(feed_url, session, loop): try: post_urls = await loop.run_in_executor(None, parse_feed, feed_url) tasks = [fetch_content(session, post_url) for post_url in post_urls] post_contents = await asyncio.gather(*tasks) cleaned_contents = [clean_content(content) for content in post_contents] return list(zip(post_urls, cleaned_contents)) except Exception as e: print(f"Error processing feed {feed_url}: {e}") return [] """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") <fim_suffix> if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>for word in words: self._index[word][url] += 
1
for word in words: self._index[word][url] += 1
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py def get_top_urls(scores_dict: dict, n: int): sorted_urls = sorted(scores_dict.items(), key=lambda x: x[1], reverse=True) top_n_urls = sorted_urls[:n] top_n_dict = dict(top_n_urls) return top_n_dict # microsearch/download_content.py def clean_content(html_content): soup = BeautifulSoup(html_content, "html.parser") for script in soup(["script", "style"]): script.extract() text = soup.get_text() lines = (line.strip() for line in text.splitlines()) chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) cleaned_text = " ".join(chunk for chunk in chunks if chunk) return cleaned_text # microsearch/download_content.py async def process_feed(feed_url, session, loop): try: post_urls = await loop.run_in_executor(None, parse_feed, feed_url) tasks = [fetch_content(session, post_url) for post_url in post_urls] post_contents = await asyncio.gather(*tasks) cleaned_contents = [clean_content(content) for content in post_contents] return list(zip(post_urls, cleaned_contents)) except Exception as e: print(f"Error processing feed {feed_url}: {e}") return [] """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): <fim_suffix> return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>for url, score in new.items(): if url in old: old[url] += score else: old[url] = 
score
for url, score in new.items(): if url in old: old[url] += score else: old[url] = score
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
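The loop completed here is a per-keyword score merge: URLs already present in the accumulator have their scores added to, and new URLs are inserted as-is. For example:

```python
>>> update_url_scores({"a.com": 1.0}, {"a.com": 0.5, "b.com": 2.0})
{'a.com': 1.5, 'b.com': 2.0}
```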
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py async def search_results(request: Request, query: str = Path(...)): results = engine.search(query) results = get_top_urls(results, n=5) return templates.TemplateResponse( "results.html", {"request": request, "results": results, "query": query} ) # microsearch/app/app.py def get_top_urls(scores_dict: dict, n: int): sorted_urls = sorted(scores_dict.items(), key=lambda x: x[1], reverse=True) top_n_urls = sorted_urls[:n] top_n_dict = dict(top_n_urls) return top_n_dict # microsearch/download_content.py def clean_content(html_content): soup = BeautifulSoup(html_content, "html.parser") for script in soup(["script", "style"]): script.extract() text = soup.get_text() lines = (line.strip() for line in text.splitlines()) chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) cleaned_text = " ".join(chunk for chunk in chunks if chunk) return cleaned_text """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} <fim_suffix> return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score)
for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score)
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py def get_top_urls(scores_dict: dict, n: int): sorted_urls = sorted(scores_dict.items(), key=lambda x: x[1], reverse=True) top_n_urls = sorted_urls[:n] top_n_dict = dict(top_n_urls) return top_n_dict # microsearch/download_content.py def parse_feed(feed_url): try: feed = feedparser.parse(feed_url) return [entry.link for entry in feed.entries] except Exception as e: print(f"Error parsing feed {feed_url}: {e}") return [] # microsearch/download_content.py def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--feed-path") return parser.parse_args() """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl <fim_suffix> return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator
for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/download_content.py async def process_feed(feed_url, session, loop): try: post_urls = await loop.run_in_executor(None, parse_feed, feed_url) tasks = [fetch_content(session, post_url) for post_url in post_urls] post_contents = await asyncio.gather(*tasks) cleaned_contents = [clean_content(content) for content in post_contents] return list(zip(post_urls, cleaned_contents)) except Exception as e: print(f"Error processing feed {feed_url}: {e}") return [] # microsearch/download_content.py async def fetch_content(session, url): async with session.get(url) as response: return await response.text() # microsearch/download_content.py def parse_feed(feed_url): try: feed = feedparser.parse(feed_url) return [entry.link for entry in feed.entries] except Exception as e: print(f"Error parsing feed {feed_url}: {e}") return [] """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): if url in old: old[url] += score else: old[url] = score return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): <fim_suffix> def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>for url, content in documents: self.index(url, content)
for url, content in documents:
    self.index(url, content)
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
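A short sketch of what the bulk_index completion above amounts to: it delegates each (url, content) pair to index(), which also drops the cached _avdl attribute so the average document length is recomputed on the next query. The sample documents are invented.

from microsearch.engine import SearchEngine  # assumed import path

engine = SearchEngine()
engine.bulk_index([
    ("https://example.com/a", "first post about search"),
    ("https://example.com/b", "second post about ranking"),
])
assert engine.number_of_documents == 2  # both documents are now indexed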
<filename>microsearch/src/microsearch/engine.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # microsearch/app/app.py def get_top_urls(scores_dict: dict, n: int): sorted_urls = sorted(scores_dict.items(), key=lambda x: x[1], reverse=True) top_n_urls = sorted_urls[:n] top_n_dict = dict(top_n_urls) return top_n_dict # microsearch/download_content.py def clean_content(html_content): soup = BeautifulSoup(html_content, "html.parser") for script in soup(["script", "style"]): script.extract() text = soup.get_text() lines = (line.strip() for line in text.splitlines()) chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) cleaned_text = " ".join(chunk for chunk in chunks if chunk) return cleaned_text # microsearch/download_content.py async def process_feed(feed_url, session, loop): try: post_urls = await loop.run_in_executor(None, parse_feed, feed_url) tasks = [fetch_content(session, post_url) for post_url in post_urls] post_contents = await asyncio.gather(*tasks) cleaned_contents = [clean_content(content) for content in post_contents] return list(zip(post_urls, cleaned_contents)) except Exception as e: print(f"Error processing feed {feed_url}: {e}") return [] """ from collections import defaultdict from math import log import string def update_url_scores(old: dict[str, float], new: dict[str, float]): for url, score in new.items(): <fim_suffix> return old def normalize_string(input_string: str) -> str: translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation)) string_without_punc = input_string.translate(translation_table) string_without_double_spaces = " ".join(string_without_punc.split()) return string_without_double_spaces.lower() class SearchEngine: def __init__(self, k1: float = 1.5, b: float = 0.75): self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._documents: dict[str, str] = {} self.k1 = k1 self.b = b @property def posts(self) -> list[str]: return list(self._documents.keys()) @property def number_of_documents(self) -> int: return len(self._documents) @property def avdl(self) -> float: if not hasattr(self, "_avdl"): self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents) return self._avdl def idf(self, kw: str) -> float: N = self.number_of_documents n_kw = len(self.get_urls(kw)) return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1) def bm25(self, kw: str) -> dict[str, float]: result = {} idf_score = self.idf(kw) avdl = self.avdl for url, freq in self.get_urls(kw).items(): numerator = freq * (self.k1 + 1) denominator = freq + self.k1 * ( 1 - self.b + self.b * len(self._documents[url]) / avdl ) result[url] = idf_score * numerator / denominator return result def search(self, query: str) -> dict[str, float]: keywords = normalize_string(query).split(" ") url_scores: dict[str, float] = {} for kw in keywords: kw_urls_score = self.bm25(kw) url_scores = update_url_scores(url_scores, kw_urls_score) return url_scores def index(self, url: str, content: str) -> None: self._documents[url] = content words = normalize_string(content).split(" ") for word in words: self._index[word][url] += 1 if hasattr(self, "_avdl"): del self._avdl def bulk_index(self, documents: list[tuple[str, str]]): for url, content in documents: self.index(url, content) def get_urls(self, keyword: str) -> dict[str, int]: keyword = normalize_string(keyword) return self._index[keyword] engine = SearchEngine() <fim_middle>if url in old: old[url] += score else: old[url] = 
score
if url in old:
    old[url] += score
else:
    old[url] = score
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
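The IF completion above is the body of update_url_scores. Pulled out as a standalone, runnable sketch (the function is copied from the example; the sample scores are invented):

def update_url_scores(old: dict[str, float], new: dict[str, float]):
    # Accumulate one keyword's per-URL scores into the running totals.
    for url, score in new.items():
        if url in old:
            old[url] += score
        else:
            old[url] = score
    return old

merged = update_url_scores({"url-a": 1.2}, {"url-a": 0.3, "url-b": 0.9})
print(merged)  # {'url-a': 1.5, 'url-b': 0.9}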
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor_utils/ner_dict.py def contains_letter_and_number(text: str): return text.isalnum() and not text.isalpha() and not text.isdigit() # nlm-ingestor/nlm_ingestor/ingestor/visual_ingestor/visual_ingestor.py def make_header_class(self, line_style): if line_style in self.header_styles: return self.line_style_classes[line_style] else: cloned_style = LineStyle( line_style[0], 'italic', line_style[2], 600, 'none', 'left' ) if cloned_style in self.header_styles: return self.line_style_classes[cloned_style] else: class_name = f"cls_{len(self.line_style_classes.keys())}" self.line_style_classes[cloned_style] = class_name self.class_line_styles[class_name] = cloned_style self.header_styles.append(cloned_style) return self.line_style_classes[cloned_style] # nlm-ingestor/nlm_ingestor/ingestor/visual_ingestor/visual_ingestor.py def check_special_line(block): for line_idx, line in enumerate(block["visual_lines"]): # print(line["text"]) if not "line_parser" in line or not line["line_parser"]["noun_chunks"]: line["line_parser"] = line_parser.Line(line["text"]).to_json() if not line["line_parser"]["noun_chunks"]: return False noun_chunk = non_alphanumeric_pattern.sub(' ', line["line_parser"]["noun_chunks"][0]).strip() text = non_alphanumeric_pattern.sub(' ', line["text"]).strip() upper_check = all([token[0].isupper() for token in text.split()]) bolded_text = line["line_style"][1] == "bold" or line["line_style"][3] > 400 large_text = line["line_style"][2] > 9 # print(line["text"]) # print(noun_chunk == text, bolded_text, large_text) if not ((upper_check or noun_chunk) and bolded_text and large_text): return False return True """ import datetime import logging import math import re import string from nltk.corpus import stopwords from .patterns import abbreviations from .patterns import states from .patterns import states_abbreviations from .styling_utils import mode_of_list try: stop_words = set(stopwords.words("english")) except Exception as e: logging.error(e) import nltk stopwords = nltk.download("stopwords") stop_words = set(stopwords.words("english")) stop_words.add("per") continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~" list_chars = [ "โ€ข", "โžข", "*", "ฦ’", "๏‚ท", "๏‚ง", "๏ƒ˜", "๏ฎ", "ยป", "โ˜", "ยท", "๏ฟฝ", "โ–ช", "โ–ช", "โ—‹", "๔€ธ", "โ€“", ] list_types = { "โ€ข": "circle", "โžข": "wide_symbol_arrow", "*": "star", "ฦ’": "f", "๏‚ท": "clock", "๏‚ง": "small_square", "๏ƒ˜": "narrow_symbol_arrow", "๏ฎ": "large_square", "ยป": "double_arrow", "โ˜": "hollow_square", "ยท": "circle", "๏ฟฝ": "special_char", "โ–ช": "very_small_square", "โ–ช": "very_small_square", "โ—‹": "hollow_circle", "๔€ธ": "hollow_squere", "โ€“": "dash", "โ€’": "another-dash", "ฬถ": "underscore", } unicode_list_types = { "\\uf0b7": "โ€ข", "\\uf0fc": "๏ƒผ", } footnote_types = { "ยฉ" } ambiguous_list_chars = ["+", "-"] units = ["acres", "miles", "-"] # - could represent a null value in a row punctuations = string.punctuation + "โ€œ" start_quotations = ["'", '"', "โ€œ"] end_quotations = ["'", '"', "โ€"] """ Quote Pattern details: \\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly. ["โ€œ\'] ==> Quote patterns (?!\\D\\s) ==> Negative Lookahead for single character following the quote. Helps in removing words like Macy's, don't ... (?!\\d+) ==> Negative Lookahead for one or more digits following the pattern. 
Helps in removing words like '19, '2019 (.*?)[,;.]?[โ€"\'] ==> Match all other data. """ # Add / Modify Quotation pattern in ingestor_utils/utils.py also. quote_pattern = re.compile( r'(?:(?<=\W)|(?<=^))["โ€œโ€˜โ€™\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[โ€"โ€˜โ€™\']+', ) # (r'["โ€œ\'](.*?)[,;.]?[โ€"\']') single_char_pattern = re.compile(r'[a-zA-Z]') multi_char_pattern = re.compile(r'[a-zA-Z]+') roman_number_pattern = re.compile(r'[ixvIXV]+$') ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"โ€œโ€˜โ€™โ€\'\s]*$") conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"] class Word: def __init__(self, token): self.text = token self.is_percent = False self.is_number = False self.is_year = False # year does not count as a number self.is_dollar = False self.is_million = False self.is_billion = False self.is_thousand = False self.is_date_entry = False self.is_negative = False self.length = len(self.text) self.is_stop_word = self.text.lower() in stop_words self.is_number_range = False self.parts = [] text_without_punct = self.text while ( len(text_without_punct) > 1 and (text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations) ): text_without_punct = text_without_punct[0:-1] # remove leading unbalancced punctuations while ( len(text_without_punct) > 1 and (text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations) ): text_without_punct = text_without_punct[1:] self.text_without_punct = text_without_punct self.is_noun = self.text_without_punct[0].isupper() n = self.check_numeric() self.check_date() try: if n: n = round(float(n)) if n > 0: digits = int(math.log10(n)) + 1 elif n == 0: digits = 1 else: digits = int(math.log10(-n)) + 2 self.num_digits = digits if digits == 4 and self.text.replace(",", "") == self.text: self.is_year = True self.is_number = False else: self.num_digits = 0 except Exception as e: logging.error(e) self.num_digits = 0 def check_date(self): if "/" in self.text or "-" in self.text: text = self.text.replace("/", "-") date_patterns = [ "%b-%d", "%B-%d", "%B-%d-%y", "%B-%d-%Y", "%b-%d-%Y", "%b-%d-%y", "%m-%d", "%m-%d-%y", "%m-%d-%Y", ] for pat in date_patterns: try: datetime.datetime.strptime(text, pat) self.is_date_entry = True return except ValueError: pass else: self.is_date_entry = False def check_numeric(self): word = self.text.lower() if not word.isalpha(): if word.isprintable(): if not word.isnumeric(): if word.startswith("(") and word.endswith(")"): word = word[1:-1] if word.startswith("-"): self.is_negative = True word = word[1:] if word.startswith("$"): self.is_dollar = True word = word[1:] elif word.endswith("$"): self.is_dollar = True word = word[0:-1] elif word.endswith("%"): self.is_percent = True word = word[0:-1] elif word.endswith("m"): self.is_million = True elif word.endswith("bn"): self.is_billion = True if word.startswith("(") and word.endswith(")"): word = word[1:-1] word = word.replace(",", "") if word.isnumeric() or word.replace(".", "", 1).isnumeric(): self.is_number = True parts = word.split("-") if ( len(parts) == 2 and parts[0].isnumeric() and parts[1].isnumeric() ): self.is_number_range = True self.parts = parts else: self.is_number = True if self.is_number: numeric_part = word return numeric_part class Line: def __init__( self, line_str, text_list=[], style_dict={}, page_details={}, noun_chunk_ending_tokens=[], ): self.text = line_str.strip() self.visual_line = VisualLine(text_list, style_dict, page_details) 
self.words = [] self.is_independent = False self.is_header = False self.is_header_without_comma = False self.noun_chunks = [] self.quoted_words = quote_pattern.findall(self.text) self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens} self.parse_line() def check_header(self): # Section X, Article Y, Note 1 etc. first_word_header = self.first_word.lower() in ["section", "article", "note"] # If there are a certain percentage of title words (first letter capitalize) title_ratio = ( self.title_word_count / self.eff_word_count if self.eff_word_count > 0 else 1.0 ) # print(self.title_word_count, self.eff_word_count, title_ratio) # Section 1 is a header but Section 1: Hello 3 is not has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10 has_header_structure = ( (first_word_header or has_enough_titles) and self.number_count == 1 ) or self.numbered_line or self.text.isupper() # has_header_structure = has_header_structure and self.eff_word_count < last_word_number = ( self.last_word.lower() in units or self.last_word_number and not has_header_structure ) last_word_date = self.last_word_date and not has_header_structure # Find lines ending with sentence delimiter. But exclude text like "L.P." ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None sentence_structure = self.ends_with_period and not ( has_header_structure and title_ratio > 0.9 ) and ends_with_delim last_letter_is_punctuation = ( self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and ends_with_delim ) self.is_header_without_comma = ( not sentence_structure and not self.has_list_char and not self.first_char in footnote_types and has_enough_titles and not last_word_number and ( self.number_count == 0 or (has_header_structure and self.number_count <= 1) ) and not self.has_continuing_chars and not last_word_date and self.first_word_title and not self.last_word_is_stop_word and not self.is_zipcode_or_po and not last_letter_is_punctuation and not "://" in self.text # url pattern ) self.is_header = self.is_header_without_comma and \ ((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True) def check_ends_with_period(self): # punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.'] last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."] self.ends_with_period = self.last_char in ["."] and not last_word_is_title def check_table_row(self): if not self.is_header: value_count = ( self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ") ) word_symbols = self.word_count - self.dollar_sign_count if word_symbols == 0: word_symbols = 1 word_ratio = ( value_count + self.title_word_count + self.date_entry_count ) / word_symbols self.is_table_row = ( ( (value_count > 0 or self.date_entry_count > 0) and word_ratio > 0.7 and not self.ends_with_period and not self.is_zipcode_or_po ) and not self.last_word_is_stop_word or ("...." in self.text) ) else: self.is_table_row = False def check_list_item(self): text = self.text.strip() self.has_list_char = text[0] in list_types.keys() # if not self.has_list_char and text[0] in ambiguous_list_chars: # self.has_list_char = text[1:].strip()[0].isalpha() self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$" if self.is_list_item: <fim_suffix> # matches 1.1 1.2.1 1 etc. 
def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # check if line is part of address def check_zipcode_or_pobox(self): # check if line matches format P.O. 
box xxxxx pobox = ( self.word_count == 3 and self.last_word_number and self.first_word.lower() in ["po", "p.o", "p.o."] ) # check if line is last part of address, matching format "city, state zipcode" zipcode = ( self.word_count < 7 # ensure line is standalone address, not part of larger sentence and ( self.contains_state # line contains comma followed by state name or abbreviation # line ends in zipcode, with format xxxxx or xxxxx-xxxx and ( (self.last_word_number or self.last_word[-4:].isdigit()) and ( (len(self.last_word) == 10 and self.last_word[-5] == "-") or len(self.last_word) == 5 ) ) and not self.ends_with_period ) ) self.is_zipcode_or_po = pobox or zipcode def set_line_type(self): line_type = "para" if self.is_table_row: line_type = "table_row" elif self.is_header: line_type = "header" elif self.is_list_item or self.numbered_line: line_type = "list_item" else: line_type = "para" self.line_type = line_type def parse_line(self): self.words = [] self.title_word_count = 0 self.alpha_count = 0 self.list_type = "" self.integer_numbered_line = False self.roman_numbered_line = False self.dot_numbered_line = False self.numbered_line = False self.stop_word_count = 0 self.dollar_count = 0 self.pct_count = 0 self.number_count = 0 self.last_word_number = False self.first_word_title = False self.letter_numbered_line = False self.ends_with_hyphen = False self.last_word_date = False self.is_reference_author_name = False self.date_entry_count = 0 self.last_word_is_stop_word = False # self.last_word in self.stopwords self.hit_colon = False self.is_zipcode_or_po = False self.contains_state = False self.addresses = [] # todo - this is a stopgap solution, need to make it more efficient tokens = self.text.split() self.length = len(self.text) self.word_count = len(tokens) self.dollar_sign_count = tokens.count("$") last_idx = self.word_count - 1 first_alpha_found = False prev_token_comma = False self.eff_length = 0 single_letter_word_count = 0 noun_chunk_buf = [] if self.length == 0: return for idx, token in enumerate(tokens): if token in unicode_list_types.keys(): token = unicode_list_types[token] if token.__contains__(":"): self.hit_colon = True # remove punctuation unless (word) or unless it is the first token or if it has colon last_char = token[-1] # remove punctuation unless (word) or unless it is the first token if ( (token[-1] in string.punctuation or token[-1] in end_quotations) and not (token[0] in string.punctuation or token[0] in start_quotations) and (not idx == 0 or token[-1] == ":") ): token = token[0:-1] if len(token) == 0: continue # if prev token contained comma, check if current token is state name if prev_token_comma and ( token.lower() in states or token.lower() in states_abbreviations ): self.contains_state = True prev_token_comma = False if prev_token_comma: prev_token_comma = False if last_char == ",": prev_token_comma = True if idx == 0 and not token.lower() == "i" and not token.lower() == "a": self.check_numbered_line(token) if token.istitle() or token.isupper(): # and not self.hit_colon: self.title_word_count = self.title_word_count + 1 if token.isalpha(): # if not self.hit_colon: self.alpha_count = self.alpha_count + 1 if not first_alpha_found: first_alpha_found = True if idx == 0: self.first_word_title = token[0].isupper() word = Word(token) if word.is_number: self.number_count = self.number_count + 1 if idx == last_idx: self.last_word_number = True if word.is_date_entry: self.date_entry_count += 1 if idx == last_idx: self.last_word_date = True if word.is_dollar: 
self.dollar_count = self.dollar_count + 1 if idx == last_idx: self.last_word_number = True if word.is_percent: self.pct_count = self.pct_count + 1 if idx == last_idx: self.last_word_number = True self.eff_length += word.length if word.length == 1: single_letter_word_count += 1 if word.is_stop_word: if not self.hit_colon: self.stop_word_count = self.stop_word_count + 1 if idx == last_idx and len(token) != 1 and not token.isupper(): self.last_word_is_stop_word = True if word.is_noun or word.text == "&": noun = word.text_without_punct prev_word = self.words[-1] if len(self.words) > 0 else None if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf: noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway if noun.endswith("'s"): noun = noun[0:-2] noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] elif ( "".join([x.lower() for x in noun if x not in {".", ","}]) in self.noun_chunk_ending_tokens ): noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] else: noun_chunk_buf.append(noun) elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]: noun_chunk_buf.append(word.text_without_punct) elif len(noun_chunk_buf): self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] self.words.append(word) if len(noun_chunk_buf) > 0: self.noun_chunks.append(" ".join(noun_chunk_buf)) self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks)))) self.first_word = tokens[0] self.last_word = tokens[-1] self.last_char = self.text[-1] self.ends_with_period = self.last_char == "." self.ends_with_comma = self.last_char == "," self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "." 
self.eff_word_count = self.alpha_count - self.stop_word_count self.check_ends_with_period() self.first_char = self.text[0] self.has_continuing_chars = not self.numbered_line and ( self.first_char.islower() or self.first_char in continuing_chars ) self.last_continuing_char = self.last_char in continuing_chars self.check_zipcode_or_pobox() self.check_list_item() self.check_header() self.check_table_row() self.separate_line = ( self.is_header or self.is_table_row or self.is_list_item or self.is_zipcode_or_po ) self.is_list_or_row = self.is_table_row or self.is_list_item self.is_header_or_row = ( self.is_header or self.is_table_row or self.is_zipcode_or_po ) self.ends_with_abbreviation = self.ends_with_period and ( (self.last_word.find(".") != len(self.last_word) - 1) or self.last_word.lower() in abbreviations or len(self.last_word) <= 3 ) self.incomplete_line = not self.is_header_or_row and ( not self.ends_with_period or self.ends_with_abbreviation or self.end_with_period_single_char ) self.continuing_line = self.has_continuing_chars and not self.separate_line self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8 self.set_line_type() if self.is_header or self.is_header_without_comma: if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2: self.is_reference_author_name = True self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list # print(self.separate_line) # self.continuing_line = not self.separate_line and def to_json(self): json_lp = dict(self.__dict__) del json_lp["visual_line"] words = [] for word in self.words: words.append(word.__dict__) json_lp["words"] = words return json_lp class VisualLine: def __init__(self, text_list=[], style_dict={}, page_stats={}): self.text_list = text_list self.start_x = None self.start_y = None self.end_x = None self.end_y = None self.fs = None self.fw = None self.start_fs = None self.end_fs = None self.diff_prev_y = None self.diff_next_y = None self.is_comparably_sized = False self.is_comparably_bolded = False self.is_prev_space_smallest = False self.is_next_space_smallest = False self.wrapped_page = False self.text = " ".join(self.text_list) if style_dict: self.start_x = style_dict["start_x"][0] self.start_y = style_dict["start_y"][0] self.end_x = style_dict["end_x"][-1] self.end_y = style_dict["end_y"][-1] self.fs = style_dict["line_fs"][0] self.fw = style_dict["line_fw"][0] self.diff_prev_y = style_dict["diff_prev_y"][0] self.diff_next_y = style_dict["diff_next_y"][0] self.font_family = ( style_dict["font_family"][0] if len(style_dict["font_family"]) else None ) self.font_style = ( style_dict["font_style"][0] if len(style_dict["font_style"]) else None ) self.min_x = ( self.start_x ) # these variables are adjustable during line joins for line width self.max_x = self.end_x self.start_x_list = style_dict["start_x"] # joined ents self.end_x_list = style_dict["end_x"] # joined ents self.start_x_list_single_ent = style_dict["start_x_list"][0] self.end_x_list_single_ent = style_dict["end_x_list"][0] self.mode_fs = mode_of_list(style_dict["line_fs"]) self.tab_count = 0 # calculates tabs for when tika misses word split if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent): self.start_end_list = list( zip(self.start_x_list_single_ent, self.end_x_list_single_ent), ) for word_x, next_word_x in zip( self.start_end_list[:-1], self.start_end_list[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = 
next_word_start_x - word_end_x if word_distance > 20: self.tab_count += 1 else: self.start_end_list = [] self.tab_count_join = 0 # tab count after join in ptolines # calculates tabs for when tika misses word split if len(self.start_x_list) == len(self.end_x_list): self.start_end_list_join = list( zip(self.start_x_list, self.end_x_list), ) for word_x, next_word_x in zip( self.start_end_list_join[:-1], self.start_end_list_join[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = next_word_start_x - word_end_x if word_distance > 20: self.tab_count_join += 1 else: self.start_end_list_join = [] if len(self.text.split()) == 2 and self.tab_count == 1: self.text_list = self.text.split() # Count tabs in text list, Eventually make it a function of font size self.start_fs = round(style_dict["start_fs"][0], 1) self.end_fs = round(style_dict["end_fs"][-1], 1) self.compute_visual_features(page_stats) def compute_visual_features(self, page_stats): # compute font size relative to most common font font_sizes_mode = page_stats["mode_fs"] if self.fs > (4 / 3) * font_sizes_mode: self.is_comparably_sized = True else: self.is_comparably_sized = False # compute font weight relative to 600.0 which has generally # been observed to correspond to bolding of some sort font_weights_mode = page_stats["mode_fw"] if font_weights_mode >= 600.0: self.is_comparably_bolded = False elif self.fw > 600.0: self.is_comparably_bolded = True # compare line height for similar type (same font) lines if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2: for k, v in page_stats["fs_and_diff_prev_y"].items(): if k == self.fs and 0 <= v < self.diff_prev_y: break else: self.is_prev_space_smallest = True if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2: for k, v in page_stats["fs_and_diff_next_y"].items(): if k == self.fs and 0 <= v < self.diff_next_y: break else: self.is_next_space_smallest = True def should_join_table(self, next_line): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # check list of spaced words curr_line_ents = len(self.text_list) next_line_ents = len(next_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # compare alignment of elements in both lists if ent_match: return return False def should_join_para(self): return False def should_join_header(self): return False def __str__(self): output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest}," output_str += f"\nfont_style = {self.font_style}" return output_str <fim_middle>self.list_type = list_types[text[0]]
self.list_type = list_types[text[0]]
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
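The STATEMENT completion above sets self.list_type from the line's leading bullet character. A distilled sketch of that check, assuming a small subset of the list_types mapping; detect_list_item is an illustrative helper, not code from the repo:

list_types = {"•": "circle", "➢": "wide_symbol_arrow", "*": "star", "–": "dash"}

def detect_list_item(text: str):
    # A line counts as a list item when it starts with a known bullet and
    # its first token does not end in sentence-like punctuation.
    text = text.strip()
    if not text:
        return None
    first_word = text.split()[0]
    if text[0] in list_types and first_word[-1] not in ":?.)]%$":
        return list_types[text[0]]
    return None

print(detect_list_item("• first bullet point"))  # -> circle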
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/text_ingestor.py def blocks_to_json(page_blocks): results = [] block_count = 0 for page_idx, blocks in enumerate(page_blocks): result = [] block_start = block_count header_block_idx = -1 header_block_text = "" for block_idx_in_page, block in enumerate(blocks): if block["block_text"]: block_sents = utils.sent_tokenize(block["block_text"]) # header_block_idx = block["header_block_idx"] if block["block_type"] == "header": header_block_idx = block["block_idx"] header_block_text = block["block_text"] result.append( { "block_text": block["block_text"], "block_idx": block["block_idx"], "block_sents": block_sents, "block_type": block["block_type"], "header_block_idx": block_start + header_block_idx, "page_idx": page_idx, "block_idx_in_page": block_start + block_idx_in_page, "header_text": header_block_text, "text_group_start_idx": block["text_group_start_idx"], "block_list": block["block_list"], "level":0, "block_class": block["block_class"] if "block_class" in block else {} }, ) block_count += 1 results.append(result) return results # nlm-ingestor/nlm_ingestor/ingestor/styling_utils.py def tops_2_dict(p_items): tops_2_info = defaultdict(list) idx_2_top = {} for p_idx, p_item in enumerate(p_items): if not p_item.text.strip(): continue style_str = p_item.attrs.get("style", "") if not style_str: continue # do not strip text as trailing white-space is used as a features text = unicodedata.normalize("NFKD", p_item.text) style = get_p_styling_dict(style_str) start_y = style["start_y"] tops_2_info[round(start_y, 0)].append((p_idx, text, style)) idx_2_top[p_idx] = round(start_y, 0) # print(tops_2_info) return tops_2_info, idx_2_top # nlm-ingestor/nlm_ingestor/ingestor/table_parser.py def __init__(self, infos): self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(logging.INFO) self.tables = {} self.two_column_table_idx = set() self.resolved_tables = set() if not infos: return table_infos = [] table_start_idx = None for idx, info in enumerate(infos): if info.get("is_table_start", False) and not info.get("has_merged_cells", False): self.logger.debug(f"Found table start from match_idx:{idx}") table_start_idx = idx table_infos.append(info) elif table_start_idx is not None and info.get("is_table_end", False): table_infos.append(info) self.logger.debug(f"Table ends with match_idx:{idx}") # resolve table try: df = self.resolve_table_from_infos(table_infos) if isinstance(df, pd.DataFrame): self.logger.info( f"Found table at match_idx:{idx} of shape {df.shape}", ) self.tables[table_start_idx] = df if ( df.shape[1] == 1 and df.columns[0] == "_UNKNOWN_COLUMN_1_" and df.index.name == "_UNKNOWN_COLUMN_0_" ): for info_idx in range(len(table_infos)): self.two_column_table_idx.add(idx - info_idx) self.resolved_tables.add(table_infos[0]["table_idx"]) else: self.logger.error( f"Found table at match_idx:{idx} but failed to parse\n{table_infos[:2]}", ) except Exception: self.logger.error( f"Failed to parse table:\n{table_infos[:2]}", exc_info=True, ) # reset table_infos = [] table_start_idx = None elif table_start_idx: table_infos.append(info) """ import logging import re from collections import Counter from collections import defaultdict from . import formatter from . import line_parser from . 
import patterns from nlm_ingestor.ingestor_utils import spell_utils from nlm_ingestor.ingestor_utils.utils import sent_tokenize logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) su = spell_utils.SpellUtil() def stem(line): line = line.replace("'s", "") line = line.replace("โ€™s", "") return line def check_parentheses(text): count = 0 for i in text: if i == "(": count += 1 elif i == ")": count -= 1 return count == 0 def nlm_tokenize(line): # print(line) tokens = [] if not line: line = "" line = line.lower() trans_table = line.maketrans("-/", " ") line = line.translate(trans_table) line = line.translate(str.maketrans("", "", "๏ฟฝ\\(*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\)'\"โ€”")) # line = patterns.num_unit.sub(r"100 \1", line) line = patterns.num_unit.sub(r"", line) line = stem(line) words = line.split() for word in words: if ( not word.isdigit() and not word.endswith("%") and not word.startswith("$") and not word.endswith("$") ): tokens.append(word) if len(tokens) == 0: tokens.append("unknown") return tokens # make sure that there is at least one word which is greater than two characters def find_floating_chars(line): words = line.split(" ") for word in words: if len(word) > 2: return False return True def is_table_row(line): line = line_parser.Line(line) return line.is_table_row def should_skip(line, xml=False): return len(line) <= 2 if not xml else len(line) == 0 def clean_lines(lines, xml=False): result = [] running_line = "" line_buffer = [] line_type = "para" header_block_idx = -1 block_idx = 0 line_set = set() for line_str in lines: # print(line_str) line_str = clean_line(line_str) if should_skip(line_str, xml=xml): continue line_without_numbers = re.sub(r"\d+", "", line_str) if line_without_numbers in line_set: continue else: <fim_suffix> curr_line = line_parser.Line(line_str) # this converst strings like 'e x e c u t i v e summary' to 'executive summary' if not xml and curr_line.has_spaced_characters: line_str = fix_spaced_characters(line_str) curr_line = line_parser.Line(line_str) if len(line_buffer) > 0: # find out if previous line was a discontinous line prev_line = line_buffer[-1] logger.debug("========") logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n") logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n") # keep connecting lines as long as they seem incomplete is_incomplete = prev_line.incomplete_line or ( len(line_buffer) > 1 and not prev_line.ends_with_period ) logger.debug( f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}", ) if ( is_incomplete and not (curr_line.is_list_or_row or curr_line.line_type == "list_item") ) or curr_line.continuing_line: logger.debug("connecting..") running_line = formatter.connect(running_line, curr_line.text) line_buffer.append(curr_line) # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers if not line_type == "list_item": line_type = "para" else: # commit the line and start a new line # remove different types of bulletted list (for better formatting) but do not touch numbered line logger.debug("starting new line..") # if line_type == "list_item": # running_line = running_line[1:].lstrip() if line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) block_idx = block_idx + 1 running_line = 
curr_line.text line_buffer = [curr_line] line_type = curr_line.line_type logger.debug("========") else: running_line = curr_line.text line_type = curr_line.line_type line_buffer = [curr_line] if line_type == "list_item" and running_line[0] in "๏ฟฝ\\*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\'\"โ€”": running_line = running_line[1:].lstrip() block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) return result def line_list_check(prev_line, curr_line, list_char): # if prev_line is list_item and list_char matches curr_line if list_char == curr_line.text[0] and list_char not in ["โ€", "'", '"', "("]: return True # same char is alpha if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha(): if len(prev_line.text) >= 2 and prev_line.text[1].isupper(): # spell check first word first_word = prev_line.text.split(" ")[0] first_word = first_word.replace("'", "") correct_word = su.segment(first_word) if first_word[1:] == correct_word: return True # same char is not alpha but not digit if prev_line.text[0] == curr_line.text[0] and not ( prev_line.text[0].isalpha() or prev_line.text[0].isdigit() or list_char not in ["โ€", "'", '"', "("] ): return True return False def should_join_table(prev_line, curr_line, ents_aligned): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # print() # print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list) # check list of spaced words curr_line_ents = len(prev_line.visual_line.text_list) next_line_ents = len(curr_line.visual_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count) tab_match = ( prev_line.visual_line.tab_count == curr_line.visual_line.tab_count and curr_line.visual_line.tab_count > 0 ) # casing should also be the same same_case = ( prev_line.text[0].islower() == curr_line.text[0].islower() or prev_line.text[0].isupper() == curr_line.text[0].isupper() ) colon_check = ( prev_line.hit_colon and curr_line.hit_colon and prev_line and same_case and not prev_line.incomplete_line ) # if prev_line.hit_colon and curr_line.hit_colon: # print() # print("colon check") # print(prev_line.visual_line.text_list) # print(curr_line.visual_line.text_list) # col_check # print(tab_match, ent_match, colon_check) tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count return ( (tab_match and ent_match) or colon_check or (ents_aligned and ent_match and tab_check) ) def check_page_spacing(prev_line, curr_line, spacing_dict): # print("^"*50) # print("checking page stats") # print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text) # print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text) # print() diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y) # find best fs reference prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs} curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs} same_fs = prev_line_fs.intersection(curr_line_fs) fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs min_check = ( spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None ) max_check = ( spacing_dict[(fs, diff_top + 1)] 
if (fs, diff_top + 1) in spacing_dict else None ) normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3 if min_check or normal_check or max_check: # get all fs in spacing dict # see if the diff top is a min # print("checking space dict") distance_list = [] for val in spacing_dict: if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2: distance_list.append((val, val[1])) # print(distance_list) val = min(distance_list) if len(distance_list) else [] if len(val): join_fs, join_top = val[0] if len(val): join_fs, join_top = val[0] if val[0] == (fs, diff_top): # or close # print("SHOULDJOIN") return True elif ( join_fs == fs and ((diff_top - 1) == join_top) or ((diff_top + 1) == join_top) ): return True return False def compute_overlap( start_x0: float, end_x0: float, start_x1: float, end_x1: float, divide_by_min=True, ) -> float: """ Computes the % of intersection (overlap) of two lines w.r.t. the shortest line """ width_x0 = abs(end_x0 - start_x0) width_x1 = abs(end_x1 - start_x1) if start_x0 <= start_x1 <= end_x0: intersect = min(abs(end_x0 - start_x1), width_x1) elif start_x0 <= end_x1 <= end_x0: intersect = min(abs(end_x1 - start_x0), width_x1) elif start_x1 <= start_x0 <= end_x0 <= end_x1: intersect = abs(end_x0 - start_x0) else: intersect = 0.0 if divide_by_min: intersect /= min(width_x0, width_x1) + 1e-5 else: intersect /= max(width_x0, width_x1) + 1e-5 return intersect def compute_overlap_top_bottom( start_x0: float, end_x0: float, start_x1: float, end_x1: float, ) -> float: """ This is different from the above function. Finds percentage overlap of top to bottom. Score of 100% is possible doesn't reference the shortest line """ width_x1 = abs(end_x1 - start_x1) if width_x1 == 0: return 0.0 if start_x0 <= start_x1: # measure from left to right if end_x1 <= end_x0: # if start and end both less, full in subset return 1.0 return (end_x1 - start_x0) / width_x1 else: # measure from bottom start if end_x1 <= start_x0: return 0.0 return (end_x1 - start_x0) / width_x1 def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1): """ This is different from the above function. Finds percentage overlap of top to bottom. 
Score of 100% is possible doesn't reference the shortest line """ # print(start_x0, end_x0) # print(start_x1, end_x1) if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line # print() # print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0)) return (end_x1 - start_x1) / (end_x0 - start_x0) # other conditions # elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line # return # else: #to the right of bottom line return 1.0 # header check for lines with similar font # header check for lines with similar font def visual_header_check(prev_line, curr_line, same_font): # check top overlap (small) if the font size is bigger # print() # print("visual_header check:") # print("prev", prev_line.text) # print("checking", curr_line.text) # top also has to be higher # print("prev_line.visual_line.start_y, prev_line.visual_line.end_y") # print(prev_line.visual_line.start_y, prev_line.visual_line.end_y) # print(prev_line.visual_line.start_y, curr_line.visual_line.start_y) if prev_line.visual_line.wrapped_page: return False if prev_line.visual_line.start_y < curr_line.visual_line.start_y: prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x # print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x") # print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x) # print("curr_line.visual_line.min_x, curr_line.visual_line.max_x") # print(curr_line.visual_line.min_x, curr_line.visual_line.max_x) # print("prev_line_width / curr_line_width") # print(prev_line_width / curr_line_width) # print("prev_line_width, curr_line_width") # print(prev_line_width, curr_line_width) if curr_line_width == 0: return False # print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x)) if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x): if round(prev_line_width) == round(curr_line_width): # print() # print("NOT A HEADER1") return False offset = 0 # print(prev_line.visual_line.min_x, curr_line.visual_line.min_x) # print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x) if prev_line.visual_line.min_x <= curr_line.visual_line.min_x: offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset # print("(prev_line_width - offset) / curr_line_width") # print((prev_line_width - offset) / curr_line_width) overlap_percentage = (prev_line_width - offset) / curr_line_width different_font_style = ( prev_line.visual_line.fw != curr_line.visual_line.fw or prev_line.visual_line[1] != curr_line.visual_line[1] or prev_line.visual_line.fs > curr_line.visual_line.fs ) if ( overlap_percentage < 0.3 or (different_font_style and overlap_percentage < 0.6) or (prev_line.line_type == "header" and different_font_style) # or (prev_line.is_header and different_font_style) ): # print("HEADER INDENT", prev_line.is_header) # print("overlap rule::", (prev_line_width - offset) / curr_line_width) # print(True) return True # print(False) # print() # print("NOT A HEADER") return False def visual_header_from_stats(prev_line, curr_line, page_stats): prev_fs = prev_line.visual_line.fs curr_fs = curr_line.visual_line.fs median_val = round(page_stats["median_fs"]) max_val = round(max(page_stats["fs_list"])) max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True prev_fs_diff = round(prev_fs - median_val) curr_fs_diff = ( round(curr_fs - median_val) if round(curr_fs - 
median_val) else 0.8 ) # curr_fs is the median varied_set = len(set(page_stats["fs_list"])) >= 4 rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]]) unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"]) prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff # print("prev_fs, curr_fs", prev_fs, curr_fs) # print("unique text") # print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) ) # print("visual_header check", len(set(page_stats["fs_list"]))) # print("varied_set", varied_set, "unique_text", unique_text) # print(rounded_fs_count) # print() # close from max or far enough from median bigger_text = max_val_diff or ( prev_curr_ratio_from_median > 2 ) # TODO text must also be relatively uncommon if varied_set and (unique_text <= 0.08): if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3: # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True # header join if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1): # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True return False # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): def check_tr_alignment(prev_line, curr_line): # print("-=" * 50) # print("check_tr_alignment!") # print(prev_line.text) # print(curr_line.text) # print() prev_ents = len(prev_line.visual_line.text_list) curr_ents = len(curr_line.visual_line.text_list) prev_positions = prev_line.visual_line.start_x_list curr_positions = curr_line.visual_line.start_x_list prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent # print(prev_line_start_ents) # print(curr_line_start_ents) same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1 if len(prev_line_start_ents) == len(curr_line_start_ents): prev_positions = prev_line_start_ents curr_positions = curr_line_start_ents if len(prev_line_start_ents) == len(curr_positions) and len( prev_line_start_ents, ) != len( prev_positions, ): # joined p_tags prev_positions = prev_line_start_ents if not same_ents: # print("check_tr_alignment False1") # print(prev_ents, curr_ents) return False # print("CHECKING POSITIONS") # print(prev_positions) # print(curr_positions) for p_x, c_x in zip(prev_positions, curr_positions): p_x = round(p_x) c_x = round(c_x) if abs(p_x - c_x) > 100: # print("False") # print("check_tr_alignment False3") return False # print("check_tr_alignment True") return True def check_layout(prev_line, curr_line, prev_above_curr): prev_line_width = range( int(prev_line.visual_line.min_x), int(prev_line.visual_line.max_x), ) # weird edge case if not prev_line_width: prev_line_width = range( int(prev_line.visual_line.max_x), int(prev_line.visual_line.min_x), ) curr_line_width = range( int(curr_line.visual_line.min_x), int(curr_line.visual_line.max_x), ) prev_line_width = set(prev_line_width) prev_curr_overlap = prev_line_width.intersection(curr_line_width) if prev_curr_overlap and not prev_above_curr: # print(prev_line.text) # print(curr_line.text) # print("misplaced text group") # print() return True return False def order_blocks(blocks): block_group_dict = defaultdict(list) for idx, block in enumerate(blocks): # print(idx, "block-group", block["group_id"], block["block_type"], block['block_text']) 
group_id = block["group_id"] block_group_dict[group_id].append(block) block_group_list = [] # list that holds tuples (group_id, y_pos) for block_group_id in block_group_dict: block_group_list.append( (block_group_id, block_group_dict[block_group_id][0]["y"]), ) # append starting y position of group block_group_list = sorted( block_group_list, key=lambda x: x[1], ) # sort block groups by y position # get list of ordered block group keys ordered_blocks = [] for block_group_id, y in block_group_list: ordered_blocks += block_group_dict[block_group_id] # for b in original_blocks: # re-index blocks and headers based off of new ordering header_idx = 0 for idx, block in enumerate(ordered_blocks): block["block_idx"] = idx if block["block_type"] == "header": header_idx = idx ordered_blocks[idx]["header_block_idx"] = header_idx return ordered_blocks def visual_clean_lines( lines, page_stats={}, page_info_dict={}, page_idx=0, line_set={}, ): page_blocks = [] header_block_idx = -1 block_idx = 0 # block_idx = page_idx style_dict = {} join_font_spacing = False prev_line = None text_list = [] prev_ents = 0 curr_ents = 0 is_incomplete = False colon_rule = False text_group_start = True text_group_start_idx = 0 prev_line = None next_line = None # for idx, line in enumerate(lines[12:14]): sentence_visual_end = False group_id = 0 for idx, line in enumerate(lines): # print(idx) line_str, style_dict, text_list = ( line["text"], line["style"], line["text_list"], ) line_str = " ".join(line_str.split()) if should_skip(line_str): continue if line_str in line_set: continue if len(line_str.split()) > 8: line_set.add(line_str) curr_line = line_parser.Line( line_str=line_str, style_dict=style_dict, text_list=text_list, page_details=page_stats, ) if prev_line is None: # initialize memory of previous line. 
# this will update with join decisions list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "list_char": list_char, "fs": curr_line.visual_line.start_fs, "text_group_start_idx": text_group_start_idx, "block_list": curr_line.visual_line.text_list, "line": curr_line, "y": curr_line.visual_line.start_y, "group_id": group_id, } prev_line = curr_line block_idx += 1 # if (idx <= 3) or (idx >= len(lines) - 3): # line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip() # if line_without_numbers: # # track block_idx for de-duplication # line_set[line_without_numbers].append((page_idx, block_idx)) page_blocks.append(block) continue # print("--" * 50) # print(prev_line.line_type, "\n", prev_line.text) # print(prev_ents) # print(prev_line.visual_line.fw_list) # print(prev_line.visual_line.font_family) # print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text) # print(prev_line.visual_line.mode_fs) # print(curr_line.line_type, "\n", curr_line.text) # print(curr_ents) # print() # print(curr_line.visual_line.font_family) # print(curr_line.visual_line.mode_fs) # print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text) if ( len(prev_line.text) > 1 and len(curr_line.text) > 1 and prev_line.text[:2] == curr_line.text[:2] and prev_line.text[1] == " " and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit()) and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha()) ): curr_line.line_type = "list_item" curr_line.is_list_item = True curr_line.is_list_or_row = True if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["block_type"] = "list_item" page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() same_start_fs = ( abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5 ) same_end_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5 ) same_end_start_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5 ) prev_above_curr = ( True if prev_line.visual_line.end_y < curr_line.visual_line.start_y else False ) y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y top_overlap = compute_overlap_top_bottom( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) bottom_overlap = compute_bottom_top_overlap( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) prev_overlap_curr = True if bottom_overlap or top_overlap else False use_visual_join = True if prev_above_curr and prev_overlap_curr else False if not use_visual_join and prev_line.incomplete_line: join_font_spacing = True if not (prev_line.is_table_row or curr_line.is_table_row): if page_stats["n_lines"] <= 3: join_font_spacing = True else: join_font_spacing = check_page_spacing( prev_line, curr_line, page_stats["fs_and_diff_next_y"], ) # if the font is different and font-family is different different_font_family = ( 
curr_line.visual_line.font_family != prev_line.visual_line.font_family ) different_common_fs = ( prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs ) different_font = ( different_font_family and different_common_fs and not join_font_spacing ) # start and end characters are same font or the mode of fonts of both lines is the same same_font = ( (prev_line.visual_line.fs == curr_line.visual_line.fs) or (same_start_fs and same_end_fs) or same_end_start_fs or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs ) and not different_font prev_ents = ( len(prev_line.visual_line.text_list) if not prev_line.line_type == "list_item" else 0 ) curr_ents = ( len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0 ) ents_aligned = check_tr_alignment(prev_line, curr_line) is_incomplete_sent = ( prev_line.incomplete_line and not prev_line.ends_with_period or prev_line.ends_with_comma ) # logic using line after curr if idx + 1 < len(lines): # this is inefficent as line_parser is called twice, # once for next_line and once for curr_line. next_line = lines[idx + 1] # print("NEXT LINE\n", next_line['text']) next_line_str, next_style_dict, next_text_list = ( next_line["text"], next_line["style"], next_line["text_list"], ) next_line = line_parser.Line( line_str=next_line_str, style_dict=next_style_dict, text_list=next_text_list, page_details=page_stats, ) # if the last line was not a table, check if the next line is a table to avoid single tr if prev_line.line_type != "table_row" and not ents_aligned: # check if the next line is a table and matches curr_line next_line_tr = next_line.line_type == "table_row" or should_join_table( curr_line, next_line, False, ) if not next_line_tr and curr_line.line_type == "table_row": curr_line.line_type = "para" # if the next line is joinable by visual stats but prev and curr are not # don't join the line (only true by x-span check and y is below for prev cur) # if this is not true ignore the rule prev_not_above_next = ( next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y ) next_line_join = False if next_line and check_layout(prev_line, next_line, prev_not_above_next): next_line_join = check_page_spacing( curr_line, next_line, page_stats["fs_and_diff_next_y"], ) # if the prev line is not visually joinable and the curr_next is # make sure the prev_line doesn't join the curr_line curr_next_visual_join = not join_font_spacing and next_line_join # print() # print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line") # print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line) # print("join_font_spacing:,", join_font_spacing) is_incomplete = ( is_incomplete_sent or (join_font_spacing and not sentence_visual_end) or curr_line.continuing_line ) # print("is_incomplete", is_incomplete) has_overlap_with_min = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=True, ) > 0.7 ) is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0 is_visually_apart = (has_overlap_with_min and not is_below) or ( not has_overlap_with_min and is_below ) above_bold_below_not = ( prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0 ) has_overlap_with_max = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, 
prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=False, ) > 0.3 ) is_not_header_over_para = True if ( above_bold_below_not and not has_overlap_with_max and prev_line.line_type == "header" and not prev_line.incomplete_line ): is_not_header_over_para = False # print("header over para check") # print("""above_bold_below_not # and not has_overlap_with_max # and prev_line.line_type == "header" # """) # print(above_bold_below_not) # print(has_overlap_with_max, j) # print(prev_line.line_type == "header") # print() # print(is_not_header_over_para) ########### # List item if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]): prev_line.line_type = "list_item" curr_line.line_type = "list_item" curr_line.is_list_item = True # change prev_line to list item if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() page_blocks[-1]["block_type"] = "list_item" close_text_y = ( curr_line.visual_line.start_y - curr_line.visual_line.mode_fs - prev_line.visual_line.start_y - prev_line.visual_line.mode_fs ) <= 0 aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x title_text = False if len(lines) < 10: title_text = top_overlap == 1.0 and close_text_y and aligned_text visual_header = visual_header_check(prev_line, curr_line, same_font) list_item_rule = curr_line.has_list_char or ( curr_line.numbered_line and not ( (prev_line.incomplete_line and curr_line.continuing_line) or join_font_spacing ) ) last_2_block_tr = False if len(page_blocks) >= 2: last_block_tr = ( page_blocks[-1]["block_type"] == "table_row" and page_blocks[-2]["block_type"] == "table_row" ) if not last_block_tr and curr_line.line_type == "para": # check to join if prev_line.incomplete_line and curr_line.continuing_line: last_2_block_tr = True no_space_join = prev_line.ends_with_period and curr_line.text[0] != " " visual_header_by_stats = visual_header_from_stats( prev_line, curr_line, page_stats, ) header_join = False common_list = curr_line.has_list_char or prev_line.has_list_char if ( visual_header_by_stats and curr_line.incomplete_line and same_font and not (prev_line.is_table_row or curr_line.is_table_row or common_list) ): header_join = True # print("LINEJOIN CHECK") # print("positive\n", "*" * 10) # print(f"\nsame_font:{same_font}", # f"\nis_incomplete:{is_incomplete}", # f"\nis_not_header_over_para:{is_not_header_over_para}") # print("join_font_spacing", join_font_spacing) # print("header join", header_join) # print() # print("negative\n", "*" * 10) # print(f"\nis_visually_apart:{is_visually_apart}", # f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}", # f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}", # f"\ncurr_line table {curr_line.line_type == 'table_row'}", # f"\ncurr_line list {curr_line.is_list_item}", # f"\nvisual_header {visual_header}", # f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}') if ( same_font and not should_join_table(prev_line, curr_line, ents_aligned) and not (curr_line.line_type == "table_row" or list_item_rule) and not (prev_line.line_type == "table_row" and not last_2_block_tr) and is_incomplete and not curr_next_visual_join # is_visually_apart and not visual_header or not check_parentheses(prev_line.text) and is_not_header_over_para and not no_space_join or title_text or header_join ): # print("JOIN") if not is_visually_apart and 
bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False if page_stats["n_lines"] <= 3: page_blocks[-1]["block_type"] = "header" elif ( not prev_line.line_type == "list_item" ): # and not curr_line.visual_line.is_header: page_blocks[-1]["block_type"] = "para" new_text = formatter.connect( prev_line.text.rstrip(), curr_line.text.lstrip(), ) new_text_list = ( prev_line.visual_line.text_list + curr_line.visual_line.text_list ) # print("Max ex min ex assignment") max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x) min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x) prev_line_type = prev_line.line_type page_blocks[-1]["block_text"] = new_text prev_start_y = prev_line.visual_line.start_y curr_start_y = curr_line.visual_line.start_y prev_end_y = prev_line.visual_line.end_y wrapped_page = prev_line.visual_line.wrapped_page # pass the line parser attributes prev_line = curr_line # add appended text and text_list, preserve the line type prev_line.text = new_text prev_line.visual_line.start_y = prev_start_y prev_line.visual_line.text_list = new_text_list prev_line.line_type = prev_line_type prev_line.visual_line.min_x = min_x prev_line.visual_line.max_x = max_x prev_line.visual_line.wrapped_page = wrapped_page if curr_start_y < prev_end_y: prev_line.visual_line.wrapped_page = True # print(prev_start_y) # print("Join") # print() # print("-" * 50) # print() # new block else: # print("NEW block") # print("*" * 50) if not is_visually_apart and bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False # print("-"*50) colon_rule = ( prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents ) # normal case tab_check_join = { prev_line.visual_line.tab_count_join, prev_line.visual_line.tab_count, } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count} tab_check = sum(tab_check_join) > 0 # print("-+" * 50) # print("TAB POSITIONS") # print(prev_line.text) # print(prev_line.visual_line.start_x_list) # print(prev_line.visual_line.start_x_list_single_ent) # print(prev_line.visual_line.tab_count) # print(prev_line.visual_line.tab_count_join) # # print(curr_line.text) # print(curr_line.visual_line.start_x_list) # print(curr_line.visual_line.start_x_list_single_ent) # print(curr_line.visual_line.tab_count) # print(curr_line.visual_line.tab_count_join) # print("tabcheck", tab_check) # print("ents_aligned", ents_aligned) # print(prev_ents, curr_ents) # print(curr_line.visual_line.text_list) # print("-+" * 50) if visual_header_by_stats and prev_line.line_type != "table_row": page_blocks[-1]["block_type"] = "header" elif ( colon_rule and prev_ents == 1 and prev_line.line_type != "list_item" and not (prev_line.incomplete_line and curr_line.continuing_line) ): # print("Table Conversion") # print() # print("colon check") # print(prev_line.text.split(":")) # print(curr_line.text.split(":")) # print("TR1") new_text_list = prev_line.text.split(":") new_text_list = [new_text_list[0] + ":", new_text_list[1:]] page_blocks[-1]["block_type"] = "table_row" page_blocks[-1]["block_list"] = new_text_list if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" curr_line.is_list_or_row = True # print("Table Conversion!") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR3") elif (
tab_check and ents_aligned and prev_line.line_type != "list_item" ) or (colon_rule and not prev_line.incomplete_line): # print("Table Conversion") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR2") page_blocks[-1]["block_type"] = "table_row" if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" else: text_group_start = True text_group_start_idx = -1 list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx if (visual_header or visual_header_by_stats) and not ( prev_line.line_type == "list_item" or prev_line.line_type == "numbered_list_item" ): page_blocks[-1]["block_type"] = "header" # print() # print("*" * 40) # print("NEW BLOCK") # print() # print("*" * 40) # print(curr_line.line_type, curr_line.text) # group attribute if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0: group_id += 1 block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "text_group_start_idx": text_group_start_idx, "list_char": list_char, "group_id": group_id, "fs": curr_line.visual_line.start_fs, "x": curr_line.visual_line.start_x, "y": curr_line.visual_line.start_y, "line": curr_line, "block_list": curr_line.visual_line.text_list, } # This is to account for when the headers get false positive #TODO improve header code prev_text = page_blocks[-1]["block_text"] if page_blocks[-1]["block_type"] == "header" and ( len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16 ): page_blocks[-1]["block_type"] = "para" prev_line = curr_line block_idx += 1 page_blocks.append(block) # not too many blocks there may be title text missed if len(page_blocks) <= 2: for idx, block in enumerate(page_blocks): if "." 
not in block["block_text"] and len(block["block_text"].split()) < 10: page_blocks[idx]["block_type"] = "header" page_blocks = order_blocks(page_blocks) return page_blocks, line_set def clean_line(line): line = line.replace("\n", " ") line = line.replace("\t", " ") line = line.strip() return line def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) def connect(prev, curr): has_space = prev.endswith(" ") result = prev + ("" if has_space else " ") + curr return result def get_numbers(line): # test = re.compile(r"[0-9]+\.?[0-9]?") regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$") return regex.search(line) def check_block_join(prev_block, block): prev_text = prev_block["block_text"] curr_text = block["block_text"] blocks_are_paras = ( prev_block["block_type"] == "para" and block["block_type"] == "para" ) if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras: prev_line = line_parser.Line(prev_block["block_text"]) curr_line = line_parser.Line(block["block_text"]) if prev_line.incomplete_line or curr_line.continuing_line: return True return False def join_blocks(page_blocks, blocks): prev_last_block = page_blocks[-1][-1] # update page blocks and blocks # prev_blocks = page_blocks[-1] # last_prev_block = prev_blocks[-1] # check to join last_prev_block with first blocks[0] # if it's a join, pop the block and join, subtract block indexes prev_last_block["block_text"] = ( prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip() ) prev_last_block["block_list"].append(blocks[0]["block_list"]) # print(prev_block) page_blocks[-1][-1] = prev_last_block for block in blocks[1:]: block["block_idx"] -= 1 return page_blocks, blocks[1:] <fim_middle>line_set.add(line_without_numbers)
line_set.add(line_without_numbers)
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
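The row above completes the duplicate-line bookkeeping in clean_lines: a line is skipped when its digit-stripped form has already been seen, which drops repeated headers/footers such as page numbers. As a minimal standalone sketch of that technique (the helper name dedupe_lines is hypothetical, not code from the repository):

import re

def dedupe_lines(lines):
    # Compare lines with their digits removed, so variants such as
    # "Page 3 of 10" and "Page 7 of 10" collapse to the same key
    # and repeated headers/footers are dropped.
    seen = set()
    kept = []
    for line_str in lines:
        key = re.sub(r"\d+", "", line_str)
        if key in seen:
            continue
        seen.add(key)
        kept.append(line_str)
    return kept

# Usage: dedupe_lines(["Page 1 of 9", "Intro", "Page 2 of 9"])
# returns ["Page 1 of 9", "Intro"].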
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/text_ingestor.py def blocks_to_json(page_blocks): results = [] block_count = 0 for page_idx, blocks in enumerate(page_blocks): result = [] block_start = block_count header_block_idx = -1 header_block_text = "" for block_idx_in_page, block in enumerate(blocks): if block["block_text"]: block_sents = utils.sent_tokenize(block["block_text"]) # header_block_idx = block["header_block_idx"] if block["block_type"] == "header": header_block_idx = block["block_idx"] header_block_text = block["block_text"] result.append( { "block_text": block["block_text"], "block_idx": block["block_idx"], "block_sents": block_sents, "block_type": block["block_type"], "header_block_idx": block_start + header_block_idx, "page_idx": page_idx, "block_idx_in_page": block_start + block_idx_in_page, "header_text": header_block_text, "text_group_start_idx": block["text_group_start_idx"], "block_list": block["block_list"], "level":0, "block_class": block["block_class"] if "block_class" in block else {} }, ) block_count += 1 results.append(result) return results # nlm-ingestor/nlm_ingestor/ingestor/styling_utils.py def tops_2_dict(p_items): tops_2_info = defaultdict(list) idx_2_top = {} for p_idx, p_item in enumerate(p_items): if not p_item.text.strip(): continue style_str = p_item.attrs.get("style", "") if not style_str: continue # do not strip text as trailing white-space is used as a features text = unicodedata.normalize("NFKD", p_item.text) style = get_p_styling_dict(style_str) start_y = style["start_y"] tops_2_info[round(start_y, 0)].append((p_idx, text, style)) idx_2_top[p_idx] = round(start_y, 0) # print(tops_2_info) return tops_2_info, idx_2_top # nlm-ingestor/nlm_ingestor/ingestor/table_parser.py def __init__(self, infos): self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(logging.INFO) self.tables = {} self.two_column_table_idx = set() self.resolved_tables = set() if not infos: return table_infos = [] table_start_idx = None for idx, info in enumerate(infos): if info.get("is_table_start", False) and not info.get("has_merged_cells", False): self.logger.debug(f"Found table start from match_idx:{idx}") table_start_idx = idx table_infos.append(info) elif table_start_idx is not None and info.get("is_table_end", False): table_infos.append(info) self.logger.debug(f"Table ends with match_idx:{idx}") # resolve table try: df = self.resolve_table_from_infos(table_infos) if isinstance(df, pd.DataFrame): self.logger.info( f"Found table at match_idx:{idx} of shape {df.shape}", ) self.tables[table_start_idx] = df if ( df.shape[1] == 1 and df.columns[0] == "_UNKNOWN_COLUMN_1_" and df.index.name == "_UNKNOWN_COLUMN_0_" ): for info_idx in range(len(table_infos)): self.two_column_table_idx.add(idx - info_idx) self.resolved_tables.add(table_infos[0]["table_idx"]) else: self.logger.error( f"Found table at match_idx:{idx} but failed to parse\n{table_infos[:2]}", ) except Exception: self.logger.error( f"Failed to parse table:\n{table_infos[:2]}", exc_info=True, ) # reset table_infos = [] table_start_idx = None elif table_start_idx: table_infos.append(info) """ import logging import re from collections import Counter from collections import defaultdict from . import formatter from . import line_parser from . 
import patterns from nlm_ingestor.ingestor_utils import spell_utils from nlm_ingestor.ingestor_utils.utils import sent_tokenize logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) su = spell_utils.SpellUtil() def stem(line): line = line.replace("'s", "") line = line.replace("โ€™s", "") return line def check_parentheses(text): count = 0 for i in text: if i == "(": count += 1 elif i == ")": count -= 1 return count == 0 def nlm_tokenize(line): # print(line) tokens = [] if not line: line = "" line = line.lower() trans_table = line.maketrans("-/", " ") line = line.translate(trans_table) line = line.translate(str.maketrans("", "", "๏ฟฝ\\(*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\)'\"โ€”")) # line = patterns.num_unit.sub(r"100 \1", line) line = patterns.num_unit.sub(r"", line) line = stem(line) words = line.split() for word in words: if ( not word.isdigit() and not word.endswith("%") and not word.startswith("$") and not word.endswith("$") ): tokens.append(word) if len(tokens) == 0: tokens.append("unknown") return tokens # make sure that there is at least one word which is greater than two characters def find_floating_chars(line): words = line.split(" ") for word in words: if len(word) > 2: return False return True def is_table_row(line): line = line_parser.Line(line) return line.is_table_row def should_skip(line, xml=False): return len(line) <= 2 if not xml else len(line) == 0 def clean_lines(lines, xml=False): result = [] running_line = "" line_buffer = [] line_type = "para" header_block_idx = -1 block_idx = 0 line_set = set() for line_str in lines: # print(line_str) line_str = clean_line(line_str) if should_skip(line_str, xml=xml): continue line_without_numbers = re.sub(r"\d+", "", line_str) if line_without_numbers in line_set: continue else: line_set.add(line_without_numbers) curr_line = line_parser.Line(line_str) # this converst strings like 'e x e c u t i v e summary' to 'executive summary' if not xml and curr_line.has_spaced_characters: line_str = fix_spaced_characters(line_str) curr_line = line_parser.Line(line_str) if len(line_buffer) > 0: # find out if previous line was a discontinous line prev_line = line_buffer[-1] logger.debug("========") logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n") logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n") # keep connecting lines as long as they seem incomplete is_incomplete = prev_line.incomplete_line or ( len(line_buffer) > 1 and not prev_line.ends_with_period ) logger.debug( f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}", ) if ( is_incomplete and not (curr_line.is_list_or_row or curr_line.line_type == "list_item") ) or curr_line.continuing_line: logger.debug("connecting..") running_line = formatter.connect(running_line, curr_line.text) line_buffer.append(curr_line) # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers if not line_type == "list_item": line_type = "para" else: # commit the line and start a new line # remove different types of bulletted list (for better formatting) but do not touch numbered line logger.debug("starting new line..") # if line_type == "list_item": # running_line = running_line[1:].lstrip() if line_type == "header": header_block_idx = block_idx <fim_suffix> result.append(block) block_idx = block_idx + 1 running_line = curr_line.text line_buffer = [curr_line] line_type = curr_line.line_type logger.debug("========") else: running_line = curr_line.text line_type = 
curr_line.line_type line_buffer = [curr_line] if line_type == "list_item" and running_line[0] in "๏ฟฝ\\*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\'\"โ€”": running_line = running_line[1:].lstrip() block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) return result def line_list_check(prev_line, curr_line, list_char): # if prev_line is list_item and list_char matches curr_line if list_char == curr_line.text[0] and list_char not in ["โ€", "'", '"', "("]: return True # same char is alpha if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha(): if len(prev_line.text) >= 2 and prev_line.text[1].isupper(): # spell check first word first_word = prev_line.text.split(" ")[0] first_word = first_word.replace("'", "") correct_word = su.segment(first_word) if first_word[1:] == correct_word: return True # same char is not alpha but not digit if prev_line.text[0] == curr_line.text[0] and not ( prev_line.text[0].isalpha() or prev_line.text[0].isdigit() or list_char not in ["โ€", "'", '"', "("] ): return True return False def should_join_table(prev_line, curr_line, ents_aligned): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # print() # print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list) # check list of spaced words curr_line_ents = len(prev_line.visual_line.text_list) next_line_ents = len(curr_line.visual_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count) tab_match = ( prev_line.visual_line.tab_count == curr_line.visual_line.tab_count and curr_line.visual_line.tab_count > 0 ) # casing should also be the same same_case = ( prev_line.text[0].islower() == curr_line.text[0].islower() or prev_line.text[0].isupper() == curr_line.text[0].isupper() ) colon_check = ( prev_line.hit_colon and curr_line.hit_colon and prev_line and same_case and not prev_line.incomplete_line ) # if prev_line.hit_colon and curr_line.hit_colon: # print() # print("colon check") # print(prev_line.visual_line.text_list) # print(curr_line.visual_line.text_list) # col_check # print(tab_match, ent_match, colon_check) tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count return ( (tab_match and ent_match) or colon_check or (ents_aligned and ent_match and tab_check) ) def check_page_spacing(prev_line, curr_line, spacing_dict): # print("^"*50) # print("checking page stats") # print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text) # print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text) # print() diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y) # find best fs reference prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs} curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs} same_fs = prev_line_fs.intersection(curr_line_fs) fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs min_check = ( spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None ) max_check = ( spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None ) normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3 if min_check 
or normal_check or max_check: # get all fs in spacing dict # see if the diff top is a min # print("checking space dict") distance_list = [] for val in spacing_dict: if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2: distance_list.append((val, val[1])) # print(distance_list) val = min(distance_list) if len(distance_list) else [] if len(val): join_fs, join_top = val[0] if val[0] == (fs, diff_top): # or close # print("SHOULDJOIN") return True elif ( join_fs == fs and ((diff_top - 1) == join_top) or ((diff_top + 1) == join_top) ): return True return False def compute_overlap( start_x0: float, end_x0: float, start_x1: float, end_x1: float, divide_by_min=True, ) -> float: """ Computes the % of intersection (overlap) of two lines w.r.t. the shortest line """ width_x0 = abs(end_x0 - start_x0) width_x1 = abs(end_x1 - start_x1) if start_x0 <= start_x1 <= end_x0: intersect = min(abs(end_x0 - start_x1), width_x1) elif start_x0 <= end_x1 <= end_x0: intersect = min(abs(end_x1 - start_x0), width_x1) elif start_x1 <= start_x0 <= end_x0 <= end_x1: intersect = abs(end_x0 - start_x0) else: intersect = 0.0 if divide_by_min: intersect /= min(width_x0, width_x1) + 1e-5 else: intersect /= max(width_x0, width_x1) + 1e-5 return intersect def compute_overlap_top_bottom( start_x0: float, end_x0: float, start_x1: float, end_x1: float, ) -> float: """ This is different from the above function. Finds percentage overlap of top to bottom. Score of 100% is possible doesn't reference the shortest line """ width_x1 = abs(end_x1 - start_x1) if width_x1 == 0: return 0.0 if start_x0 <= start_x1: # measure from left to right if end_x1 <= end_x0: # if start and end both less, full in subset return 1.0 return (end_x1 - start_x0) / width_x1 else: # measure from bottom start if end_x1 <= start_x0: return 0.0 return (end_x1 - start_x0) / width_x1 def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1): """ This is different from the above function. Finds percentage overlap of top to bottom.
Score of 100% is possible doesn't reference the shortest line """ # print(start_x0, end_x0) # print(start_x1, end_x1) if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line # print() # print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0)) return (end_x1 - start_x1) / (end_x0 - start_x0) # other conditions # elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line # return # else: #to the right of bottom line return 1.0 # header check for lines with similar font def visual_header_check(prev_line, curr_line, same_font): # check top overlap (small) if the font size is bigger # print() # print("visual_header check:") # print("prev", prev_line.text) # print("checking", curr_line.text) # top also has to be higher # print("prev_line.visual_line.start_y, prev_line.visual_line.end_y") # print(prev_line.visual_line.start_y, prev_line.visual_line.end_y) # print(prev_line.visual_line.start_y, curr_line.visual_line.start_y) if prev_line.visual_line.wrapped_page: return False if prev_line.visual_line.start_y < curr_line.visual_line.start_y: prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x # print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x") # print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x) # print("curr_line.visual_line.min_x, curr_line.visual_line.max_x") # print(curr_line.visual_line.min_x, curr_line.visual_line.max_x) # print("prev_line_width / curr_line_width") # print(prev_line_width / curr_line_width) # print("prev_line_width, curr_line_width") # print(prev_line_width, curr_line_width) if curr_line_width == 0: return False # print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x)) if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x): if round(prev_line_width) == round(curr_line_width): # print() # print("NOT A HEADER1") return False offset = 0 # print(prev_line.visual_line.min_x, curr_line.visual_line.min_x) # print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x) if prev_line.visual_line.min_x <= curr_line.visual_line.min_x: offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset # print("(prev_line_width - offset) / curr_line_width") # print((prev_line_width - offset) / curr_line_width) overlap_percentage = (prev_line_width - offset) / curr_line_width different_font_style = ( prev_line.visual_line.fw != curr_line.visual_line.fw or prev_line.visual_line[1] != curr_line.visual_line[1] or prev_line.visual_line.fs > curr_line.visual_line.fs ) if ( overlap_percentage < 0.3 or (different_font_style and overlap_percentage < 0.6) or (prev_line.line_type == "header" and different_font_style) # or (prev_line.is_header and different_font_style) ): # print("HEADER INDENT", prev_line.is_header) # print("overlap rule::", (prev_line_width - offset) / curr_line_width) # print(True) return True # print(False) # print() # print("NOT A HEADER") return False def visual_header_from_stats(prev_line, curr_line, page_stats): prev_fs = prev_line.visual_line.fs curr_fs = curr_line.visual_line.fs median_val = round(page_stats["median_fs"]) max_val = round(max(page_stats["fs_list"])) max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True prev_fs_diff = round(prev_fs - median_val) curr_fs_diff = ( round(curr_fs -
median_val) else 0.8 ) # curr_fs is the median varied_set = len(set(page_stats["fs_list"])) >= 4 rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]]) unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"]) prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff # print("prev_fs, curr_fs", prev_fs, curr_fs) # print("unique text") # print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) ) # print("visual_header check", len(set(page_stats["fs_list"]))) # print("varied_set", varied_set, "unique_text", unique_text) # print(rounded_fs_count) # print() # close from max or far enough from median bigger_text = max_val_diff or ( prev_curr_ratio_from_median > 2 ) # TODO text must also be relatively uncommon if varied_set and (unique_text <= 0.08): if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3: # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True # header join if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1): # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True return False # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): def check_tr_alignment(prev_line, curr_line): # print("-=" * 50) # print("check_tr_alignment!") # print(prev_line.text) # print(curr_line.text) # print() prev_ents = len(prev_line.visual_line.text_list) curr_ents = len(curr_line.visual_line.text_list) prev_positions = prev_line.visual_line.start_x_list curr_positions = curr_line.visual_line.start_x_list prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent # print(prev_line_start_ents) # print(curr_line_start_ents) same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1 if len(prev_line_start_ents) == len(curr_line_start_ents): prev_positions = prev_line_start_ents curr_positions = curr_line_start_ents if len(prev_line_start_ents) == len(curr_positions) and len( prev_line_start_ents, ) != len( prev_positions, ): # joined p_tags prev_positions = prev_line_start_ents if not same_ents: # print("check_tr_alignment False1") # print(prev_ents, curr_ents) return False # print("CHECKING POSITIONS") # print(prev_positions) # print(curr_positions) for p_x, c_x in zip(prev_positions, curr_positions): p_x = round(p_x) c_x = round(c_x) if abs(p_x - c_x) > 100: # print("False") # print("check_tr_alignment False3") return False # print("check_tr_alignment True") return True def check_layout(prev_line, curr_line, prev_above_curr): prev_line_width = range( int(prev_line.visual_line.min_x), int(prev_line.visual_line.max_x), ) # weird edge case if not prev_line_width: prev_line_width = range( int(prev_line.visual_line.max_x), int(prev_line.visual_line.min_x), ) curr_line_width = range( int(curr_line.visual_line.min_x), int(curr_line.visual_line.max_x), ) prev_line_width = set(prev_line_width) prev_curr_overlap = prev_line_width.intersection(curr_line_width) if prev_curr_overlap and not prev_above_curr: # print(prev_line.text) # print(curr_line.text) # print("misplaced text group") # print() return True return False def order_blocks(blocks): block_group_dict = defaultdict(list) for idx, block in enumerate(blocks): # print(idx, "block-group", block["group_id"], block["block_type"], block['block_text'])
group_id = block["group_id"] block_group_dict[group_id].append(block) block_group_list = [] # list that holds tuples (group_id, y_pos) for block_group_id in block_group_dict: block_group_list.append( (block_group_id, block_group_dict[block_group_id][0]["y"]), ) # append starting y position of group block_group_list = sorted( block_group_list, key=lambda x: x[1], ) # sort block groups by y position # get list of ordered block group keys ordered_blocks = [] for block_group_id, y in block_group_list: ordered_blocks += block_group_dict[block_group_id] # for b in original_blocks: # re-index blocks and headers based off of new ordering header_idx = 0 for idx, block in enumerate(ordered_blocks): block["block_idx"] = idx if block["block_type"] == "header": header_idx = idx ordered_blocks[idx]["header_block_idx"] = header_idx return ordered_blocks def visual_clean_lines( lines, page_stats={}, page_info_dict={}, page_idx=0, line_set=set(), # must support "in" and .add(); a dict default here would break line_set.add ): page_blocks = [] header_block_idx = -1 block_idx = 0 # block_idx = page_idx style_dict = {} join_font_spacing = False prev_line = None text_list = [] prev_ents = 0 curr_ents = 0 is_incomplete = False colon_rule = False text_group_start = True text_group_start_idx = 0 prev_line = None next_line = None # for idx, line in enumerate(lines[12:14]): sentence_visual_end = False group_id = 0 for idx, line in enumerate(lines): # print(idx) line_str, style_dict, text_list = ( line["text"], line["style"], line["text_list"], ) line_str = " ".join(line_str.split()) if should_skip(line_str): continue if line_str in line_set: continue if len(line_str.split()) > 8: line_set.add(line_str) curr_line = line_parser.Line( line_str=line_str, style_dict=style_dict, text_list=text_list, page_details=page_stats, ) if prev_line is None: # initialize memory of previous line.
# this will update with join decisions list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "list_char": list_char, "fs": curr_line.visual_line.start_fs, "text_group_start_idx": text_group_start_idx, "block_list": curr_line.visual_line.text_list, "line": curr_line, "y": curr_line.visual_line.start_y, "group_id": group_id, } prev_line = curr_line block_idx += 1 # if (idx <= 3) or (idx >= len(lines) - 3): # line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip() # if line_without_numbers: # # track block_idx for de-duplication # line_set[line_without_numbers].append((page_idx, block_idx)) page_blocks.append(block) continue # print("--" * 50) # print(prev_line.line_type, "\n", prev_line.text) # print(prev_ents) # print(prev_line.visual_line.fw_list) # print(prev_line.visual_line.font_family) # print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text) # print(prev_line.visual_line.mode_fs) # print(curr_line.line_type, "\n", curr_line.text) # print(curr_ents) # print() # print(curr_line.visual_line.font_family) # print(curr_line.visual_line.mode_fs) # print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text) if ( len(prev_line.text) > 1 and len(curr_line.text) > 1 and prev_line.text[:2] == curr_line.text[:2] and prev_line.text[1] == " " and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit()) and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha()) ): curr_line.line_type = "list_item" curr_line.is_list_item = True curr_line.is_list_or_row = True if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["block_type"] = "list_item" page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() same_start_fs = ( abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5 ) same_end_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5 ) same_end_start_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5 ) prev_above_curr = ( True if prev_line.visual_line.end_y < curr_line.visual_line.start_y else False ) y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y top_overlap = compute_overlap_top_bottom( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) bottom_overlap = compute_bottom_top_overlap( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) prev_overlap_curr = True if bottom_overlap or top_overlap else False use_visual_join = True if prev_above_curr and prev_overlap_curr else False if not use_visual_join and prev_line.incomplete_line: join_font_spacing = True if not (prev_line.is_table_row or curr_line.is_table_row): if page_stats["n_lines"] <= 3: join_font_spacing = True else: join_font_spacing = check_page_spacing( prev_line, curr_line, page_stats["fs_and_diff_next_y"], ) # if the font is different and font-family is different different_font_family = ( 
curr_line.visual_line.font_family != prev_line.visual_line.font_family ) different_common_fs = ( prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs ) different_font = ( different_font_family and different_common_fs and not join_font_spacing ) # start and end characters are same font or the mode of fonts of both lines is the same same_font = ( (prev_line.visual_line.fs == curr_line.visual_line.fs) or (same_start_fs and same_end_fs) or same_end_start_fs or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs ) and not different_font prev_ents = ( len(prev_line.visual_line.text_list) if not prev_line.line_type == "list_item" else 0 ) curr_ents = ( len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0 ) ents_aligned = check_tr_alignment(prev_line, curr_line) is_incomplete_sent = ( prev_line.incomplete_line and not prev_line.ends_with_period or prev_line.ends_with_comma ) # logic using line after curr if idx + 1 < len(lines): # this is inefficent as line_parser is called twice, # once for next_line and once for curr_line. next_line = lines[idx + 1] # print("NEXT LINE\n", next_line['text']) next_line_str, next_style_dict, next_text_list = ( next_line["text"], next_line["style"], next_line["text_list"], ) next_line = line_parser.Line( line_str=next_line_str, style_dict=next_style_dict, text_list=next_text_list, page_details=page_stats, ) # if the last line was not a table, check if the next line is a table to avoid single tr if prev_line.line_type != "table_row" and not ents_aligned: # check if the next line is a table and matches curr_line next_line_tr = next_line.line_type == "table_row" or should_join_table( curr_line, next_line, False, ) if not next_line_tr and curr_line.line_type == "table_row": curr_line.line_type = "para" # if the next line is joinable by visual stats but prev and curr are not # don't join the line (only true by x-span check and y is below for prev cur) # if this is not true ignore the rule prev_not_above_next = ( next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y ) next_line_join = False if next_line and check_layout(prev_line, next_line, prev_not_above_next): next_line_join = check_page_spacing( curr_line, next_line, page_stats["fs_and_diff_next_y"], ) # if the prev line is not visually joinable and the curr_next is # make sure the prev_line doesn't join the curr_line curr_next_visual_join = not join_font_spacing and next_line_join # print() # print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line") # print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line) # print("join_font_spacing:,", join_font_spacing) is_incomplete = ( is_incomplete_sent or (join_font_spacing and not sentence_visual_end) or curr_line.continuing_line ) # print("is_incomplete", is_incomplete) has_overlap_with_min = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=True, ) > 0.7 ) is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0 is_visually_apart = (has_overlap_with_min and not is_below) or ( not has_overlap_with_min and is_below ) above_bold_below_not = ( prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0 ) has_overlap_with_max = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, 
prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=False, ) > 0.3 ) is_not_header_over_para = True if ( above_bold_below_not and not has_overlap_with_max and prev_line.line_type == "header" and not prev_line.incomplete_line ): is_not_header_over_para = False # print("header over para check") # print("""above_bold_below_not # and not has_overlap_with_max # and prev_line.line_type == "header" # """) # print(above_bold_below_not) # print(has_overlap_with_max, j) # print(prev_line.line_type == "header") # print() # print(is_not_header_over_para) ########### # List item if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]): prev_line.line_type = "list_item" curr_line.line_type = "list_item" curr_line.is_list_item = True # change prev_line to list item if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() page_blocks[-1]["block_type"] = "list_item" close_text_y = ( curr_line.visual_line.start_y - curr_line.visual_line.mode_fs - prev_line.visual_line.start_y - prev_line.visual_line.mode_fs ) <= 0 aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x title_text = False if len(lines) < 10: title_text = top_overlap == 1.0 and close_text_y and aligned_text visual_header = visual_header_check(prev_line, curr_line, same_font) list_item_rule = curr_line.has_list_char or ( curr_line.numbered_line and not ( (prev_line.incomplete_line and curr_line.continuing_line) or join_font_spacing ) ) last_2_block_tr = False if len(page_blocks) >= 2: last_block_tr = ( page_blocks[-1]["block_type"] == "table_row" and page_blocks[-2]["block_type"] == "table_row" ) if not last_block_tr and curr_line.line_type == "para": # check to join if prev_line.incomplete_line and curr_line.continuing_line: last_2_block_tr = True no_space_join = prev_line.ends_with_period and curr_line.text[0] != " " visual_header_by_stats = visual_header_from_stats( prev_line, curr_line, page_stats, ) header_join = False common_list = curr_line.has_list_char or prev_line.has_list_char if ( visual_header_by_stats and curr_line.incomplete_line and same_font and not (prev_line.is_table_row or curr_line.is_table_row or common_list) ): header_join = True # print("LINEJOIN CHECK") # print("positive\n", "*" * 10) # print(f"\nsame_font:{same_font}", # f"\nis_incomplete:{is_incomplete}", # f"\nis_not_header_over_para:{is_not_header_over_para}") # print("join_font_spacing", join_font_spacing) # print("header join", header_join) # print() # print("negative\n", "*" * 10) # print(f"\nis_visually_apart:{is_visually_apart}", # f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}", # f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}", # f"\ncurr_line table {curr_line.line_type == 'table_row'}", # f"\ncurr_line list {curr_line.is_list_item}", # f"\nvisual_header {visual_header}", # f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}') if ( same_font and not should_join_table(prev_line, curr_line, ents_aligned) and not (curr_line.line_type == "table_row" or list_item_rule) and not (prev_line.line_type == "table_row" and not last_2_block_tr) and is_incomplete and not curr_next_visual_join # is_visually_apart and not visual_header or not check_parentheses(prev_line.text) and is_not_header_over_para and not no_space_join or title_text or header_join ): # print("JOIN") if not is_visually_apart and 
bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False if page_stats["n_lines"] <= 3: page_blocks[-1]["block_type"] = "header" elif ( not prev_line.line_type == "list_item" ): # and not curr_line.visual_line.is_header: page_blocks[-1]["block_type"] = "para" new_text = formatter.connect( prev_line.text.rstrip(), curr_line.text.lstrip(), ) new_text_list = ( prev_line.visual_line.text_list + curr_line.visual_line.text_list ) # print("Max ex min ex assignment") max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x) min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x) prev_line_type = prev_line.line_type page_blocks[-1]["block_text"] = new_text prev_start_y = prev_line.visual_line.start_y curr_start_y = curr_line.visual_line.start_y prev_end_y = prev_line.visual_line.end_y wrapped_page = prev_line.visual_line.wrapped_page # pass the line parser attributes prev_line = curr_line # add appended text and text_list, preserve the line type prev_line.text = new_text prev_line.visual_line.start_y = prev_start_y prev_line.visual_line.text_list = new_text_list prev_line.line_type = prev_line_type prev_line.visual_line.min_x = min_x prev_line.visual_line.max_x = max_x prev_line.visual_line.wrapped_page = wrapped_page if curr_start_y < prev_end_y: prev_line.visual_line.wrapped_page = True # print(prev_start_y) # print("Join") # print() # print("-" * 50) # print() # new block else: # print("NEW block") # print("*" * 50) if not is_visually_apart and bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False # print("-"*50) colon_rule = ( prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents ) # normal case tab_check_join = { prev_line.visual_line.tab_count_join, prev_line.visual_line.tab_count, } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count} tab_check = sum(tab_check_join) > 0 # print("-+" * 50) # print("TAB POSITIONS") # print(prev_line.text) # print(prev_line.visual_line.start_x_list) # print(prev_line.visual_line.start_x_list_single_ent) # print(prev_line.visual_line.tab_count) # print(prev_line.visual_line.tab_count_join) # # print(curr_line.text) # print(curr_line.visual_line.start_x_list) # print(curr_line.visual_line.start_x_list_single_ent) # print(curr_line.visual_line.tab_count) # print(curr_line.visual_line.tab_count_join) # print("tabcheck", tab_check) # print("ents_aligned", ents_aligned) # print(prev_ents, curr_ents) # print(curr_line.visual_line.text_list) # print("-+" * 50) if visual_header_by_stats and prev_line.line_type != "table_row": page_blocks[-1]["block_type"] = "header" elif ( colon_rule and prev_ents == 1 and prev_line.line_type != "list_item" and not (prev_line.incomplete_line and curr_line.continuing_line) ): # print("Table Conversion") # print() # print("colon check") # print(prev_line.text.split(":")) # print(curr_line.text.split(":")) # print("TR1") new_text_list = prev_line.text.split(":") new_text_list = [new_text_list[0] + ":", new_text_list[1:]] page_blocks[-1]["block_type"] = "table_row" page_blocks[-1]["block_list"] = new_text_list if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" curr_line.is_list_or_row = True # print("Table Conversion!") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR3") elif (
tab_check and ents_aligned and prev_line.line_type != "list_item" ) or (colon_rule and not prev_line.incomplete_line): # print("Table Conversion") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR2") page_blocks[-1]["block_type"] = "table_row" if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" else: text_group_start = True text_group_start_idx = -1 list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx if (visual_header or visual_header_by_stats) and not ( prev_line.line_type == "list_item" or prev_line.line_type == "numbered_list_item" ): page_blocks[-1]["block_type"] = "header" # print() # print("*" * 40) # print("NEW BLOCK") # print() # print("*" * 40) # print(curr_line.line_type, curr_line.text) # group attribute if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0: group_id += 1 block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "text_group_start_idx": text_group_start_idx, "list_char": list_char, "group_id": group_id, "fs": curr_line.visual_line.start_fs, "x": curr_line.visual_line.start_x, "y": curr_line.visual_line.start_y, "line": curr_line, "block_list": curr_line.visual_line.text_list, } # This is to account for when the headers get false positive #TODO improve header code prev_text = page_blocks[-1]["block_text"] if page_blocks[-1]["block_type"] == "header" and ( len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16 ): page_blocks[-1]["block_type"] = "para" prev_line = curr_line block_idx += 1 page_blocks.append(block) # not too many blocks there may be title text missed if len(page_blocks) <= 2: for idx, block in enumerate(page_blocks): if "." 
not in block["block_text"] and len(block["block_text"].split()) < 10: page_blocks[idx]["block_type"] = "header" page_blocks = order_blocks(page_blocks) return page_blocks, line_set def clean_line(line): line = line.replace("\n", " ") line = line.replace("\t", " ") line = line.strip() return line def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) def connect(prev, curr): has_space = prev.endswith(" ") result = prev + ("" if has_space else " ") + curr return result def get_numbers(line): # test = re.compile(r"[0-9]+\.?[0-9]?") regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$") return regex.search(line) def check_block_join(prev_block, block): prev_text = prev_block["block_text"] curr_text = block["block_text"] blocks_are_paras = ( prev_block["block_type"] == "para" and block["block_type"] == "para" ) if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras: prev_line = line_parser.Line(prev_block["block_text"]) curr_line = line_parser.Line(block["block_text"]) if prev_line.incomplete_line or curr_line.continuing_line: return True return False def join_blocks(page_blocks, blocks): prev_last_block = page_blocks[-1][-1] # update page blocks and blocks # prev_blocks = page_blocks[-1] # last_prev_block = prev_blocks[-1] # check to join last_prev_block with first blocks[0] # if it's a join, pop the block and join, subtract block indexes prev_last_block["block_text"] = ( prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip() ) prev_last_block["block_list"].append(blocks[0]["block_list"]) # print(prev_block) page_blocks[-1][-1] = prev_last_block for block in blocks[1:]: block["block_idx"] -= 1 return page_blocks, blocks[1:] <fim_middle>block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, }
block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, }
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
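The row above completes the block-commit step in clean_lines: when a new line starts, the accumulated running_line is flushed as a block dict. A minimal sketch of that record follows; commit_block is a hypothetical helper name (not in the repository), but the dict keys mirror the completed target above:

def commit_block(block_idx, running_line, line_type, header_block_idx):
    # Shape of the record that clean_lines appends for each committed
    # run of connected lines; keys mirror the completed target above.
    return {
        "block_idx": block_idx,
        "block_text": running_line,
        "block_type": line_type,
        "text_group_start_idx": -1,
        "block_list": [],
        "header_block_idx": header_block_idx,
        "level": 0,
    }

# Usage: result.append(commit_block(0, "Executive Summary", "header", 0))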
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def get_row1(row): orignal_row = row words = row.split(" ") cells = [] try: row = processors_utils.super_replace(row, ["(", ")", ",", "$", "%"], "") tags = nltk.pos_tag(list(filter(None, row.split(" ")))) except Exception as e: logging.error(e) return [orignal_row] # "" strn = "" for i in range(len(tags)): # previous check tag = tags[i][1] word = words[i].lstrip().rstrip() proc_word = processors_utils.super_replace(word, ["(", ")", ",", "$", "%"], "") if len(word) & len(proc_word.replace(" ", "")): # print(proc_word) start_tag = nltk.pos_tag(proc_word[0])[0][1] end_tag = nltk.pos_tag(proc_word[-1])[0][1] else: start_tag = "CD" end_tag = "CD" if ((tag == "CD") | (tag == ":")) and ( (tag == ":") | ((start_tag == "CD") and (end_tag == "CD")) ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) strn = "" elif ( ((start_tag == "CD") and (end_tag == "CD")) & (word != "$") & (word == "%") ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) else: strn += word.lstrip().rstrip() + " " if type(cells) == str: cells = [cells] return cells # nlm-ingestor/nlm_ingestor/file_parser/tika_parser.py def find_tika_header(fp): try: with open(fp) as file: file_data = file.read() soup = BeautifulSoup(file_data, "html.parser") # print(str(soup.find_all('head')[0])) head = soup.find_all("head") return "org.apache.tika.parser" in str(head[0]) except Exception as e: logging.error(e) return False # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def format_tables(blocks_df): # columns block_text block_sents block_type # identify all tables in df table_indexes = blocks_df[blocks_df.block_type == "table_row"].index # if none are found if len(table_indexes) == 0: return blocks_df # group tables tables = group_tables(table_indexes) invalid = [] idx = [] for i in range(len(tables)): if len(tables[i]) < 2: invalid.append(i) else: idx.append(i) if len(invalid): blocks_df.loc[ np.concatenate(np.array(tables)[np.array(invalid)], axis=0), "block_type", ] = "para" table_rows = blocks_df[blocks_df.block_type == "table_row"] table_list = [] # print(table_rows) for table_idx in idx: table_idx = tables[table_idx] # print(table_rows.loc[table_idx].values,"\n") table = [] for row_idx, row in table_rows.loc[table_idx].iterrows(): table += [list(filter(None, get_row(row["block_text"].rstrip())))] # check if table is uniform table_cell_counts = [] if len(table) and (len(table[0])): table_cell_counts = [len(row) for row in table] try: cell_count = mode(table_cell_counts) except Exception as e: logging.error(e) cell_count = min(table_cell_counts) # non uniform row if (sum(table_cell_counts) % len(table[0])) and (cell_count): new_table = [] for row in table: # multiple rows in row if (len(row) > cell_count) and (len(row) % cell_count == 0): rows = int(len(row) / cell_count) for new_row in range(rows): new_row += 1 new_table_row = row[ new_row * cell_count - cell_count : new_row * cell_count ] new_table.append(new_table_row) else: new_table.append(row) table_list.append(new_table) else: table_list.append(table) else: table_list.append(table) replace = [] # check for valid tables if len(idx): for i in np.array(tables)[np.array(idx)]: replace.append(i) for i in range(len(replace)): blocks_df = blocks_df.drop(replace[i]) blocks_df.loc[replace[i][0]] = { "block_type": "table", "block_sents": 
table_list[i], "block_text": table_list[i], } return blocks_df.sort_index().reset_index(drop=True) else: return blocks_df """ import datetime import logging import math import re import string from nltk.corpus import stopwords from .patterns import abbreviations from .patterns import states from .patterns import states_abbreviations from .styling_utils import mode_of_list try: stop_words = set(stopwords.words("english")) except Exception as e: logging.error(e) import nltk stopwords = nltk.download("stopwords") stop_words = set(stopwords.words("english")) stop_words.add("per") continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~" list_chars = [ "โ€ข", "โžข", "*", "ฦ’", "๏‚ท", "๏‚ง", "๏ƒ˜", "๏ฎ", "ยป", "โ˜", "ยท", "๏ฟฝ", "โ–ช", "โ–ช", "โ—‹", "๔€ธ", "โ€“", ] list_types = { "โ€ข": "circle", "โžข": "wide_symbol_arrow", "*": "star", "ฦ’": "f", "๏‚ท": "clock", "๏‚ง": "small_square", "๏ƒ˜": "narrow_symbol_arrow", "๏ฎ": "large_square", "ยป": "double_arrow", "โ˜": "hollow_square", "ยท": "circle", "๏ฟฝ": "special_char", "โ–ช": "very_small_square", "โ–ช": "very_small_square", "โ—‹": "hollow_circle", "๔€ธ": "hollow_squere", "โ€“": "dash", "โ€’": "another-dash", "ฬถ": "underscore", } unicode_list_types = { "\\uf0b7": "โ€ข", "\\uf0fc": "๏ƒผ", } footnote_types = { "ยฉ" } ambiguous_list_chars = ["+", "-"] units = ["acres", "miles", "-"] # - could represent a null value in a row punctuations = string.punctuation + "โ€œ" start_quotations = ["'", '"', "โ€œ"] end_quotations = ["'", '"', "โ€"] """ Quote Pattern details: \\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly. ["โ€œ\'] ==> Quote patterns (?!\\D\\s) ==> Negative Lookahead for single character following the quote. Helps in removing words like Macy's, don't ... (?!\\d+) ==> Negative Lookahead for one or more digits following the pattern. Helps in removing words like '19, '2019 (.*?)[,;.]?[โ€"\'] ==> Match all other data. """ # Add / Modify Quotation pattern in ingestor_utils/utils.py also. 
quote_pattern = re.compile( r'(?:(?<=\W)|(?<=^))["โ€œโ€˜โ€™\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[โ€"โ€˜โ€™\']+', ) # (r'["โ€œ\'](.*?)[,;.]?[โ€"\']') single_char_pattern = re.compile(r'[a-zA-Z]') multi_char_pattern = re.compile(r'[a-zA-Z]+') roman_number_pattern = re.compile(r'[ixvIXV]+$') ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"โ€œโ€˜โ€™โ€\'\s]*$") conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"] class Word: def __init__(self, token): self.text = token self.is_percent = False self.is_number = False self.is_year = False # year does not count as a number self.is_dollar = False self.is_million = False self.is_billion = False self.is_thousand = False self.is_date_entry = False self.is_negative = False <fim_suffix> self.is_stop_word = self.text.lower() in stop_words self.is_number_range = False self.parts = [] text_without_punct = self.text while ( len(text_without_punct) > 1 and (text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations) ): text_without_punct = text_without_punct[0:-1] # remove leading unbalancced punctuations while ( len(text_without_punct) > 1 and (text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations) ): text_without_punct = text_without_punct[1:] self.text_without_punct = text_without_punct self.is_noun = self.text_without_punct[0].isupper() n = self.check_numeric() self.check_date() try: if n: n = round(float(n)) if n > 0: digits = int(math.log10(n)) + 1 elif n == 0: digits = 1 else: digits = int(math.log10(-n)) + 2 self.num_digits = digits if digits == 4 and self.text.replace(",", "") == self.text: self.is_year = True self.is_number = False else: self.num_digits = 0 except Exception as e: logging.error(e) self.num_digits = 0 def check_date(self): if "/" in self.text or "-" in self.text: text = self.text.replace("/", "-") date_patterns = [ "%b-%d", "%B-%d", "%B-%d-%y", "%B-%d-%Y", "%b-%d-%Y", "%b-%d-%y", "%m-%d", "%m-%d-%y", "%m-%d-%Y", ] for pat in date_patterns: try: datetime.datetime.strptime(text, pat) self.is_date_entry = True return except ValueError: pass else: self.is_date_entry = False def check_numeric(self): word = self.text.lower() if not word.isalpha(): if word.isprintable(): if not word.isnumeric(): if word.startswith("(") and word.endswith(")"): word = word[1:-1] if word.startswith("-"): self.is_negative = True word = word[1:] if word.startswith("$"): self.is_dollar = True word = word[1:] elif word.endswith("$"): self.is_dollar = True word = word[0:-1] elif word.endswith("%"): self.is_percent = True word = word[0:-1] elif word.endswith("m"): self.is_million = True elif word.endswith("bn"): self.is_billion = True if word.startswith("(") and word.endswith(")"): word = word[1:-1] word = word.replace(",", "") if word.isnumeric() or word.replace(".", "", 1).isnumeric(): self.is_number = True parts = word.split("-") if ( len(parts) == 2 and parts[0].isnumeric() and parts[1].isnumeric() ): self.is_number_range = True self.parts = parts else: self.is_number = True if self.is_number: numeric_part = word return numeric_part class Line: def __init__( self, line_str, text_list=[], style_dict={}, page_details={}, noun_chunk_ending_tokens=[], ): self.text = line_str.strip() self.visual_line = VisualLine(text_list, style_dict, page_details) self.words = [] self.is_independent = False self.is_header = False self.is_header_without_comma = False self.noun_chunks = [] self.quoted_words = quote_pattern.findall(self.text) 
self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens} self.parse_line() def check_header(self): # Section X, Article Y, Note 1 etc. first_word_header = self.first_word.lower() in ["section", "article", "note"] # If there are a certain percentage of title words (first letter capitalize) title_ratio = ( self.title_word_count / self.eff_word_count if self.eff_word_count > 0 else 1.0 ) # print(self.title_word_count, self.eff_word_count, title_ratio) # Section 1 is a header but Section 1: Hello 3 is not has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10 has_header_structure = ( (first_word_header or has_enough_titles) and self.number_count == 1 ) or self.numbered_line or self.text.isupper() # has_header_structure = has_header_structure and self.eff_word_count < last_word_number = ( self.last_word.lower() in units or self.last_word_number and not has_header_structure ) last_word_date = self.last_word_date and not has_header_structure # Find lines ending with sentence delimiter. But exclude text like "L.P." ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None sentence_structure = self.ends_with_period and not ( has_header_structure and title_ratio > 0.9 ) and ends_with_delim last_letter_is_punctuation = ( self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and ends_with_delim ) self.is_header_without_comma = ( not sentence_structure and not self.has_list_char and not self.first_char in footnote_types and has_enough_titles and not last_word_number and ( self.number_count == 0 or (has_header_structure and self.number_count <= 1) ) and not self.has_continuing_chars and not last_word_date and self.first_word_title and not self.last_word_is_stop_word and not self.is_zipcode_or_po and not last_letter_is_punctuation and not "://" in self.text # url pattern ) self.is_header = self.is_header_without_comma and \ ((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True) def check_ends_with_period(self): # punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.'] last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."] self.ends_with_period = self.last_char in ["."] and not last_word_is_title def check_table_row(self): if not self.is_header: value_count = ( self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ") ) word_symbols = self.word_count - self.dollar_sign_count if word_symbols == 0: word_symbols = 1 word_ratio = ( value_count + self.title_word_count + self.date_entry_count ) / word_symbols self.is_table_row = ( ( (value_count > 0 or self.date_entry_count > 0) and word_ratio > 0.7 and not self.ends_with_period and not self.is_zipcode_or_po ) and not self.last_word_is_stop_word or ("...." in self.text) ) else: self.is_table_row = False def check_list_item(self): text = self.text.strip() self.has_list_char = text[0] in list_types.keys() # if not self.has_list_char and text[0] in ambiguous_list_chars: # self.has_list_char = text[1:].strip()[0].isalpha() self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$" if self.is_list_item: self.list_type = list_types[text[0]] # matches 1.1 1.2.1 1 etc. 
def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # check if line is part of address def check_zipcode_or_pobox(self): # check if line matches format P.O. 
box xxxxx pobox = ( self.word_count == 3 and self.last_word_number and self.first_word.lower() in ["po", "p.o", "p.o."] ) # check if line is last part of address, matching format "city, state zipcode" zipcode = ( self.word_count < 7 # ensure line is standalone address, not part of larger sentence and ( self.contains_state # line contains comma followed by state name or abbreviation # line ends in zipcode, with format xxxxx or xxxxx-xxxx and ( (self.last_word_number or self.last_word[-4:].isdigit()) and ( (len(self.last_word) == 10 and self.last_word[-5] == "-") or len(self.last_word) == 5 ) ) and not self.ends_with_period ) ) self.is_zipcode_or_po = pobox or zipcode def set_line_type(self): line_type = "para" if self.is_table_row: line_type = "table_row" elif self.is_header: line_type = "header" elif self.is_list_item or self.numbered_line: line_type = "list_item" else: line_type = "para" self.line_type = line_type def parse_line(self): self.words = [] self.title_word_count = 0 self.alpha_count = 0 self.list_type = "" self.integer_numbered_line = False self.roman_numbered_line = False self.dot_numbered_line = False self.numbered_line = False self.stop_word_count = 0 self.dollar_count = 0 self.pct_count = 0 self.number_count = 0 self.last_word_number = False self.first_word_title = False self.letter_numbered_line = False self.ends_with_hyphen = False self.last_word_date = False self.is_reference_author_name = False self.date_entry_count = 0 self.last_word_is_stop_word = False # self.last_word in self.stopwords self.hit_colon = False self.is_zipcode_or_po = False self.contains_state = False self.addresses = [] # todo - this is a stopgap solution, need to make it more efficient tokens = self.text.split() self.length = len(self.text) self.word_count = len(tokens) self.dollar_sign_count = tokens.count("$") last_idx = self.word_count - 1 first_alpha_found = False prev_token_comma = False self.eff_length = 0 single_letter_word_count = 0 noun_chunk_buf = [] if self.length == 0: return for idx, token in enumerate(tokens): if token in unicode_list_types.keys(): token = unicode_list_types[token] if token.__contains__(":"): self.hit_colon = True # remove punctuation unless (word) or unless it is the first token or if it has colon last_char = token[-1] # remove punctuation unless (word) or unless it is the first token if ( (token[-1] in string.punctuation or token[-1] in end_quotations) and not (token[0] in string.punctuation or token[0] in start_quotations) and (not idx == 0 or token[-1] == ":") ): token = token[0:-1] if len(token) == 0: continue # if prev token contained comma, check if current token is state name if prev_token_comma and ( token.lower() in states or token.lower() in states_abbreviations ): self.contains_state = True prev_token_comma = False if prev_token_comma: prev_token_comma = False if last_char == ",": prev_token_comma = True if idx == 0 and not token.lower() == "i" and not token.lower() == "a": self.check_numbered_line(token) if token.istitle() or token.isupper(): # and not self.hit_colon: self.title_word_count = self.title_word_count + 1 if token.isalpha(): # if not self.hit_colon: self.alpha_count = self.alpha_count + 1 if not first_alpha_found: first_alpha_found = True if idx == 0: self.first_word_title = token[0].isupper() word = Word(token) if word.is_number: self.number_count = self.number_count + 1 if idx == last_idx: self.last_word_number = True if word.is_date_entry: self.date_entry_count += 1 if idx == last_idx: self.last_word_date = True if word.is_dollar: 
self.dollar_count = self.dollar_count + 1 if idx == last_idx: self.last_word_number = True if word.is_percent: self.pct_count = self.pct_count + 1 if idx == last_idx: self.last_word_number = True self.eff_length += word.length if word.length == 1: single_letter_word_count += 1 if word.is_stop_word: if not self.hit_colon: self.stop_word_count = self.stop_word_count + 1 if idx == last_idx and len(token) != 1 and not token.isupper(): self.last_word_is_stop_word = True if word.is_noun or word.text == "&": noun = word.text_without_punct prev_word = self.words[-1] if len(self.words) > 0 else None if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf: noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway if noun.endswith("'s"): noun = noun[0:-2] noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] elif ( "".join([x.lower() for x in noun if x not in {".", ","}]) in self.noun_chunk_ending_tokens ): noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] else: noun_chunk_buf.append(noun) elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]: noun_chunk_buf.append(word.text_without_punct) elif len(noun_chunk_buf): self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] self.words.append(word) if len(noun_chunk_buf) > 0: self.noun_chunks.append(" ".join(noun_chunk_buf)) self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks)))) self.first_word = tokens[0] self.last_word = tokens[-1] self.last_char = self.text[-1] self.ends_with_period = self.last_char == "." self.ends_with_comma = self.last_char == "," self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "." 
self.eff_word_count = self.alpha_count - self.stop_word_count self.check_ends_with_period() self.first_char = self.text[0] self.has_continuing_chars = not self.numbered_line and ( self.first_char.islower() or self.first_char in continuing_chars ) self.last_continuing_char = self.last_char in continuing_chars self.check_zipcode_or_pobox() self.check_list_item() self.check_header() self.check_table_row() self.separate_line = ( self.is_header or self.is_table_row or self.is_list_item or self.is_zipcode_or_po ) self.is_list_or_row = self.is_table_row or self.is_list_item self.is_header_or_row = ( self.is_header or self.is_table_row or self.is_zipcode_or_po ) self.ends_with_abbreviation = self.ends_with_period and ( (self.last_word.find(".") != len(self.last_word) - 1) or self.last_word.lower() in abbreviations or len(self.last_word) <= 3 ) self.incomplete_line = not self.is_header_or_row and ( not self.ends_with_period or self.ends_with_abbreviation or self.end_with_period_single_char ) self.continuing_line = self.has_continuing_chars and not self.separate_line self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8 self.set_line_type() if self.is_header or self.is_header_without_comma: if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2: self.is_reference_author_name = True self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list # print(self.separate_line) # self.continuing_line = not self.separate_line and def to_json(self): json_lp = dict(self.__dict__) del json_lp["visual_line"] words = [] for word in self.words: words.append(word.__dict__) json_lp["words"] = words return json_lp class VisualLine: def __init__(self, text_list=[], style_dict={}, page_stats={}): self.text_list = text_list self.start_x = None self.start_y = None self.end_x = None self.end_y = None self.fs = None self.fw = None self.start_fs = None self.end_fs = None self.diff_prev_y = None self.diff_next_y = None self.is_comparably_sized = False self.is_comparably_bolded = False self.is_prev_space_smallest = False self.is_next_space_smallest = False self.wrapped_page = False self.text = " ".join(self.text_list) if style_dict: self.start_x = style_dict["start_x"][0] self.start_y = style_dict["start_y"][0] self.end_x = style_dict["end_x"][-1] self.end_y = style_dict["end_y"][-1] self.fs = style_dict["line_fs"][0] self.fw = style_dict["line_fw"][0] self.diff_prev_y = style_dict["diff_prev_y"][0] self.diff_next_y = style_dict["diff_next_y"][0] self.font_family = ( style_dict["font_family"][0] if len(style_dict["font_family"]) else None ) self.font_style = ( style_dict["font_style"][0] if len(style_dict["font_style"]) else None ) self.min_x = ( self.start_x ) # these variables are adjustable during line joins for line width self.max_x = self.end_x self.start_x_list = style_dict["start_x"] # joined ents self.end_x_list = style_dict["end_x"] # joined ents self.start_x_list_single_ent = style_dict["start_x_list"][0] self.end_x_list_single_ent = style_dict["end_x_list"][0] self.mode_fs = mode_of_list(style_dict["line_fs"]) self.tab_count = 0 # calculates tabs for when tika misses word split if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent): self.start_end_list = list( zip(self.start_x_list_single_ent, self.end_x_list_single_ent), ) for word_x, next_word_x in zip( self.start_end_list[:-1], self.start_end_list[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = 
next_word_start_x - word_end_x if word_distance > 20: self.tab_count += 1 else: self.start_end_list = [] self.tab_count_join = 0 # tab count after join in ptolines # calculates tabs for when tika misses word split if len(self.start_x_list) == len(self.end_x_list): self.start_end_list_join = list( zip(self.start_x_list, self.end_x_list), ) for word_x, next_word_x in zip( self.start_end_list_join[:-1], self.start_end_list_join[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = next_word_start_x - word_end_x if word_distance > 20: self.tab_count_join += 1 else: self.start_end_list_join = [] if len(self.text.split()) == 2 and self.tab_count == 1: self.text_list = self.text.split() # Count tabs in text list, Eventually make it a function of font size self.start_fs = round(style_dict["start_fs"][0], 1) self.end_fs = round(style_dict["end_fs"][-1], 1) self.compute_visual_features(page_stats) def compute_visual_features(self, page_stats): # compute font size relative to most common font font_sizes_mode = page_stats["mode_fs"] if self.fs > (4 / 3) * font_sizes_mode: self.is_comparably_sized = True else: self.is_comparably_sized = False # compute font weight relative to 600.0 which has generally # been observed to correspond to bolding of some sort font_weights_mode = page_stats["mode_fw"] if font_weights_mode >= 600.0: self.is_comparably_bolded = False elif self.fw > 600.0: self.is_comparably_bolded = True # compare line height for similar type (same font) lines if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2: for k, v in page_stats["fs_and_diff_prev_y"].items(): if k == self.fs and 0 <= v < self.diff_prev_y: break else: self.is_prev_space_smallest = True if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2: for k, v in page_stats["fs_and_diff_next_y"].items(): if k == self.fs and 0 <= v < self.diff_next_y: break else: self.is_next_space_smallest = True def should_join_table(self, next_line): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # check list of spaced words curr_line_ents = len(self.text_list) next_line_ents = len(next_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # compare alignment of elements in both lists if ent_match: return return False def should_join_para(self): return False def should_join_header(self): return False def __str__(self): output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest}," output_str += f"\nfont_style = {self.font_style}" return output_str <fim_middle>self.length = len(self.text)
self.length = len(self.text)
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
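Aside: the quote_pattern regex quoted in the row above can be exercised on its own. A minimal sketch, assuming only the standard library; the pattern is copied from the line_parser.py content above, while the sample strings (and the expected results in the comments) are invented for illustration:

import re

# Same pattern as in line_parser.py; group 1 captures the text between quote pairs.
quote_pattern = re.compile(
    r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
)

samples = [
    'He said "hello there" and left.',    # plain quotes -> ['hello there']
    "Macy's results for '19 improved.",   # contraction and year -> []
    'She replied “all good” at once.',    # curly quotes -> ['all good']
]
for s in samples:
    print(quote_pattern.findall(s))

The lookbehind keeps apostrophes inside words like Macy's from opening a quote, and the (?!\d+) lookahead skips abbreviated years like '19, matching the "Quote Pattern details" docstring.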
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def get_row1(row): orignal_row = row words = row.split(" ") cells = [] try: row = processors_utils.super_replace(row, ["(", ")", ",", "$", "%"], "") tags = nltk.pos_tag(list(filter(None, row.split(" ")))) except Exception as e: logging.error(e) return [orignal_row] # "" strn = "" for i in range(len(tags)): # previous check tag = tags[i][1] word = words[i].lstrip().rstrip() proc_word = processors_utils.super_replace(word, ["(", ")", ",", "$", "%"], "") if len(word) & len(proc_word.replace(" ", "")): # print(proc_word) start_tag = nltk.pos_tag(proc_word[0])[0][1] end_tag = nltk.pos_tag(proc_word[-1])[0][1] else: start_tag = "CD" end_tag = "CD" if ((tag == "CD") | (tag == ":")) and ( (tag == ":") | ((start_tag == "CD") and (end_tag == "CD")) ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) strn = "" elif ( ((start_tag == "CD") and (end_tag == "CD")) & (word != "$") & (word == "%") ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) else: strn += word.lstrip().rstrip() + " " if type(cells) == str: cells = [cells] return cells # nlm-ingestor/nlm_ingestor/file_parser/tika_parser.py def find_tika_header(fp): try: with open(fp) as file: file_data = file.read() soup = BeautifulSoup(file_data, "html.parser") # print(str(soup.find_all('head')[0])) head = soup.find_all("head") return "org.apache.tika.parser" in str(head[0]) except Exception as e: logging.error(e) return False # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def format_tables(blocks_df): # columns block_text block_sents block_type # identify all tables in df table_indexes = blocks_df[blocks_df.block_type == "table_row"].index # if none are found if len(table_indexes) == 0: return blocks_df # group tables tables = group_tables(table_indexes) invalid = [] idx = [] for i in range(len(tables)): if len(tables[i]) < 2: invalid.append(i) else: idx.append(i) if len(invalid): blocks_df.loc[ np.concatenate(np.array(tables)[np.array(invalid)], axis=0), "block_type", ] = "para" table_rows = blocks_df[blocks_df.block_type == "table_row"] table_list = [] # print(table_rows) for table_idx in idx: table_idx = tables[table_idx] # print(table_rows.loc[table_idx].values,"\n") table = [] for row_idx, row in table_rows.loc[table_idx].iterrows(): table += [list(filter(None, get_row(row["block_text"].rstrip())))] # check if table is uniform table_cell_counts = [] if len(table) and (len(table[0])): table_cell_counts = [len(row) for row in table] try: cell_count = mode(table_cell_counts) except Exception as e: logging.error(e) cell_count = min(table_cell_counts) # non uniform row if (sum(table_cell_counts) % len(table[0])) and (cell_count): new_table = [] for row in table: # multiple rows in row if (len(row) > cell_count) and (len(row) % cell_count == 0): rows = int(len(row) / cell_count) for new_row in range(rows): new_row += 1 new_table_row = row[ new_row * cell_count - cell_count : new_row * cell_count ] new_table.append(new_table_row) else: new_table.append(row) table_list.append(new_table) else: table_list.append(table) else: table_list.append(table) replace = [] # check for valid tables if len(idx): for i in np.array(tables)[np.array(idx)]: replace.append(i) for i in range(len(replace)): blocks_df = blocks_df.drop(replace[i]) blocks_df.loc[replace[i][0]] = { "block_type": "table", "block_sents": 
table_list[i],
            "block_text": table_list[i],
        }
        return blocks_df.sort_index().reset_index(drop=True)
    else:
        return blocks_df
"""
import datetime
import logging
import math
import re
import string

from nltk.corpus import stopwords

from .patterns import abbreviations
from .patterns import states
from .patterns import states_abbreviations
from .styling_utils import mode_of_list

try:
    stop_words = set(stopwords.words("english"))
except Exception as e:
    logging.error(e)
    import nltk

    # download the missing corpus data, then retry; rebinding the
    # `stopwords` module to the download() return value would break the retry
    nltk.download("stopwords")
    stop_words = set(stopwords.words("english"))
stop_words.add("per")
continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~"
list_chars = [
    "•",
    "➢",
    "*",
    "ƒ",
    "",
    "",
    "",
    "",
    "»",
    "☐",
    "·",
    "�",
    "▪",
    "▪",
    "○",
    "",
    "–",
]
list_types = {
    "•": "circle",
    "➢": "wide_symbol_arrow",
    "*": "star",
    "ƒ": "f",
    "": "clock",
    "": "small_square",
    "": "narrow_symbol_arrow",
    "": "large_square",
    "»": "double_arrow",
    "☐": "hollow_square",
    "·": "circle",
    "�": "special_char",
    "▪": "very_small_square",
    "▪": "very_small_square",
    "○": "hollow_circle",
    "": "hollow_square",
    "–": "dash",
    "‒": "another-dash",
    "̶": "underscore",
}
unicode_list_types = {
    "\\uf0b7": "•",
    "\\uf0fc": "",
}
footnote_types = {
    "©"
}
ambiguous_list_chars = ["+", "-"]
units = ["acres", "miles", "-"]  # - could represent a null value in a row
punctuations = string.punctuation + "“"
start_quotations = ["'", '"', "“"]
end_quotations = ["'", '"', "”"]
"""
Quote Pattern details:
\\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly.
["“\'] ==> Quote patterns
(?!\\D\\s) ==> Negative Lookahead for single character following the quote.
Helps in removing words like Macy's, don't ...
(?!\\d+) ==> Negative Lookahead for one or more digits following the pattern.
Helps in removing words like '19, '2019
(.*?)[,;.]?[”"\'] ==> Match all other data.
"""
# Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile(
    r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
)  # (r'["“\'](.*?)[,;.]?[”"\']')
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]


class Word:
    def __init__(self, token):
        self.text = token
        self.is_percent = False
        self.is_number = False
        self.is_year = False  # year does not count as a number
        self.is_dollar = False
        self.is_million = False
        self.is_billion = False
        self.is_thousand = False
        self.is_date_entry = False
        self.is_negative = False
        self.length = len(self.text)
        self.is_stop_word = self.text.lower() in stop_words
        self.is_number_range = False
        self.parts = []
        text_without_punct = self.text
        while (
            len(text_without_punct) > 1
            and (text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations)
        ):
            text_without_punct = text_without_punct[0:-1]
        # remove leading unbalanced punctuations
        while (
            len(text_without_punct) > 1
            and (text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations)
        ):
            text_without_punct = text_without_punct[1:]
        self.text_without_punct = text_without_punct
        self.is_noun = self.text_without_punct[0].isupper()
        <fim_suffix>
        self.check_date()
        try:
            if n:
                n = round(float(n))
                if n > 0:
                    digits = int(math.log10(n)) + 1
                elif n == 0:
                    digits = 1
                else:
                    digits = int(math.log10(-n)) + 2
                self.num_digits = digits
                if digits == 4 and self.text.replace(",", "") == self.text:
                    self.is_year = True
                    self.is_number = False
            else:
                self.num_digits = 0
        except Exception as e:
            logging.error(e)
            self.num_digits = 0

    def check_date(self):
        if "/" in self.text or "-" in self.text:
            text = self.text.replace("/", "-")
            date_patterns = [
                "%b-%d",
                "%B-%d",
                "%B-%d-%y",
                "%B-%d-%Y",
                "%b-%d-%Y",
                "%b-%d-%y",
                "%m-%d",
                "%m-%d-%y",
                "%m-%d-%Y",
            ]
            for pat in date_patterns:
                try:
                    datetime.datetime.strptime(text, pat)
                    self.is_date_entry = True
                    return
                except ValueError:
                    pass
            else:
                self.is_date_entry = False

    def check_numeric(self):
        word = self.text.lower()
        if not word.isalpha():
            if word.isprintable():
                if not word.isnumeric():
                    if word.startswith("(") and word.endswith(")"):
                        word = word[1:-1]
                    if word.startswith("-"):
                        self.is_negative = True
                        word = word[1:]
                    if word.startswith("$"):
                        self.is_dollar = True
                        word = word[1:]
                    elif word.endswith("$"):
                        self.is_dollar = True
                        word = word[0:-1]
                    elif word.endswith("%"):
                        self.is_percent = True
                        word = word[0:-1]
                    elif word.endswith("m"):
                        self.is_million = True
                    elif word.endswith("bn"):
                        self.is_billion = True
                    if word.startswith("(") and word.endswith(")"):
                        word = word[1:-1]
                    word = word.replace(",", "")
                    if word.isnumeric() or word.replace(".", "", 1).isnumeric():
                        self.is_number = True
                    parts = word.split("-")
                    if (
                        len(parts) == 2
                        and parts[0].isnumeric()
                        and parts[1].isnumeric()
                    ):
                        self.is_number_range = True
                        self.parts = parts
                else:
                    self.is_number = True
        if self.is_number:
            numeric_part = word
            return numeric_part


class Line:
    def __init__(
        self,
        line_str,
        text_list=[],
        style_dict={},
        page_details={},
        noun_chunk_ending_tokens=[],
    ):
        self.text = line_str.strip()
        self.visual_line = VisualLine(text_list, style_dict, page_details)
        self.words = []
        self.is_independent = False
        self.is_header = False
        self.is_header_without_comma = False
        self.noun_chunks = []
        self.quoted_words = 
quote_pattern.findall(self.text) self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens} self.parse_line() def check_header(self): # Section X, Article Y, Note 1 etc. first_word_header = self.first_word.lower() in ["section", "article", "note"] # If there are a certain percentage of title words (first letter capitalize) title_ratio = ( self.title_word_count / self.eff_word_count if self.eff_word_count > 0 else 1.0 ) # print(self.title_word_count, self.eff_word_count, title_ratio) # Section 1 is a header but Section 1: Hello 3 is not has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10 has_header_structure = ( (first_word_header or has_enough_titles) and self.number_count == 1 ) or self.numbered_line or self.text.isupper() # has_header_structure = has_header_structure and self.eff_word_count < last_word_number = ( self.last_word.lower() in units or self.last_word_number and not has_header_structure ) last_word_date = self.last_word_date and not has_header_structure # Find lines ending with sentence delimiter. But exclude text like "L.P." ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None sentence_structure = self.ends_with_period and not ( has_header_structure and title_ratio > 0.9 ) and ends_with_delim last_letter_is_punctuation = ( self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and ends_with_delim ) self.is_header_without_comma = ( not sentence_structure and not self.has_list_char and not self.first_char in footnote_types and has_enough_titles and not last_word_number and ( self.number_count == 0 or (has_header_structure and self.number_count <= 1) ) and not self.has_continuing_chars and not last_word_date and self.first_word_title and not self.last_word_is_stop_word and not self.is_zipcode_or_po and not last_letter_is_punctuation and not "://" in self.text # url pattern ) self.is_header = self.is_header_without_comma and \ ((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True) def check_ends_with_period(self): # punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.'] last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."] self.ends_with_period = self.last_char in ["."] and not last_word_is_title def check_table_row(self): if not self.is_header: value_count = ( self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ") ) word_symbols = self.word_count - self.dollar_sign_count if word_symbols == 0: word_symbols = 1 word_ratio = ( value_count + self.title_word_count + self.date_entry_count ) / word_symbols self.is_table_row = ( ( (value_count > 0 or self.date_entry_count > 0) and word_ratio > 0.7 and not self.ends_with_period and not self.is_zipcode_or_po ) and not self.last_word_is_stop_word or ("...." in self.text) ) else: self.is_table_row = False def check_list_item(self): text = self.text.strip() self.has_list_char = text[0] in list_types.keys() # if not self.has_list_char and text[0] in ambiguous_list_chars: # self.has_list_char = text[1:].strip()[0].isalpha() self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$" if self.is_list_item: self.list_type = list_types[text[0]] # matches 1.1 1.2.1 1 etc. 
def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # check if line is part of address def check_zipcode_or_pobox(self): # check if line matches format P.O. 
box xxxxx pobox = ( self.word_count == 3 and self.last_word_number and self.first_word.lower() in ["po", "p.o", "p.o."] ) # check if line is last part of address, matching format "city, state zipcode" zipcode = ( self.word_count < 7 # ensure line is standalone address, not part of larger sentence and ( self.contains_state # line contains comma followed by state name or abbreviation # line ends in zipcode, with format xxxxx or xxxxx-xxxx and ( (self.last_word_number or self.last_word[-4:].isdigit()) and ( (len(self.last_word) == 10 and self.last_word[-5] == "-") or len(self.last_word) == 5 ) ) and not self.ends_with_period ) ) self.is_zipcode_or_po = pobox or zipcode def set_line_type(self): line_type = "para" if self.is_table_row: line_type = "table_row" elif self.is_header: line_type = "header" elif self.is_list_item or self.numbered_line: line_type = "list_item" else: line_type = "para" self.line_type = line_type def parse_line(self): self.words = [] self.title_word_count = 0 self.alpha_count = 0 self.list_type = "" self.integer_numbered_line = False self.roman_numbered_line = False self.dot_numbered_line = False self.numbered_line = False self.stop_word_count = 0 self.dollar_count = 0 self.pct_count = 0 self.number_count = 0 self.last_word_number = False self.first_word_title = False self.letter_numbered_line = False self.ends_with_hyphen = False self.last_word_date = False self.is_reference_author_name = False self.date_entry_count = 0 self.last_word_is_stop_word = False # self.last_word in self.stopwords self.hit_colon = False self.is_zipcode_or_po = False self.contains_state = False self.addresses = [] # todo - this is a stopgap solution, need to make it more efficient tokens = self.text.split() self.length = len(self.text) self.word_count = len(tokens) self.dollar_sign_count = tokens.count("$") last_idx = self.word_count - 1 first_alpha_found = False prev_token_comma = False self.eff_length = 0 single_letter_word_count = 0 noun_chunk_buf = [] if self.length == 0: return for idx, token in enumerate(tokens): if token in unicode_list_types.keys(): token = unicode_list_types[token] if token.__contains__(":"): self.hit_colon = True # remove punctuation unless (word) or unless it is the first token or if it has colon last_char = token[-1] # remove punctuation unless (word) or unless it is the first token if ( (token[-1] in string.punctuation or token[-1] in end_quotations) and not (token[0] in string.punctuation or token[0] in start_quotations) and (not idx == 0 or token[-1] == ":") ): token = token[0:-1] if len(token) == 0: continue # if prev token contained comma, check if current token is state name if prev_token_comma and ( token.lower() in states or token.lower() in states_abbreviations ): self.contains_state = True prev_token_comma = False if prev_token_comma: prev_token_comma = False if last_char == ",": prev_token_comma = True if idx == 0 and not token.lower() == "i" and not token.lower() == "a": self.check_numbered_line(token) if token.istitle() or token.isupper(): # and not self.hit_colon: self.title_word_count = self.title_word_count + 1 if token.isalpha(): # if not self.hit_colon: self.alpha_count = self.alpha_count + 1 if not first_alpha_found: first_alpha_found = True if idx == 0: self.first_word_title = token[0].isupper() word = Word(token) if word.is_number: self.number_count = self.number_count + 1 if idx == last_idx: self.last_word_number = True if word.is_date_entry: self.date_entry_count += 1 if idx == last_idx: self.last_word_date = True if word.is_dollar: 
self.dollar_count = self.dollar_count + 1 if idx == last_idx: self.last_word_number = True if word.is_percent: self.pct_count = self.pct_count + 1 if idx == last_idx: self.last_word_number = True self.eff_length += word.length if word.length == 1: single_letter_word_count += 1 if word.is_stop_word: if not self.hit_colon: self.stop_word_count = self.stop_word_count + 1 if idx == last_idx and len(token) != 1 and not token.isupper(): self.last_word_is_stop_word = True if word.is_noun or word.text == "&": noun = word.text_without_punct prev_word = self.words[-1] if len(self.words) > 0 else None if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf: noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway if noun.endswith("'s"): noun = noun[0:-2] noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] elif ( "".join([x.lower() for x in noun if x not in {".", ","}]) in self.noun_chunk_ending_tokens ): noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] else: noun_chunk_buf.append(noun) elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]: noun_chunk_buf.append(word.text_without_punct) elif len(noun_chunk_buf): self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] self.words.append(word) if len(noun_chunk_buf) > 0: self.noun_chunks.append(" ".join(noun_chunk_buf)) self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks)))) self.first_word = tokens[0] self.last_word = tokens[-1] self.last_char = self.text[-1] self.ends_with_period = self.last_char == "." self.ends_with_comma = self.last_char == "," self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "." 
self.eff_word_count = self.alpha_count - self.stop_word_count self.check_ends_with_period() self.first_char = self.text[0] self.has_continuing_chars = not self.numbered_line and ( self.first_char.islower() or self.first_char in continuing_chars ) self.last_continuing_char = self.last_char in continuing_chars self.check_zipcode_or_pobox() self.check_list_item() self.check_header() self.check_table_row() self.separate_line = ( self.is_header or self.is_table_row or self.is_list_item or self.is_zipcode_or_po ) self.is_list_or_row = self.is_table_row or self.is_list_item self.is_header_or_row = ( self.is_header or self.is_table_row or self.is_zipcode_or_po ) self.ends_with_abbreviation = self.ends_with_period and ( (self.last_word.find(".") != len(self.last_word) - 1) or self.last_word.lower() in abbreviations or len(self.last_word) <= 3 ) self.incomplete_line = not self.is_header_or_row and ( not self.ends_with_period or self.ends_with_abbreviation or self.end_with_period_single_char ) self.continuing_line = self.has_continuing_chars and not self.separate_line self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8 self.set_line_type() if self.is_header or self.is_header_without_comma: if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2: self.is_reference_author_name = True self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list # print(self.separate_line) # self.continuing_line = not self.separate_line and def to_json(self): json_lp = dict(self.__dict__) del json_lp["visual_line"] words = [] for word in self.words: words.append(word.__dict__) json_lp["words"] = words return json_lp class VisualLine: def __init__(self, text_list=[], style_dict={}, page_stats={}): self.text_list = text_list self.start_x = None self.start_y = None self.end_x = None self.end_y = None self.fs = None self.fw = None self.start_fs = None self.end_fs = None self.diff_prev_y = None self.diff_next_y = None self.is_comparably_sized = False self.is_comparably_bolded = False self.is_prev_space_smallest = False self.is_next_space_smallest = False self.wrapped_page = False self.text = " ".join(self.text_list) if style_dict: self.start_x = style_dict["start_x"][0] self.start_y = style_dict["start_y"][0] self.end_x = style_dict["end_x"][-1] self.end_y = style_dict["end_y"][-1] self.fs = style_dict["line_fs"][0] self.fw = style_dict["line_fw"][0] self.diff_prev_y = style_dict["diff_prev_y"][0] self.diff_next_y = style_dict["diff_next_y"][0] self.font_family = ( style_dict["font_family"][0] if len(style_dict["font_family"]) else None ) self.font_style = ( style_dict["font_style"][0] if len(style_dict["font_style"]) else None ) self.min_x = ( self.start_x ) # these variables are adjustable during line joins for line width self.max_x = self.end_x self.start_x_list = style_dict["start_x"] # joined ents self.end_x_list = style_dict["end_x"] # joined ents self.start_x_list_single_ent = style_dict["start_x_list"][0] self.end_x_list_single_ent = style_dict["end_x_list"][0] self.mode_fs = mode_of_list(style_dict["line_fs"]) self.tab_count = 0 # calculates tabs for when tika misses word split if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent): self.start_end_list = list( zip(self.start_x_list_single_ent, self.end_x_list_single_ent), ) for word_x, next_word_x in zip( self.start_end_list[:-1], self.start_end_list[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = 
next_word_start_x - word_end_x if word_distance > 20: self.tab_count += 1 else: self.start_end_list = [] self.tab_count_join = 0 # tab count after join in ptolines # calculates tabs for when tika misses word split if len(self.start_x_list) == len(self.end_x_list): self.start_end_list_join = list( zip(self.start_x_list, self.end_x_list), ) for word_x, next_word_x in zip( self.start_end_list_join[:-1], self.start_end_list_join[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = next_word_start_x - word_end_x if word_distance > 20: self.tab_count_join += 1 else: self.start_end_list_join = [] if len(self.text.split()) == 2 and self.tab_count == 1: self.text_list = self.text.split() # Count tabs in text list, Eventually make it a function of font size self.start_fs = round(style_dict["start_fs"][0], 1) self.end_fs = round(style_dict["end_fs"][-1], 1) self.compute_visual_features(page_stats) def compute_visual_features(self, page_stats): # compute font size relative to most common font font_sizes_mode = page_stats["mode_fs"] if self.fs > (4 / 3) * font_sizes_mode: self.is_comparably_sized = True else: self.is_comparably_sized = False # compute font weight relative to 600.0 which has generally # been observed to correspond to bolding of some sort font_weights_mode = page_stats["mode_fw"] if font_weights_mode >= 600.0: self.is_comparably_bolded = False elif self.fw > 600.0: self.is_comparably_bolded = True # compare line height for similar type (same font) lines if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2: for k, v in page_stats["fs_and_diff_prev_y"].items(): if k == self.fs and 0 <= v < self.diff_prev_y: break else: self.is_prev_space_smallest = True if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2: for k, v in page_stats["fs_and_diff_next_y"].items(): if k == self.fs and 0 <= v < self.diff_next_y: break else: self.is_next_space_smallest = True def should_join_table(self, next_line): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # check list of spaced words curr_line_ents = len(self.text_list) next_line_ents = len(next_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # compare alignment of elements in both lists if ent_match: return return False def should_join_para(self): return False def should_join_header(self): return False def __str__(self): output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest}," output_str += f"\nfont_style = {self.font_style}" return output_str <fim_middle>n = self.check_numeric()
n = self.check_numeric()
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
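The held-out statement in this row, n = self.check_numeric(), feeds the digit-count and year heuristic at the top of Word.__init__. A simplified, self-contained sketch of that heuristic; classify_numeric is an invented name used only for illustration, and negative values and unit suffixes such as "m"/"bn" are deliberately ignored here:

import math

def classify_numeric(token):
    # Strip grouping commas and common currency/bracket/percent characters,
    # roughly as Word.check_numeric does.
    word = token.lower().replace(",", "").lstrip("$(").rstrip(")%")
    if not (word.isnumeric() or word.replace(".", "", 1).isnumeric()):
        return {"is_number": False, "is_year": False, "num_digits": 0}
    n = round(float(word))
    digits = int(math.log10(n)) + 1 if n > 0 else 1
    # Word.__init__ treats a 4-digit value written without a thousands
    # separator (e.g. "2019", but not "2,019") as a year rather than a number.
    is_year = digits == 4 and "," not in token
    return {"is_number": not is_year, "is_year": is_year, "num_digits": digits}

for tok in ["2019", "2,019", "$450", "3.5"]:
    print(tok, classify_numeric(tok))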
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def get_row1(row): orignal_row = row words = row.split(" ") cells = [] try: row = processors_utils.super_replace(row, ["(", ")", ",", "$", "%"], "") tags = nltk.pos_tag(list(filter(None, row.split(" ")))) except Exception as e: logging.error(e) return [orignal_row] # "" strn = "" for i in range(len(tags)): # previous check tag = tags[i][1] word = words[i].lstrip().rstrip() proc_word = processors_utils.super_replace(word, ["(", ")", ",", "$", "%"], "") if len(word) & len(proc_word.replace(" ", "")): # print(proc_word) start_tag = nltk.pos_tag(proc_word[0])[0][1] end_tag = nltk.pos_tag(proc_word[-1])[0][1] else: start_tag = "CD" end_tag = "CD" if ((tag == "CD") | (tag == ":")) and ( (tag == ":") | ((start_tag == "CD") and (end_tag == "CD")) ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) strn = "" elif ( ((start_tag == "CD") and (end_tag == "CD")) & (word != "$") & (word == "%") ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) else: strn += word.lstrip().rstrip() + " " if type(cells) == str: cells = [cells] return cells # nlm-ingestor/nlm_ingestor/file_parser/tika_parser.py def find_tika_header(fp): try: with open(fp) as file: file_data = file.read() soup = BeautifulSoup(file_data, "html.parser") # print(str(soup.find_all('head')[0])) head = soup.find_all("head") return "org.apache.tika.parser" in str(head[0]) except Exception as e: logging.error(e) return False # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def format_tables(blocks_df): # columns block_text block_sents block_type # identify all tables in df table_indexes = blocks_df[blocks_df.block_type == "table_row"].index # if none are found if len(table_indexes) == 0: return blocks_df # group tables tables = group_tables(table_indexes) invalid = [] idx = [] for i in range(len(tables)): if len(tables[i]) < 2: invalid.append(i) else: idx.append(i) if len(invalid): blocks_df.loc[ np.concatenate(np.array(tables)[np.array(invalid)], axis=0), "block_type", ] = "para" table_rows = blocks_df[blocks_df.block_type == "table_row"] table_list = [] # print(table_rows) for table_idx in idx: table_idx = tables[table_idx] # print(table_rows.loc[table_idx].values,"\n") table = [] for row_idx, row in table_rows.loc[table_idx].iterrows(): table += [list(filter(None, get_row(row["block_text"].rstrip())))] # check if table is uniform table_cell_counts = [] if len(table) and (len(table[0])): table_cell_counts = [len(row) for row in table] try: cell_count = mode(table_cell_counts) except Exception as e: logging.error(e) cell_count = min(table_cell_counts) # non uniform row if (sum(table_cell_counts) % len(table[0])) and (cell_count): new_table = [] for row in table: # multiple rows in row if (len(row) > cell_count) and (len(row) % cell_count == 0): rows = int(len(row) / cell_count) for new_row in range(rows): new_row += 1 new_table_row = row[ new_row * cell_count - cell_count : new_row * cell_count ] new_table.append(new_table_row) else: new_table.append(row) table_list.append(new_table) else: table_list.append(table) else: table_list.append(table) replace = [] # check for valid tables if len(idx): for i in np.array(tables)[np.array(idx)]: replace.append(i) for i in range(len(replace)): blocks_df = blocks_df.drop(replace[i]) blocks_df.loc[replace[i][0]] = { "block_type": "table", "block_sents": 
table_list[i],
            "block_text": table_list[i],
        }
        return blocks_df.sort_index().reset_index(drop=True)
    else:
        return blocks_df
"""
import datetime
import logging
import math
import re
import string

from nltk.corpus import stopwords

from .patterns import abbreviations
from .patterns import states
from .patterns import states_abbreviations
from .styling_utils import mode_of_list

try:
    stop_words = set(stopwords.words("english"))
except Exception as e:
    logging.error(e)
    import nltk

    # download the missing corpus data, then retry; rebinding the
    # `stopwords` module to the download() return value would break the retry
    nltk.download("stopwords")
    stop_words = set(stopwords.words("english"))
stop_words.add("per")
continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~"
list_chars = [
    "•",
    "➢",
    "*",
    "ƒ",
    "",
    "",
    "",
    "",
    "»",
    "☐",
    "·",
    "�",
    "▪",
    "▪",
    "○",
    "",
    "–",
]
list_types = {
    "•": "circle",
    "➢": "wide_symbol_arrow",
    "*": "star",
    "ƒ": "f",
    "": "clock",
    "": "small_square",
    "": "narrow_symbol_arrow",
    "": "large_square",
    "»": "double_arrow",
    "☐": "hollow_square",
    "·": "circle",
    "�": "special_char",
    "▪": "very_small_square",
    "▪": "very_small_square",
    "○": "hollow_circle",
    "": "hollow_square",
    "–": "dash",
    "‒": "another-dash",
    "̶": "underscore",
}
unicode_list_types = {
    "\\uf0b7": "•",
    "\\uf0fc": "",
}
footnote_types = {
    "©"
}
ambiguous_list_chars = ["+", "-"]
units = ["acres", "miles", "-"]  # - could represent a null value in a row
punctuations = string.punctuation + "“"
start_quotations = ["'", '"', "“"]
end_quotations = ["'", '"', "”"]
"""
Quote Pattern details:
\\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly.
["“\'] ==> Quote patterns
(?!\\D\\s) ==> Negative Lookahead for single character following the quote.
Helps in removing words like Macy's, don't ...
(?!\\d+) ==> Negative Lookahead for one or more digits following the pattern.
Helps in removing words like '19, '2019
(.*?)[,;.]?[”"\'] ==> Match all other data.
"""
# Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile(
    r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
)  # (r'["“\'](.*?)[,;.]?[”"\']')
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]


class Word:
    def __init__(self, token):
        <fim_suffix>
        self.is_percent = False
        self.is_number = False
        self.is_year = False  # year does not count as a number
        self.is_dollar = False
        self.is_million = False
        self.is_billion = False
        self.is_thousand = False
        self.is_date_entry = False
        self.is_negative = False
        self.length = len(self.text)
        self.is_stop_word = self.text.lower() in stop_words
        self.is_number_range = False
        self.parts = []
        text_without_punct = self.text
        while (
            len(text_without_punct) > 1
            and (text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations)
        ):
            text_without_punct = text_without_punct[0:-1]
        # remove leading unbalanced punctuations
        while (
            len(text_without_punct) > 1
            and (text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations)
        ):
            text_without_punct = text_without_punct[1:]
        self.text_without_punct = text_without_punct
        self.is_noun = self.text_without_punct[0].isupper()
        n = self.check_numeric()
        self.check_date()
        try:
            if n:
                n = round(float(n))
                if n > 0:
                    digits = int(math.log10(n)) + 1
                elif n == 0:
                    digits = 1
                else:
                    digits = int(math.log10(-n)) + 2
                self.num_digits = digits
                if digits == 4 and self.text.replace(",", "") == self.text:
                    self.is_year = True
                    self.is_number = False
            else:
                self.num_digits = 0
        except Exception as e:
            logging.error(e)
            self.num_digits = 0

    def check_date(self):
        if "/" in self.text or "-" in self.text:
            text = self.text.replace("/", "-")
            date_patterns = [
                "%b-%d",
                "%B-%d",
                "%B-%d-%y",
                "%B-%d-%Y",
                "%b-%d-%Y",
                "%b-%d-%y",
                "%m-%d",
                "%m-%d-%y",
                "%m-%d-%Y",
            ]
            for pat in date_patterns:
                try:
                    datetime.datetime.strptime(text, pat)
                    self.is_date_entry = True
                    return
                except ValueError:
                    pass
            else:
                self.is_date_entry = False

    def check_numeric(self):
        word = self.text.lower()
        if not word.isalpha():
            if word.isprintable():
                if not word.isnumeric():
                    if word.startswith("(") and word.endswith(")"):
                        word = word[1:-1]
                    if word.startswith("-"):
                        self.is_negative = True
                        word = word[1:]
                    if word.startswith("$"):
                        self.is_dollar = True
                        word = word[1:]
                    elif word.endswith("$"):
                        self.is_dollar = True
                        word = word[0:-1]
                    elif word.endswith("%"):
                        self.is_percent = True
                        word = word[0:-1]
                    elif word.endswith("m"):
                        self.is_million = True
                    elif word.endswith("bn"):
                        self.is_billion = True
                    if word.startswith("(") and word.endswith(")"):
                        word = word[1:-1]
                    word = word.replace(",", "")
                    if word.isnumeric() or word.replace(".", "", 1).isnumeric():
                        self.is_number = True
                    parts = word.split("-")
                    if (
                        len(parts) == 2
                        and parts[0].isnumeric()
                        and parts[1].isnumeric()
                    ):
                        self.is_number_range = True
                        self.parts = parts
                else:
                    self.is_number = True
        if self.is_number:
            numeric_part = word
            return numeric_part


class Line:
    def __init__(
        self,
        line_str,
        text_list=[],
        style_dict={},
        page_details={},
        noun_chunk_ending_tokens=[],
    ):
        self.text = line_str.strip()
        self.visual_line = VisualLine(text_list, style_dict, page_details)
        self.words = []
        self.is_independent = False
        self.is_header = False
        self.is_header_without_comma = False
        self.noun_chunks = []
        self.quoted_words = 
quote_pattern.findall(self.text) self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens} self.parse_line() def check_header(self): # Section X, Article Y, Note 1 etc. first_word_header = self.first_word.lower() in ["section", "article", "note"] # If there are a certain percentage of title words (first letter capitalize) title_ratio = ( self.title_word_count / self.eff_word_count if self.eff_word_count > 0 else 1.0 ) # print(self.title_word_count, self.eff_word_count, title_ratio) # Section 1 is a header but Section 1: Hello 3 is not has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10 has_header_structure = ( (first_word_header or has_enough_titles) and self.number_count == 1 ) or self.numbered_line or self.text.isupper() # has_header_structure = has_header_structure and self.eff_word_count < last_word_number = ( self.last_word.lower() in units or self.last_word_number and not has_header_structure ) last_word_date = self.last_word_date and not has_header_structure # Find lines ending with sentence delimiter. But exclude text like "L.P." ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None sentence_structure = self.ends_with_period and not ( has_header_structure and title_ratio > 0.9 ) and ends_with_delim last_letter_is_punctuation = ( self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and ends_with_delim ) self.is_header_without_comma = ( not sentence_structure and not self.has_list_char and not self.first_char in footnote_types and has_enough_titles and not last_word_number and ( self.number_count == 0 or (has_header_structure and self.number_count <= 1) ) and not self.has_continuing_chars and not last_word_date and self.first_word_title and not self.last_word_is_stop_word and not self.is_zipcode_or_po and not last_letter_is_punctuation and not "://" in self.text # url pattern ) self.is_header = self.is_header_without_comma and \ ((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True) def check_ends_with_period(self): # punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.'] last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."] self.ends_with_period = self.last_char in ["."] and not last_word_is_title def check_table_row(self): if not self.is_header: value_count = ( self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ") ) word_symbols = self.word_count - self.dollar_sign_count if word_symbols == 0: word_symbols = 1 word_ratio = ( value_count + self.title_word_count + self.date_entry_count ) / word_symbols self.is_table_row = ( ( (value_count > 0 or self.date_entry_count > 0) and word_ratio > 0.7 and not self.ends_with_period and not self.is_zipcode_or_po ) and not self.last_word_is_stop_word or ("...." in self.text) ) else: self.is_table_row = False def check_list_item(self): text = self.text.strip() self.has_list_char = text[0] in list_types.keys() # if not self.has_list_char and text[0] in ambiguous_list_chars: # self.has_list_char = text[1:].strip()[0].isalpha() self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$" if self.is_list_item: self.list_type = list_types[text[0]] # matches 1.1 1.2.1 1 etc. 
def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # check if line is part of address def check_zipcode_or_pobox(self): # check if line matches format P.O. 
box xxxxx pobox = ( self.word_count == 3 and self.last_word_number and self.first_word.lower() in ["po", "p.o", "p.o."] ) # check if line is last part of address, matching format "city, state zipcode" zipcode = ( self.word_count < 7 # ensure line is standalone address, not part of larger sentence and ( self.contains_state # line contains comma followed by state name or abbreviation # line ends in zipcode, with format xxxxx or xxxxx-xxxx and ( (self.last_word_number or self.last_word[-4:].isdigit()) and ( (len(self.last_word) == 10 and self.last_word[-5] == "-") or len(self.last_word) == 5 ) ) and not self.ends_with_period ) ) self.is_zipcode_or_po = pobox or zipcode def set_line_type(self): line_type = "para" if self.is_table_row: line_type = "table_row" elif self.is_header: line_type = "header" elif self.is_list_item or self.numbered_line: line_type = "list_item" else: line_type = "para" self.line_type = line_type def parse_line(self): self.words = [] self.title_word_count = 0 self.alpha_count = 0 self.list_type = "" self.integer_numbered_line = False self.roman_numbered_line = False self.dot_numbered_line = False self.numbered_line = False self.stop_word_count = 0 self.dollar_count = 0 self.pct_count = 0 self.number_count = 0 self.last_word_number = False self.first_word_title = False self.letter_numbered_line = False self.ends_with_hyphen = False self.last_word_date = False self.is_reference_author_name = False self.date_entry_count = 0 self.last_word_is_stop_word = False # self.last_word in self.stopwords self.hit_colon = False self.is_zipcode_or_po = False self.contains_state = False self.addresses = [] # todo - this is a stopgap solution, need to make it more efficient tokens = self.text.split() self.length = len(self.text) self.word_count = len(tokens) self.dollar_sign_count = tokens.count("$") last_idx = self.word_count - 1 first_alpha_found = False prev_token_comma = False self.eff_length = 0 single_letter_word_count = 0 noun_chunk_buf = [] if self.length == 0: return for idx, token in enumerate(tokens): if token in unicode_list_types.keys(): token = unicode_list_types[token] if token.__contains__(":"): self.hit_colon = True # remove punctuation unless (word) or unless it is the first token or if it has colon last_char = token[-1] # remove punctuation unless (word) or unless it is the first token if ( (token[-1] in string.punctuation or token[-1] in end_quotations) and not (token[0] in string.punctuation or token[0] in start_quotations) and (not idx == 0 or token[-1] == ":") ): token = token[0:-1] if len(token) == 0: continue # if prev token contained comma, check if current token is state name if prev_token_comma and ( token.lower() in states or token.lower() in states_abbreviations ): self.contains_state = True prev_token_comma = False if prev_token_comma: prev_token_comma = False if last_char == ",": prev_token_comma = True if idx == 0 and not token.lower() == "i" and not token.lower() == "a": self.check_numbered_line(token) if token.istitle() or token.isupper(): # and not self.hit_colon: self.title_word_count = self.title_word_count + 1 if token.isalpha(): # if not self.hit_colon: self.alpha_count = self.alpha_count + 1 if not first_alpha_found: first_alpha_found = True if idx == 0: self.first_word_title = token[0].isupper() word = Word(token) if word.is_number: self.number_count = self.number_count + 1 if idx == last_idx: self.last_word_number = True if word.is_date_entry: self.date_entry_count += 1 if idx == last_idx: self.last_word_date = True if word.is_dollar: 
self.dollar_count = self.dollar_count + 1 if idx == last_idx: self.last_word_number = True if word.is_percent: self.pct_count = self.pct_count + 1 if idx == last_idx: self.last_word_number = True self.eff_length += word.length if word.length == 1: single_letter_word_count += 1 if word.is_stop_word: if not self.hit_colon: self.stop_word_count = self.stop_word_count + 1 if idx == last_idx and len(token) != 1 and not token.isupper(): self.last_word_is_stop_word = True if word.is_noun or word.text == "&": noun = word.text_without_punct prev_word = self.words[-1] if len(self.words) > 0 else None if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf: noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway if noun.endswith("'s"): noun = noun[0:-2] noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] elif ( "".join([x.lower() for x in noun if x not in {".", ","}]) in self.noun_chunk_ending_tokens ): noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] else: noun_chunk_buf.append(noun) elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]: noun_chunk_buf.append(word.text_without_punct) elif len(noun_chunk_buf): self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] self.words.append(word) if len(noun_chunk_buf) > 0: self.noun_chunks.append(" ".join(noun_chunk_buf)) self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks)))) self.first_word = tokens[0] self.last_word = tokens[-1] self.last_char = self.text[-1] self.ends_with_period = self.last_char == "." self.ends_with_comma = self.last_char == "," self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "." 
self.eff_word_count = self.alpha_count - self.stop_word_count self.check_ends_with_period() self.first_char = self.text[0] self.has_continuing_chars = not self.numbered_line and ( self.first_char.islower() or self.first_char in continuing_chars ) self.last_continuing_char = self.last_char in continuing_chars self.check_zipcode_or_pobox() self.check_list_item() self.check_header() self.check_table_row() self.separate_line = ( self.is_header or self.is_table_row or self.is_list_item or self.is_zipcode_or_po ) self.is_list_or_row = self.is_table_row or self.is_list_item self.is_header_or_row = ( self.is_header or self.is_table_row or self.is_zipcode_or_po ) self.ends_with_abbreviation = self.ends_with_period and ( (self.last_word.find(".") != len(self.last_word) - 1) or self.last_word.lower() in abbreviations or len(self.last_word) <= 3 ) self.incomplete_line = not self.is_header_or_row and ( not self.ends_with_period or self.ends_with_abbreviation or self.end_with_period_single_char ) self.continuing_line = self.has_continuing_chars and not self.separate_line self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8 self.set_line_type() if self.is_header or self.is_header_without_comma: if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2: self.is_reference_author_name = True self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list # print(self.separate_line) # self.continuing_line = not self.separate_line and def to_json(self): json_lp = dict(self.__dict__) del json_lp["visual_line"] words = [] for word in self.words: words.append(word.__dict__) json_lp["words"] = words return json_lp class VisualLine: def __init__(self, text_list=[], style_dict={}, page_stats={}): self.text_list = text_list self.start_x = None self.start_y = None self.end_x = None self.end_y = None self.fs = None self.fw = None self.start_fs = None self.end_fs = None self.diff_prev_y = None self.diff_next_y = None self.is_comparably_sized = False self.is_comparably_bolded = False self.is_prev_space_smallest = False self.is_next_space_smallest = False self.wrapped_page = False self.text = " ".join(self.text_list) if style_dict: self.start_x = style_dict["start_x"][0] self.start_y = style_dict["start_y"][0] self.end_x = style_dict["end_x"][-1] self.end_y = style_dict["end_y"][-1] self.fs = style_dict["line_fs"][0] self.fw = style_dict["line_fw"][0] self.diff_prev_y = style_dict["diff_prev_y"][0] self.diff_next_y = style_dict["diff_next_y"][0] self.font_family = ( style_dict["font_family"][0] if len(style_dict["font_family"]) else None ) self.font_style = ( style_dict["font_style"][0] if len(style_dict["font_style"]) else None ) self.min_x = ( self.start_x ) # these variables are adjustable during line joins for line width self.max_x = self.end_x self.start_x_list = style_dict["start_x"] # joined ents self.end_x_list = style_dict["end_x"] # joined ents self.start_x_list_single_ent = style_dict["start_x_list"][0] self.end_x_list_single_ent = style_dict["end_x_list"][0] self.mode_fs = mode_of_list(style_dict["line_fs"]) self.tab_count = 0 # calculates tabs for when tika misses word split if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent): self.start_end_list = list( zip(self.start_x_list_single_ent, self.end_x_list_single_ent), ) for word_x, next_word_x in zip( self.start_end_list[:-1], self.start_end_list[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = 
next_word_start_x - word_end_x if word_distance > 20: self.tab_count += 1 else: self.start_end_list = [] self.tab_count_join = 0 # tab count after join in ptolines # calculates tabs for when tika misses word split if len(self.start_x_list) == len(self.end_x_list): self.start_end_list_join = list( zip(self.start_x_list, self.end_x_list), ) for word_x, next_word_x in zip( self.start_end_list_join[:-1], self.start_end_list_join[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = next_word_start_x - word_end_x if word_distance > 20: self.tab_count_join += 1 else: self.start_end_list_join = [] if len(self.text.split()) == 2 and self.tab_count == 1: self.text_list = self.text.split() # Count tabs in text list, Eventually make it a function of font size self.start_fs = round(style_dict["start_fs"][0], 1) self.end_fs = round(style_dict["end_fs"][-1], 1) self.compute_visual_features(page_stats) def compute_visual_features(self, page_stats): # compute font size relative to most common font font_sizes_mode = page_stats["mode_fs"] if self.fs > (4 / 3) * font_sizes_mode: self.is_comparably_sized = True else: self.is_comparably_sized = False # compute font weight relative to 600.0 which has generally # been observed to correspond to bolding of some sort font_weights_mode = page_stats["mode_fw"] if font_weights_mode >= 600.0: self.is_comparably_bolded = False elif self.fw > 600.0: self.is_comparably_bolded = True # compare line height for similar type (same font) lines if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2: for k, v in page_stats["fs_and_diff_prev_y"].items(): if k == self.fs and 0 <= v < self.diff_prev_y: break else: self.is_prev_space_smallest = True if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2: for k, v in page_stats["fs_and_diff_next_y"].items(): if k == self.fs and 0 <= v < self.diff_next_y: break else: self.is_next_space_smallest = True def should_join_table(self, next_line): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # check list of spaced words curr_line_ents = len(self.text_list) next_line_ents = len(next_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # compare alignment of elements in both lists if ent_match: return return False def should_join_para(self): return False def should_join_header(self): return False def __str__(self): output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest}," output_str += f"\nfont_style = {self.font_style}" return output_str <fim_middle>self.text = token
self.text = token
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
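The row above, like the others in this dump, packs repo-RAG snippets, the split source file, and the masked statement into a single inputs string, repeating the masked span in the targets column. A minimal sketch of how such a row could be assembled — the function and parameter names here are illustrative assumptions, not the dataset's actual tooling; only the sentinel tokens and the scenario string are taken from the rows themselves:

def build_fim_row(filename, source, hole_start, hole_end, rag_snippets):
    # Quote repo-RAG context as a leading docstring, mirroring the rows in this file.
    rag_block = '"""\n' + "\n\n".join(rag_snippets) + '\n"""\n' if rag_snippets else ""
    prefix = source[:hole_start]           # code before the masked statement
    middle = source[hole_start:hole_end]   # the statement the model must fill
    suffix = source[hole_end:]             # code after the masked statement
    inputs = (f"<filename>{filename}<fim_prefix>{rag_block}{prefix}"
              f"<fim_suffix>{suffix}<fim_middle>{middle}")
    return {
        "inputs": inputs,
        "targets": middle,
        "block_type": "STATEMENT",
        "scenario": "prefix_suffix_full_complete_current_block_with_repo_rag_oracle",
    }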
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/line_parser.py def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def traverse(parent, level, blocks): for child in parent: # handle cases when there's only a <country /> tag if not child.text: continue if len(list(child)) > 0: # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader blocks.append(header_block) traverse(child, level + 1, blocks) else: # print("\t"*(level + 1), child.text) if not title and child.tag.lower().find("title") != -1: self.title = child.text if child.tag != "textblock": # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) # header_text = " ".join(child.tag.split("_")).title() header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader 
blocks.append(header_block) else: level -= 1 lines = child.text.split("\n") # print("\t" * (level + 1), "======") # for line in lines: # print("\t" * (level + 1), line) # print("\t" * (level + 1), "======") col_blocks = processors.clean_lines(lines, xml=True) header_text = blocks[-1]["block_text"] has_header = False for block in col_blocks: # print("\t" * (level + 1), block["block_text"]) inline_header = has_header and block["block_type"] == "para" block["header_text"] = para_header if inline_header else header_text indent_offset = 2 if inline_header else 1 block["level"] = level + indent_offset block["block_idx"] = len(blocks) block["page_idx"] = 0 block["block_sents"] = sent_tokenize(block["block_text"]) block["block_class"] = "nlm-text-body" block["level_chain"] = ( [title, header_text] if title else [header_text] ) if len(col_blocks) == 1: block["block_type"] = "para" blocks.append(block) if block["block_type"] == "header": has_header = True para_header = block["block_text"] # nlm-ingestor/nlm_ingestor/ingestor_utils/parsing_utils.py def find_potential_gaps(gap_count): """ This function checks if a table row can be formed from the current table row spacing scheme. This is for edge cases when tika doesn't properly chunk the cells of a line """ possible_gaps = 0 min_gap = min(gap_count) gap_threshold = [] for gap_size in gap_count: if gap_size > (min_gap * 3): gap_threshold.append(gap_size) possible_gaps += gap_count[gap_size] if len(gap_threshold): return possible_gaps, min(gap_threshold) # suggested splits return [], 0 """ import json import re import numpy as np from nltk import load from nltk import PunktSentenceTokenizer nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) if isinstance(obj, np.floating): return float(obj) if isinstance(obj, np.ndarray): return obj.tolist() return super(NpEncoder, self).default(obj) nlm_abbs = { "u.s", "u.s.a", "n.w", "p.o", "po", "st", "ave", "blvd", "ctr", "cir", "ct", "dr", "mtn", "apt", "hwy", "esq", "fig", "no", "sec", "n.a", "s.a.b", "non-u.s", "cap", 'u.s.c', "ste", } nlm_special_abbs = { "inc", } abbs = nltk_abbs | nlm_abbs nltk_tokenzier = PunktSentenceTokenizer() rules = [] for abb in abbs: # match start of the sentence pattern = fr"^{abb}.\s" replaced = f"{abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match token in sentence pattern = fr"\s{abb}.\s" replaced = f" {abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) for abb in nlm_special_abbs: pattern = fr"{abb}\." 
replaced = f"{abb}_" rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match content inside brackets # (?<=\() ==> starts with "(" # ([^)]+) ==> repeat not ")" # (?=\))") ==> ends with ")" bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))") space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.') quotation_pattern = re.compile(r'[โ€โ€œ"โ€˜โ€™\']') def sent_tokenize(org_texts): if not org_texts: return org_texts sents = [] # in case org_texts has \n, break it into multiple paragraph # edge case for html and markdown for org_text in org_texts.split("\n"): org_text = space_rule.sub(r'\1', org_text) modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925 orig_offset = abs(len(org_text) - len(modified_text)) # do not break bracket for span_group in bracket_rule.finditer(modified_text): start_byte, end_byte = span_group.span() span = modified_text[start_byte:end_byte] # skip this logic when span is too big? disabled for now # if len(span.split()) >= 10: # continue modified_text = modified_text.replace( f"({span})", f"_{span.replace('.','_')}_", ) for rule, replaced in rules: modified_text = rule.sub(replaced, modified_text) # Normalize all the quotation. modified_text = quotation_pattern.sub("\"", modified_text) <fim_suffix> offset = orig_offset sent_idx = 0 while offset < len(modified_text) and sent_idx < len(modified_sents): if modified_text[offset] == " ": offset += 1 continue # cut org_text based on lengths of modified_sent modified_sent = modified_sents[sent_idx] sents.append(org_text[offset: offset + len(modified_sent)]) offset += len(modified_sent) sent_idx += 1 if len(sents) >= 2 and re.match(r"^.\.$", sents[0]): sents[1] = sents[0] + " " + sents[1] sents = sents[1:] return sents def divide_list_into_chunks(lst, n): # looping till length l for i in range(0, len(lst), n): yield lst[i : i + n] def normalize(X): norms = np.einsum("ij,ij->i", X, X) np.sqrt(norms, norms) X /= norms[:, np.newaxis] return X def detect_block_center_aligned(block, page_width): center_location = block["box_style"][1] + block["box_style"][3] / 2 center_aligned = abs(center_location - page_width / 2) < page_width * 0.01 width_check = block["box_style"][3] * 2 < page_width return center_aligned and width_check def detect_block_center_of_page(block, page_height): bottom = block["box_style"][0] + block["box_style"][4] center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3) return center_of_page def check_char_is_word_boundary(c): if c.isalnum(): return False if c in ['-', '_']: return False return True def blocks_to_sents(blocks, flatten_merged_table=False, debug=False): block_texts = [] block_info = [] header_block_idx = -1 header_match_idx = -1 header_match_idx_offset = -1 header_block_text = "" is_rendering_table = False is_rendering_merged_cells = False table_idx = 0 levels = [] prev_header = None block_idx = 0 for block_idx, block in enumerate(blocks): block_type = block["block_type"] if block_type == "header": if debug: print("---", block["level"], block["block_text"]) header_block_text = block["block_text"] header_block_idx = block["block_idx"] header_match_idx = header_match_idx_offset + 1 if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0: while len(levels) > 0 and levels[-1]["level"] >= block["level"]: if debug: print("<<", levels[-1]["level"], levels[-1]["block_text"]) levels.pop(-1) if debug: print(">>", block["block_text"]) 
levels.append(block) prev_header = block if debug: print("-", [str(level['level']) + "-" + level['block_text'] for level in levels]) block["header_text"] = header_block_text block["header_block_idx"] = header_block_idx block["header_match_idx"] = header_match_idx block["block_idx"] = block_idx level_chain = [] for level in levels: level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]}) # remove a level for header if block_type == "header": level_chain = level_chain[:-1] level_chain.reverse() block["level_chain"] = level_chain # if block_type == "header" or block_type == "table_row": if ( block_type == "header" and not is_rendering_table and 'is_table_start' not in block ): block_texts.append(block["block_text"]) # append text from next block to header block # TODO: something happened here, it messed up the match_text # if block_type == "header" and block_idx + 1 < len(blocks): # block[ # "block_text" # ] += blocks[block_idx+1]['block_text'] block_info.append(block) header_match_idx_offset += 1 elif ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" ) and not is_rendering_table: block_sents = block["block_sents"] header_match_idx_offset += len(block_sents) for sent in block_sents: block_texts.append(sent) block_info.append(block) elif 'is_table_start' in block: is_rendering_table = True if 'has_merged_cells' in block: is_rendering_merged_cells = True elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row": block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if is_rendering_table: if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table: eff_header_block = block["effective_header"] eff_para_block = block["effective_para"] eff_header_block["header_text"] = block["header_text"] eff_header_block["header_block_idx"] = block["block_idx"] eff_header_block["header_match_idx"] = header_match_idx_offset + 1 eff_header_block["level"] = block["level"] + 1 eff_header_block["level_chain"] = block["level_chain"] eff_para_block["header_block_idx"] = block["block_idx"] eff_para_block["header_match_idx"] = header_match_idx_offset + 1 eff_para_block["level"] = block["level"] + 2 eff_para_block["level_chain"] = [ { "block_idx": eff_header_block["block_idx"], "block_text": eff_header_block["block_text"], }, ] + eff_header_block["level_chain"] header_match_idx_offset += 1 block_info.append(block["effective_header"]) block_texts.append(block["effective_header"]["block_text"]) for sent in block["effective_para"]["block_sents"]: block_texts.append(sent) block_info.append(block["effective_para"]) header_match_idx_offset += len(block["effective_para"]["block_sents"]) else: block["table_idx"] = table_idx block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if 'is_table_end' in block: is_rendering_table = False table_idx += 1 return block_texts, block_info def get_block_texts(blocks): block_texts = [] block_info = [] for block in blocks: block_type = block["block_type"] if ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" or block_type == "header" ): block_texts.append(block["block_text"]) block_info.append(block) return block_texts, block_info<fim_middle>modified_sents = nltk_tokenzier.tokenize(modified_text)
modified_sents = nltk_tokenzier.tokenize(modified_text)
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
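The completion above fills the Punkt tokenization step inside sent_tokenize, which first masks known abbreviations so the tokenizer cannot split on them, then restores the original text by offset. A self-contained sketch of that masking idea, assuming a single illustrative Sec. rule rather than the module's full rule table:

import re
from nltk.tokenize import PunktSentenceTokenizer

tokenizer = PunktSentenceTokenizer()
text = "See Sec. 5 for details. The rest follows."
masked = re.sub(r"\bSec\.", "Sec_", text)           # protect the abbreviation; same length as the original
sents = tokenizer.tokenize(masked)                  # Punkt now splits only at the real sentence boundary
sents = [s.replace("Sec_", "Sec.") for s in sents]  # restore the original surface form
print(sents)  # expected: ['See Sec. 5 for details.', 'The rest follows.']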
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def check_number_type(num): dollar = ( re.search(r"^[\(]*\$\d[\d\.\,)]*$", num) is not None or re.search(r"^[\(]*\d[\d\.\,)]*\$$", num) is not None ) percentage = ( re.search(r"^[\(]*\%\d[\d\.\,)]*$", num) is not None or re.search(r"^[\(]*\d[\d\.\,)]*\%$", num) is not None ) if dollar: return "dollar" if percentage: return "percent" else: return "num" # nlm-ingestor/nlm_ingestor/ingestor_utils/spell_utils.py def lookup_word(self, input_term): max_edit_distance_lookup = 2 suggestion_verbosity = Verbosity.CLOSEST # ignore_token = None ignore_token = "|".join(patterns.spell_check) suggestions = self.sym_spell.lookup( input_term, suggestion_verbosity, max_edit_distance_lookup, transfer_casing=False, ignore_token=ignore_token, ) # print(suggestions) # for suggestion in suggestions: # print("{}, {}, {}".format(suggestion.term, suggestion.distance, # suggestion.count)) if len(suggestions) > 0: return suggestions[0].term else: return input_term # nlm-ingestor/nlm_ingestor/ingestor/visual_ingestor/visual_ingestor.py def should_ignore_line(self, all_p, is_page_footer, is_page_header, last_line_counts, line_idx, loc_key, lp_line, p, page_footers, page_headers, page_idx, box_style, page_visual_lines): if box_style[4] < 1: # Really small text return True, False if line_idx > len(all_p) - 2 or line_idx < 2: num_only = not_a_number_pattern.sub("", p.text).strip() do_ignore = True if line_idx < 2: remove_whole_numbers = integer_pattern.sub("", p.text).strip() if (len(remove_whole_numbers) == len(p.text) or not p.text.lower().startswith("page")) \ and lp_line.word_count > 1: do_ignore = False if 0 < len(num_only) < 4 and lp_line.alpha_count < 2 and not lp_line.dot_numbered_line and do_ignore: return True, False else: text_only = text_only_pattern.sub("", p.text).strip() if text_only in last_line_counts and last_line_counts[text_only] > 2 and \ not lp_line.last_word_is_stop_word and line_idx > len(all_p) - 2: return True, False if p.text: if p.text.startswith("Source:"): return True, False elif not len(single_char_pattern.sub("", p.text).strip()) and box_style[1] < 5: # Get rid of single letter text, which might be a water mark return True, False ignore, ignore_all_after = self.should_ignore( p.text, "header" if lp_line.is_header else None, ) if ignore: return True, False if ignore_all_after: # fix this return True, False if is_page_header and loc_key in page_headers: do_continue = True if len(page_visual_lines) > 0: # Sort using top sorted_vls = sorted(page_visual_lines, key=lambda vl: vl['box_style'][0]) if sorted_vls[0]['box_style'][0] < box_style[0]: # Check top # We have added a VL before this to the group, so don't discard this Header. do_continue = False if do_continue: page_idxs = page_headers[loc_key] if len(page_idxs) > 1 and page_idx > 0 and page_idx in page_idxs and not lp_line.dot_numbered_line: if HF_DEBUG: print(f"skipping header : {p.text}, {loc_key}, {page_idxs}") return True, False elif is_page_footer and loc_key in page_footers and not lp_line.dot_numbered_line: page_idxs = page_footers[loc_key] if len(page_idxs) > 1: if HF_DEBUG: print(f"skipping footer : {p.text}, {loc_key}") return True, False if box_style[4] < 1: # Check height # We are referring to some really small text here. if LINE_DEBUG: print(f"Ignoring really small line {p.text}.. 
", box_style) return True, False if p.text in filter_out_pattern_list: return False, True else: return False, False """ import datetime import logging import math import re import string from nltk.corpus import stopwords from .patterns import abbreviations from .patterns import states from .patterns import states_abbreviations from .styling_utils import mode_of_list try: stop_words = set(stopwords.words("english")) except Exception as e: logging.error(e) import nltk stopwords = nltk.download("stopwords") stop_words = set(stopwords.words("english")) stop_words.add("per") continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~" list_chars = [ "โ€ข", "โžข", "*", "ฦ’", "๏‚ท", "๏‚ง", "๏ƒ˜", "๏ฎ", "ยป", "โ˜", "ยท", "๏ฟฝ", "โ–ช", "โ–ช", "โ—‹", "๔€ธ", "โ€“", ] list_types = { "โ€ข": "circle", "โžข": "wide_symbol_arrow", "*": "star", "ฦ’": "f", "๏‚ท": "clock", "๏‚ง": "small_square", "๏ƒ˜": "narrow_symbol_arrow", "๏ฎ": "large_square", "ยป": "double_arrow", "โ˜": "hollow_square", "ยท": "circle", "๏ฟฝ": "special_char", "โ–ช": "very_small_square", "โ–ช": "very_small_square", "โ—‹": "hollow_circle", "๔€ธ": "hollow_squere", "โ€“": "dash", "โ€’": "another-dash", "ฬถ": "underscore", } unicode_list_types = { "\\uf0b7": "โ€ข", "\\uf0fc": "๏ƒผ", } footnote_types = { "ยฉ" } ambiguous_list_chars = ["+", "-"] units = ["acres", "miles", "-"] # - could represent a null value in a row punctuations = string.punctuation + "โ€œ" start_quotations = ["'", '"', "โ€œ"] end_quotations = ["'", '"', "โ€"] """ Quote Pattern details: \\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly. ["โ€œ\'] ==> Quote patterns (?!\\D\\s) ==> Negative Lookahead for single character following the quote. Helps in removing words like Macy's, don't ... (?!\\d+) ==> Negative Lookahead for one or more digits following the pattern. Helps in removing words like '19, '2019 (.*?)[,;.]?[โ€"\'] ==> Match all other data. """ # Add / Modify Quotation pattern in ingestor_utils/utils.py also. 
quote_pattern = re.compile(
    r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
)  # (r'["“\'](.*?)[,;.]?[”"\']')
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]


class Word:
    def __init__(self, token):
        self.text = token
        self.is_percent = False
        self.is_number = False
        self.is_year = False  # year does not count as a number
        self.is_dollar = False
        self.is_million = False
        self.is_billion = False
        self.is_thousand = False
        self.is_date_entry = False
        self.is_negative = False
        self.length = len(self.text)
        self.is_stop_word = self.text.lower() in stop_words
        self.is_number_range = False
        self.parts = []
        text_without_punct = self.text
        while (
            len(text_without_punct) > 1
            and (text_without_punct[-1] in string.punctuation
                 or text_without_punct[-1] in end_quotations)
        ):
            text_without_punct = text_without_punct[0:-1]
        # remove leading unbalancced punctuations
        while (
            len(text_without_punct) > 1
            and (text_without_punct[0] in string.punctuation
                 or text_without_punct[0] in start_quotations)
        ):
            text_without_punct = text_without_punct[1:]
        self.text_without_punct = text_without_punct
        self.is_noun = self.text_without_punct[0].isupper()
        n = self.check_numeric()
        self.check_date()
        try:
            if n:
                n = round(float(n))
                if n > 0:
                    digits = int(math.log10(n)) + 1
                elif n == 0:
                    digits = 1
                else:
                    digits = int(math.log10(-n)) + 2
                self.num_digits = digits
                if digits == 4 and self.text.replace(",", "") == self.text:
                    self.is_year = True
                    self.is_number = False
            else:
                self.num_digits = 0
        except Exception as e:
            logging.error(e)
            self.num_digits = 0

    def check_date(self):
        if "/" in self.text or "-" in self.text:
            text = self.text.replace("/", "-")
            date_patterns = [
                "%b-%d",
                "%B-%d",
                "%B-%d-%y",
                "%B-%d-%Y",
                "%b-%d-%Y",
                "%b-%d-%y",
                "%m-%d",
                "%m-%d-%y",
                "%m-%d-%Y",
            ]
            for pat in date_patterns:
                try:
                    datetime.datetime.strptime(text, pat)
                    self.is_date_entry = True
                    return
                except ValueError:
                    pass
        else:
            self.is_date_entry = False

    def check_numeric(self):
        word = self.text.lower()
        if not word.isalpha():
            if word.isprintable():
                if not word.isnumeric():
                    if word.startswith("(") and word.endswith(")"):
                        word = word[1:-1]
                    if word.startswith("-"):
                        self.is_negative = True
                        word = word[1:]
                    if word.startswith("$"):
                        self.is_dollar = True
                        word = word[1:]
                    elif word.endswith("$"):
                        self.is_dollar = True
                        word = word[0:-1]
                    elif word.endswith("%"):
                        self.is_percent = True
                        word = word[0:-1]
                    elif word.endswith("m"):
                        self.is_million = True
                    elif word.endswith("bn"):
                        self.is_billion = True
                    if word.startswith("(") and word.endswith(")"):
                        word = word[1:-1]
                    word = word.replace(",", "")
                    if word.isnumeric() or word.replace(".", "", 1).isnumeric():
                        self.is_number = True
                    parts = word.split("-")
                    if (
                        len(parts) == 2
                        and parts[0].isnumeric()
                        and parts[1].isnumeric()
                    ):
                        self.is_number_range = True
                        self.parts = parts
            else:
                self.is_number = True
        if self.is_number:
            numeric_part = word
            return numeric_part


class Line:
    def __init__(
        self,
        line_str,
        text_list=[],
        style_dict={},
        page_details={},
        noun_chunk_ending_tokens=[],
    ):
        self.text = line_str.strip()
        self.visual_line = VisualLine(text_list, style_dict, page_details)
        self.words = []
        self.is_independent = False
        self.is_header = False
        self.is_header_without_comma = False
        self.noun_chunks = []
        self.quoted_words = 
quote_pattern.findall(self.text) self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens} self.parse_line() def check_header(self): # Section X, Article Y, Note 1 etc. first_word_header = self.first_word.lower() in ["section", "article", "note"] # If there are a certain percentage of title words (first letter capitalize) title_ratio = ( self.title_word_count / self.eff_word_count if self.eff_word_count > 0 else 1.0 ) # print(self.title_word_count, self.eff_word_count, title_ratio) # Section 1 is a header but Section 1: Hello 3 is not has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10 has_header_structure = ( (first_word_header or has_enough_titles) and self.number_count == 1 ) or self.numbered_line or self.text.isupper() # has_header_structure = has_header_structure and self.eff_word_count < last_word_number = ( self.last_word.lower() in units or self.last_word_number and not has_header_structure ) last_word_date = self.last_word_date and not has_header_structure # Find lines ending with sentence delimiter. But exclude text like "L.P." ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None sentence_structure = self.ends_with_period and not ( has_header_structure and title_ratio > 0.9 ) and ends_with_delim last_letter_is_punctuation = ( self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and ends_with_delim ) self.is_header_without_comma = ( not sentence_structure and not self.has_list_char and not self.first_char in footnote_types and has_enough_titles and not last_word_number and ( self.number_count == 0 or (has_header_structure and self.number_count <= 1) ) and not self.has_continuing_chars and not last_word_date and self.first_word_title and not self.last_word_is_stop_word and not self.is_zipcode_or_po and not last_letter_is_punctuation and not "://" in self.text # url pattern ) self.is_header = self.is_header_without_comma and \ ((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True) def check_ends_with_period(self): # punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.'] last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."] self.ends_with_period = self.last_char in ["."] and not last_word_is_title def check_table_row(self): if not self.is_header: value_count = ( self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ") ) word_symbols = self.word_count - self.dollar_sign_count if word_symbols == 0: word_symbols = 1 word_ratio = ( value_count + self.title_word_count + self.date_entry_count ) / word_symbols self.is_table_row = ( ( (value_count > 0 or self.date_entry_count > 0) and word_ratio > 0.7 and not self.ends_with_period and not self.is_zipcode_or_po ) and not self.last_word_is_stop_word or ("...." in self.text) ) else: <fim_suffix> def check_list_item(self): text = self.text.strip() self.has_list_char = text[0] in list_types.keys() # if not self.has_list_char and text[0] in ambiguous_list_chars: # self.has_list_char = text[1:].strip()[0].isalpha() self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$" if self.is_list_item: self.list_type = list_types[text[0]] # matches 1.1 1.2.1 1 etc. 
def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # check if line is part of address def check_zipcode_or_pobox(self): # check if line matches format P.O. 
box xxxxx pobox = ( self.word_count == 3 and self.last_word_number and self.first_word.lower() in ["po", "p.o", "p.o."] ) # check if line is last part of address, matching format "city, state zipcode" zipcode = ( self.word_count < 7 # ensure line is standalone address, not part of larger sentence and ( self.contains_state # line contains comma followed by state name or abbreviation # line ends in zipcode, with format xxxxx or xxxxx-xxxx and ( (self.last_word_number or self.last_word[-4:].isdigit()) and ( (len(self.last_word) == 10 and self.last_word[-5] == "-") or len(self.last_word) == 5 ) ) and not self.ends_with_period ) ) self.is_zipcode_or_po = pobox or zipcode def set_line_type(self): line_type = "para" if self.is_table_row: line_type = "table_row" elif self.is_header: line_type = "header" elif self.is_list_item or self.numbered_line: line_type = "list_item" else: line_type = "para" self.line_type = line_type def parse_line(self): self.words = [] self.title_word_count = 0 self.alpha_count = 0 self.list_type = "" self.integer_numbered_line = False self.roman_numbered_line = False self.dot_numbered_line = False self.numbered_line = False self.stop_word_count = 0 self.dollar_count = 0 self.pct_count = 0 self.number_count = 0 self.last_word_number = False self.first_word_title = False self.letter_numbered_line = False self.ends_with_hyphen = False self.last_word_date = False self.is_reference_author_name = False self.date_entry_count = 0 self.last_word_is_stop_word = False # self.last_word in self.stopwords self.hit_colon = False self.is_zipcode_or_po = False self.contains_state = False self.addresses = [] # todo - this is a stopgap solution, need to make it more efficient tokens = self.text.split() self.length = len(self.text) self.word_count = len(tokens) self.dollar_sign_count = tokens.count("$") last_idx = self.word_count - 1 first_alpha_found = False prev_token_comma = False self.eff_length = 0 single_letter_word_count = 0 noun_chunk_buf = [] if self.length == 0: return for idx, token in enumerate(tokens): if token in unicode_list_types.keys(): token = unicode_list_types[token] if token.__contains__(":"): self.hit_colon = True # remove punctuation unless (word) or unless it is the first token or if it has colon last_char = token[-1] # remove punctuation unless (word) or unless it is the first token if ( (token[-1] in string.punctuation or token[-1] in end_quotations) and not (token[0] in string.punctuation or token[0] in start_quotations) and (not idx == 0 or token[-1] == ":") ): token = token[0:-1] if len(token) == 0: continue # if prev token contained comma, check if current token is state name if prev_token_comma and ( token.lower() in states or token.lower() in states_abbreviations ): self.contains_state = True prev_token_comma = False if prev_token_comma: prev_token_comma = False if last_char == ",": prev_token_comma = True if idx == 0 and not token.lower() == "i" and not token.lower() == "a": self.check_numbered_line(token) if token.istitle() or token.isupper(): # and not self.hit_colon: self.title_word_count = self.title_word_count + 1 if token.isalpha(): # if not self.hit_colon: self.alpha_count = self.alpha_count + 1 if not first_alpha_found: first_alpha_found = True if idx == 0: self.first_word_title = token[0].isupper() word = Word(token) if word.is_number: self.number_count = self.number_count + 1 if idx == last_idx: self.last_word_number = True if word.is_date_entry: self.date_entry_count += 1 if idx == last_idx: self.last_word_date = True if word.is_dollar: 
self.dollar_count = self.dollar_count + 1 if idx == last_idx: self.last_word_number = True if word.is_percent: self.pct_count = self.pct_count + 1 if idx == last_idx: self.last_word_number = True self.eff_length += word.length if word.length == 1: single_letter_word_count += 1 if word.is_stop_word: if not self.hit_colon: self.stop_word_count = self.stop_word_count + 1 if idx == last_idx and len(token) != 1 and not token.isupper(): self.last_word_is_stop_word = True if word.is_noun or word.text == "&": noun = word.text_without_punct prev_word = self.words[-1] if len(self.words) > 0 else None if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf: noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway if noun.endswith("'s"): noun = noun[0:-2] noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] elif ( "".join([x.lower() for x in noun if x not in {".", ","}]) in self.noun_chunk_ending_tokens ): noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] else: noun_chunk_buf.append(noun) elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]: noun_chunk_buf.append(word.text_without_punct) elif len(noun_chunk_buf): self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] self.words.append(word) if len(noun_chunk_buf) > 0: self.noun_chunks.append(" ".join(noun_chunk_buf)) self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks)))) self.first_word = tokens[0] self.last_word = tokens[-1] self.last_char = self.text[-1] self.ends_with_period = self.last_char == "." self.ends_with_comma = self.last_char == "," self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "." 
self.eff_word_count = self.alpha_count - self.stop_word_count self.check_ends_with_period() self.first_char = self.text[0] self.has_continuing_chars = not self.numbered_line and ( self.first_char.islower() or self.first_char in continuing_chars ) self.last_continuing_char = self.last_char in continuing_chars self.check_zipcode_or_pobox() self.check_list_item() self.check_header() self.check_table_row() self.separate_line = ( self.is_header or self.is_table_row or self.is_list_item or self.is_zipcode_or_po ) self.is_list_or_row = self.is_table_row or self.is_list_item self.is_header_or_row = ( self.is_header or self.is_table_row or self.is_zipcode_or_po ) self.ends_with_abbreviation = self.ends_with_period and ( (self.last_word.find(".") != len(self.last_word) - 1) or self.last_word.lower() in abbreviations or len(self.last_word) <= 3 ) self.incomplete_line = not self.is_header_or_row and ( not self.ends_with_period or self.ends_with_abbreviation or self.end_with_period_single_char ) self.continuing_line = self.has_continuing_chars and not self.separate_line self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8 self.set_line_type() if self.is_header or self.is_header_without_comma: if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2: self.is_reference_author_name = True self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list # print(self.separate_line) # self.continuing_line = not self.separate_line and def to_json(self): json_lp = dict(self.__dict__) del json_lp["visual_line"] words = [] for word in self.words: words.append(word.__dict__) json_lp["words"] = words return json_lp class VisualLine: def __init__(self, text_list=[], style_dict={}, page_stats={}): self.text_list = text_list self.start_x = None self.start_y = None self.end_x = None self.end_y = None self.fs = None self.fw = None self.start_fs = None self.end_fs = None self.diff_prev_y = None self.diff_next_y = None self.is_comparably_sized = False self.is_comparably_bolded = False self.is_prev_space_smallest = False self.is_next_space_smallest = False self.wrapped_page = False self.text = " ".join(self.text_list) if style_dict: self.start_x = style_dict["start_x"][0] self.start_y = style_dict["start_y"][0] self.end_x = style_dict["end_x"][-1] self.end_y = style_dict["end_y"][-1] self.fs = style_dict["line_fs"][0] self.fw = style_dict["line_fw"][0] self.diff_prev_y = style_dict["diff_prev_y"][0] self.diff_next_y = style_dict["diff_next_y"][0] self.font_family = ( style_dict["font_family"][0] if len(style_dict["font_family"]) else None ) self.font_style = ( style_dict["font_style"][0] if len(style_dict["font_style"]) else None ) self.min_x = ( self.start_x ) # these variables are adjustable during line joins for line width self.max_x = self.end_x self.start_x_list = style_dict["start_x"] # joined ents self.end_x_list = style_dict["end_x"] # joined ents self.start_x_list_single_ent = style_dict["start_x_list"][0] self.end_x_list_single_ent = style_dict["end_x_list"][0] self.mode_fs = mode_of_list(style_dict["line_fs"]) self.tab_count = 0 # calculates tabs for when tika misses word split if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent): self.start_end_list = list( zip(self.start_x_list_single_ent, self.end_x_list_single_ent), ) for word_x, next_word_x in zip( self.start_end_list[:-1], self.start_end_list[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = 
next_word_start_x - word_end_x if word_distance > 20: self.tab_count += 1 else: self.start_end_list = [] self.tab_count_join = 0 # tab count after join in ptolines # calculates tabs for when tika misses word split if len(self.start_x_list) == len(self.end_x_list): self.start_end_list_join = list( zip(self.start_x_list, self.end_x_list), ) for word_x, next_word_x in zip( self.start_end_list_join[:-1], self.start_end_list_join[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = next_word_start_x - word_end_x if word_distance > 20: self.tab_count_join += 1 else: self.start_end_list_join = [] if len(self.text.split()) == 2 and self.tab_count == 1: self.text_list = self.text.split() # Count tabs in text list, Eventually make it a function of font size self.start_fs = round(style_dict["start_fs"][0], 1) self.end_fs = round(style_dict["end_fs"][-1], 1) self.compute_visual_features(page_stats) def compute_visual_features(self, page_stats): # compute font size relative to most common font font_sizes_mode = page_stats["mode_fs"] if self.fs > (4 / 3) * font_sizes_mode: self.is_comparably_sized = True else: self.is_comparably_sized = False # compute font weight relative to 600.0 which has generally # been observed to correspond to bolding of some sort font_weights_mode = page_stats["mode_fw"] if font_weights_mode >= 600.0: self.is_comparably_bolded = False elif self.fw > 600.0: self.is_comparably_bolded = True # compare line height for similar type (same font) lines if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2: for k, v in page_stats["fs_and_diff_prev_y"].items(): if k == self.fs and 0 <= v < self.diff_prev_y: break else: self.is_prev_space_smallest = True if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2: for k, v in page_stats["fs_and_diff_next_y"].items(): if k == self.fs and 0 <= v < self.diff_next_y: break else: self.is_next_space_smallest = True def should_join_table(self, next_line): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # check list of spaced words curr_line_ents = len(self.text_list) next_line_ents = len(next_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # compare alignment of elements in both lists if ent_match: return return False def should_join_para(self): return False def should_join_header(self): return False def __str__(self): output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest}," output_str += f"\nfont_style = {self.font_style}" return output_str <fim_middle>self.is_table_row = False
self.is_table_row = False
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
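The statement completed above is the negative branch of check_table_row; the positive branch rests on the ratio of value-like tokens to words crossing 0.7. A simplified standalone sketch of that heuristic, with an assumed token pattern standing in for the ingestor's Word class rather than its actual API:

import re

def looks_like_table_row(line):
    tokens = line.split()
    if not tokens:
        return False
    # count tokens that look like numbers, dollar amounts, or percentages
    value_count = sum(bool(re.fullmatch(r"[($]?[\d,.]+[%)]?", t)) for t in tokens)
    return value_count > 0 and value_count / len(tokens) > 0.7

print(looks_like_table_row("2019 $1,200 3.5% 900"))   # True: all four tokens are values
print(looks_like_table_row("Revenue grew in 2019."))  # False: one value among four words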
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/spell_utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/processors.py def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) # nlm-ingestor/nlm_ingestor/ingestor_utils/word_splitter.py def best_match(i): candidates = enumerate(reversed(cost[max(0, i - self._maxword) : i])) return min( (c + self._word2cost.get(s[i - k - 1 : i].lower(), 9e999), k + 1) for k, c in candidates ) # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def make_header(str): header_text = str if "_" in header_text: header_text = " ".join(header_text.split("_")).title() elif header_text.islower(): header_text = header_text.capitalize() else: header_text = " ".join(XMLIngestor.camel_case_split(header_text)).title() return header_text """ import logging import os import string from symspellpy.symspellpy import SymSpell from symspellpy.symspellpy import Verbosity import nlm_ingestor.ingestor as ingestor from nlm_ingestor.ingestor import patterns logger = logging.getLogger(__name__) class SpellUtil: def __init__(self): self.sym_spell = SymSpell(2, 7) dictionary_path = os.path.join( os.path.dirname(os.path.abspath(ingestor.__file__)), "../ingestor_models/symspell/frequency_dictionary_en_82_765.txt", ) bigram_path = os.path.join( os.path.dirname(os.path.abspath(ingestor.__file__)), "../ingestor_models/symspell/frequency_dictionary_en_82_765.txt", ) if not self.sym_spell.load_dictionary( dictionary_path, term_index=0, count_index=1, ): logging.error(f"Dictionary file not found: {dictionary_path}") return if not self.sym_spell.load_bigram_dictionary( bigram_path, term_index=0, count_index=2, ): logger.error(f"Bigram dictionary file not found: {bigram_path}") return def lookup_word(self, input_term): max_edit_distance_lookup = 2 suggestion_verbosity = Verbosity.CLOSEST # ignore_token = None ignore_token = "|".join(patterns.spell_check) suggestions = self.sym_spell.lookup( input_term, suggestion_verbosity, max_edit_distance_lookup, transfer_casing=False, ignore_token=ignore_token, ) # print(suggestions) # for suggestion in suggestions: # print("{}, {}, {}".format(suggestion.term, suggestion.distance, # suggestion.count)) if len(suggestions) > 0: return suggestions[0].term else: return input_term # def lookup_sentence(self, input_term): def lookup_compound(self, input_term): max_edit_distance_lookup = 2 suggestions = self.sym_spell.lookup_compound( input_term, max_edit_distance_lookup, transfer_casing=True, ignore_non_words=True, ) # for suggestion in suggestions: # print("{}, {}, {}".format(suggestion.term, suggestion.distance, # suggestion.count)) if len(suggestions) > 0: return suggestions[0].term else: return input_term def segment(self, input_term): is_mixed_case_term = not input_term.islower() if is_mixed_case_term: input_term = input_term.lower() <fim_suffix> corrected_string = suggestion.corrected_string if is_mixed_case_term: corrected_string = string.capwords(corrected_string) return corrected_string <fim_middle>suggestion = self.sym_spell.word_segmentation(input_term)
suggestion = self.sym_spell.word_segmentation(input_term)
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
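The line filled in above calls symspellpy's compound segmenter, whose result exposes the corrected_string attribute used two lines later. A hedged usage sketch of that API; the on-disk dictionary path is an assumption (the row above resolves it relative to the installed package):

from symspellpy import SymSpell

sym_spell = SymSpell(max_dictionary_edit_distance=2, prefix_length=7)
# The frequency dictionary ships with symspellpy; adjust the path as needed.
sym_spell.load_dictionary("frequency_dictionary_en_82_765.txt", term_index=0, count_index=1)
result = sym_spell.word_segmentation("thequickbrownfox")
print(result.corrected_string)  # expected: "the quick brown fox"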
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/line_parser.py def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def traverse(parent, level, blocks): for child in parent: # handle cases when there's only a <country /> tag if not child.text: continue if len(list(child)) > 0: # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader blocks.append(header_block) traverse(child, level + 1, blocks) else: # print("\t"*(level + 1), child.text) if not title and child.tag.lower().find("title") != -1: self.title = child.text if child.tag != "textblock": # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) # header_text = " ".join(child.tag.split("_")).title() header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader 
blocks.append(header_block) else: level -= 1 lines = child.text.split("\n") # print("\t" * (level + 1), "======") # for line in lines: # print("\t" * (level + 1), line) # print("\t" * (level + 1), "======") col_blocks = processors.clean_lines(lines, xml=True) header_text = blocks[-1]["block_text"] has_header = False for block in col_blocks: # print("\t" * (level + 1), block["block_text"]) inline_header = has_header and block["block_type"] == "para" block["header_text"] = para_header if inline_header else header_text indent_offset = 2 if inline_header else 1 block["level"] = level + indent_offset block["block_idx"] = len(blocks) block["page_idx"] = 0 block["block_sents"] = sent_tokenize(block["block_text"]) block["block_class"] = "nlm-text-body" block["level_chain"] = ( [title, header_text] if title else [header_text] ) if len(col_blocks) == 1: block["block_type"] = "para" blocks.append(block) if block["block_type"] == "header": has_header = True para_header = block["block_text"] # nlm-ingestor/nlm_ingestor/ingestor_utils/parsing_utils.py def find_potential_gaps(gap_count): """ This function checks if a table row can be formed from the current table row spacing scheme. This is for edge cases when tika doesn't properly chunk the cells of a line """ possible_gaps = 0 min_gap = min(gap_count) gap_threshold = [] for gap_size in gap_count: if gap_size > (min_gap * 3): gap_threshold.append(gap_size) possible_gaps += gap_count[gap_size] if len(gap_threshold): return possible_gaps, min(gap_threshold) # suggested splits return [], 0 """ import json import re import numpy as np from nltk import load from nltk import PunktSentenceTokenizer nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) if isinstance(obj, np.floating): return float(obj) if isinstance(obj, np.ndarray): return obj.tolist() return super(NpEncoder, self).default(obj) nlm_abbs = { "u.s", "u.s.a", "n.w", "p.o", "po", "st", "ave", "blvd", "ctr", "cir", "ct", "dr", "mtn", "apt", "hwy", "esq", "fig", "no", "sec", "n.a", "s.a.b", "non-u.s", "cap", 'u.s.c', "ste", } nlm_special_abbs = { "inc", } abbs = nltk_abbs | nlm_abbs nltk_tokenzier = PunktSentenceTokenizer() rules = [] for abb in abbs: # match start of the sentence pattern = fr"^{abb}.\s" replaced = f"{abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match token in sentence pattern = fr"\s{abb}.\s" replaced = f" {abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) for abb in nlm_special_abbs: pattern = fr"{abb}\." 
replaced = f"{abb}_" rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match content inside brackets # (?<=\() ==> starts with "(" # ([^)]+) ==> repeat not ")" # (?=\))") ==> ends with ")" bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))") space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.') quotation_pattern = re.compile(r'[โ€โ€œ"โ€˜โ€™\']') def sent_tokenize(org_texts): if not org_texts: return org_texts sents = [] # in case org_texts has \n, break it into multiple paragraph # edge case for html and markdown for org_text in org_texts.split("\n"): org_text = space_rule.sub(r'\1', org_text) modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925 orig_offset = abs(len(org_text) - len(modified_text)) # do not break bracket for span_group in bracket_rule.finditer(modified_text): start_byte, end_byte = span_group.span() span = modified_text[start_byte:end_byte] # skip this logic when span is too big? disabled for now # if len(span.split()) >= 10: # continue modified_text = modified_text.replace( f"({span})", f"_{span.replace('.','_')}_", ) for rule, replaced in rules: modified_text = rule.sub(replaced, modified_text) # Normalize all the quotation. modified_text = quotation_pattern.sub("\"", modified_text) modified_sents = nltk_tokenzier.tokenize(modified_text) offset = orig_offset sent_idx = 0 while offset < len(modified_text) and sent_idx < len(modified_sents): if modified_text[offset] == " ": offset += 1 continue # cut org_text based on lengths of modified_sent modified_sent = modified_sents[sent_idx] sents.append(org_text[offset: offset + len(modified_sent)]) offset += len(modified_sent) sent_idx += 1 if len(sents) >= 2 and re.match(r"^.\.$", sents[0]): sents[1] = sents[0] + " " + sents[1] sents = sents[1:] <fim_suffix> def divide_list_into_chunks(lst, n): # looping till length l for i in range(0, len(lst), n): yield lst[i : i + n] def normalize(X): norms = np.einsum("ij,ij->i", X, X) np.sqrt(norms, norms) X /= norms[:, np.newaxis] return X def detect_block_center_aligned(block, page_width): center_location = block["box_style"][1] + block["box_style"][3] / 2 center_aligned = abs(center_location - page_width / 2) < page_width * 0.01 width_check = block["box_style"][3] * 2 < page_width return center_aligned and width_check def detect_block_center_of_page(block, page_height): bottom = block["box_style"][0] + block["box_style"][4] center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3) return center_of_page def check_char_is_word_boundary(c): if c.isalnum(): return False if c in ['-', '_']: return False return True def blocks_to_sents(blocks, flatten_merged_table=False, debug=False): block_texts = [] block_info = [] header_block_idx = -1 header_match_idx = -1 header_match_idx_offset = -1 header_block_text = "" is_rendering_table = False is_rendering_merged_cells = False table_idx = 0 levels = [] prev_header = None block_idx = 0 for block_idx, block in enumerate(blocks): block_type = block["block_type"] if block_type == "header": if debug: print("---", block["level"], block["block_text"]) header_block_text = block["block_text"] header_block_idx = block["block_idx"] header_match_idx = header_match_idx_offset + 1 if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0: while len(levels) > 0 and levels[-1]["level"] >= block["level"]: if debug: print("<<", levels[-1]["level"], levels[-1]["block_text"]) levels.pop(-1) if debug: 
print(">>", block["block_text"]) levels.append(block) prev_header = block if debug: print("-", [str(level['level']) + "-" + level['block_text'] for level in levels]) block["header_text"] = header_block_text block["header_block_idx"] = header_block_idx block["header_match_idx"] = header_match_idx block["block_idx"] = block_idx level_chain = [] for level in levels: level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]}) # remove a level for header if block_type == "header": level_chain = level_chain[:-1] level_chain.reverse() block["level_chain"] = level_chain # if block_type == "header" or block_type == "table_row": if ( block_type == "header" and not is_rendering_table and 'is_table_start' not in block ): block_texts.append(block["block_text"]) # append text from next block to header block # TODO: something happened here, it messed up the match_text # if block_type == "header" and block_idx + 1 < len(blocks): # block[ # "block_text" # ] += blocks[block_idx+1]['block_text'] block_info.append(block) header_match_idx_offset += 1 elif ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" ) and not is_rendering_table: block_sents = block["block_sents"] header_match_idx_offset += len(block_sents) for sent in block_sents: block_texts.append(sent) block_info.append(block) elif 'is_table_start' in block: is_rendering_table = True if 'has_merged_cells' in block: is_rendering_merged_cells = True elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row": block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if is_rendering_table: if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table: eff_header_block = block["effective_header"] eff_para_block = block["effective_para"] eff_header_block["header_text"] = block["header_text"] eff_header_block["header_block_idx"] = block["block_idx"] eff_header_block["header_match_idx"] = header_match_idx_offset + 1 eff_header_block["level"] = block["level"] + 1 eff_header_block["level_chain"] = block["level_chain"] eff_para_block["header_block_idx"] = block["block_idx"] eff_para_block["header_match_idx"] = header_match_idx_offset + 1 eff_para_block["level"] = block["level"] + 2 eff_para_block["level_chain"] = [ { "block_idx": eff_header_block["block_idx"], "block_text": eff_header_block["block_text"], }, ] + eff_header_block["level_chain"] header_match_idx_offset += 1 block_info.append(block["effective_header"]) block_texts.append(block["effective_header"]["block_text"]) for sent in block["effective_para"]["block_sents"]: block_texts.append(sent) block_info.append(block["effective_para"]) header_match_idx_offset += len(block["effective_para"]["block_sents"]) else: block["table_idx"] = table_idx block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if 'is_table_end' in block: is_rendering_table = False table_idx += 1 return block_texts, block_info def get_block_texts(blocks): block_texts = [] block_info = [] for block in blocks: block_type = block["block_type"] if ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" or block_type == "header" ): block_texts.append(block["block_text"]) block_info.append(block) return block_texts, block_info<fim_middle>return sents
return sents
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
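The sent_tokenize code quoted in the row above hinges on one invariant: masking an abbreviation's trailing period with "_" keeps string lengths unchanged, so sentence spans found in the masked text can be sliced straight out of the original by offset. Below is a minimal, self-contained sketch of that idea; the ABBREVIATIONS set and the naive regex splitter standing in for PunktSentenceTokenizer are illustrative assumptions, not the repo's code.

import re

# Illustrative subset; the repo unions nltk's learned abbreviations with nlm_abbs.
ABBREVIATIONS = {"fig", "approx", "inc", "dr"}

def protected_sent_split(text: str):
    masked = text
    for abb in ABBREVIATIONS:
        # swap the trailing period for "_": same length, so offsets still line up
        masked = re.sub(rf"(?i)\b({re.escape(abb)})\.", r"\1_", masked)
    # naive splitter standing in for PunktSentenceTokenizer
    pieces = re.split(r"(?<=[.!?])\s+", masked)
    # recover the original spans by walking offsets, as sent_tokenize does
    sents, offset = [], 0
    for piece in pieces:
        while offset < len(text) and text[offset] == " ":
            offset += 1
        sents.append(text[offset: offset + len(piece)])
        offset += len(piece)
    return sents

print(protected_sent_split("See fig. 2 for details. Totals are approx. right."))
# ['See fig. 2 for details.', 'Totals are approx. right.']

Because the original text is sliced by offset rather than un-masked, the returned sentences keep their exact source punctuation, which is what lets downstream blocks carry verbatim spans.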
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/text_ingestor.py def blocks_to_json(page_blocks): results = [] block_count = 0 for page_idx, blocks in enumerate(page_blocks): result = [] block_start = block_count header_block_idx = -1 header_block_text = "" for block_idx_in_page, block in enumerate(blocks): if block["block_text"]: block_sents = utils.sent_tokenize(block["block_text"]) # header_block_idx = block["header_block_idx"] if block["block_type"] == "header": header_block_idx = block["block_idx"] header_block_text = block["block_text"] result.append( { "block_text": block["block_text"], "block_idx": block["block_idx"], "block_sents": block_sents, "block_type": block["block_type"], "header_block_idx": block_start + header_block_idx, "page_idx": page_idx, "block_idx_in_page": block_start + block_idx_in_page, "header_text": header_block_text, "text_group_start_idx": block["text_group_start_idx"], "block_list": block["block_list"], "level":0, "block_class": block["block_class"] if "block_class" in block else {} }, ) block_count += 1 results.append(result) return results # nlm-ingestor/nlm_ingestor/ingestor/styling_utils.py def tops_2_dict(p_items): tops_2_info = defaultdict(list) idx_2_top = {} for p_idx, p_item in enumerate(p_items): if not p_item.text.strip(): continue style_str = p_item.attrs.get("style", "") if not style_str: continue # do not strip text as trailing white-space is used as a features text = unicodedata.normalize("NFKD", p_item.text) style = get_p_styling_dict(style_str) start_y = style["start_y"] tops_2_info[round(start_y, 0)].append((p_idx, text, style)) idx_2_top[p_idx] = round(start_y, 0) # print(tops_2_info) return tops_2_info, idx_2_top # nlm-ingestor/nlm_ingestor/ingestor/table_parser.py def __init__(self, infos): self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(logging.INFO) self.tables = {} self.two_column_table_idx = set() self.resolved_tables = set() if not infos: return table_infos = [] table_start_idx = None for idx, info in enumerate(infos): if info.get("is_table_start", False) and not info.get("has_merged_cells", False): self.logger.debug(f"Found table start from match_idx:{idx}") table_start_idx = idx table_infos.append(info) elif table_start_idx is not None and info.get("is_table_end", False): table_infos.append(info) self.logger.debug(f"Table ends with match_idx:{idx}") # resolve table try: df = self.resolve_table_from_infos(table_infos) if isinstance(df, pd.DataFrame): self.logger.info( f"Found table at match_idx:{idx} of shape {df.shape}", ) self.tables[table_start_idx] = df if ( df.shape[1] == 1 and df.columns[0] == "_UNKNOWN_COLUMN_1_" and df.index.name == "_UNKNOWN_COLUMN_0_" ): for info_idx in range(len(table_infos)): self.two_column_table_idx.add(idx - info_idx) self.resolved_tables.add(table_infos[0]["table_idx"]) else: self.logger.error( f"Found table at match_idx:{idx} but failed to parse\n{table_infos[:2]}", ) except Exception: self.logger.error( f"Failed to parse table:\n{table_infos[:2]}", exc_info=True, ) # reset table_infos = [] table_start_idx = None elif table_start_idx: table_infos.append(info) """ import logging import re from collections import Counter from collections import defaultdict from . import formatter from . import line_parser from . 
import patterns
from nlm_ingestor.ingestor_utils import spell_utils
from nlm_ingestor.ingestor_utils.utils import sent_tokenize

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
su = spell_utils.SpellUtil()


def stem(line):
    line = line.replace("'s", "")
    line = line.replace("’s", "")
    return line


def check_parentheses(text):
    count = 0
    for i in text:
        if i == "(":
            count += 1
        elif i == ")":
            count -= 1
    return count == 0


def nlm_tokenize(line):
    # print(line)
    tokens = []
    if not line:
        line = ""
    line = line.lower()
    trans_table = line.maketrans("-/", "  ")
    line = line.translate(trans_table)
    line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—"))
    # line = patterns.num_unit.sub(r"100 \1", line)
    line = patterns.num_unit.sub(r"", line)
    line = stem(line)
    words = line.split()
    for word in words:
        if (
            not word.isdigit()
            and not word.endswith("%")
            and not word.startswith("$")
            and not word.endswith("$")
        ):
            tokens.append(word)
    if len(tokens) == 0:
        tokens.append("unknown")
    return tokens


# make sure that there is at least one word which is greater than two characters
def find_floating_chars(line):
    words = line.split(" ")
    for word in words:
        if len(word) > 2:
            return False
    return True


def is_table_row(line):
    line = line_parser.Line(line)
    return line.is_table_row


def should_skip(line, xml=False):
    return len(line) <= 2 if not xml else len(line) == 0


def clean_lines(lines, xml=False):
    result = []
    running_line = ""
    line_buffer = []
    line_type = "para"
    header_block_idx = -1
    block_idx = 0
    line_set = set()
    for line_str in lines:
        # print(line_str)
        line_str = clean_line(line_str)
        if should_skip(line_str, xml=xml):
            continue
        line_without_numbers = re.sub(r"\d+", "", line_str)
        if line_without_numbers in line_set:
            continue
        else:
            line_set.add(line_without_numbers)
        curr_line = line_parser.Line(line_str)
        # this converts strings like 'e x e c u t i v e summary' to 'executive summary'
        if not xml and curr_line.has_spaced_characters:
            line_str = fix_spaced_characters(line_str)
            curr_line = line_parser.Line(line_str)
<fim_suffix>
    if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—":
        running_line = running_line[1:].lstrip()
    block = {
        "block_idx": block_idx,
        "block_text": running_line,
        "block_type": line_type,
        "text_group_start_idx": -1,
        "block_list": [],
        "header_block_idx": header_block_idx,
        "level": 0,
    }
    result.append(block)
    return result


def line_list_check(prev_line, curr_line, list_char):
    # if prev_line is list_item and list_char matches curr_line
    if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]:
        return True
    # same char is alpha
    if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha():
        if len(prev_line.text) >= 2 and prev_line.text[1].isupper():
            # spell check first word
            first_word = prev_line.text.split(" ")[0]
            first_word = first_word.replace("'", "")
            correct_word = su.segment(first_word)
            if first_word[1:] == correct_word:
                return True
    # same char is not alpha but not digit
    if prev_line.text[0] == curr_line.text[0] and not (
        prev_line.text[0].isalpha()
        or prev_line.text[0].isdigit()
        or list_char not in ["”", "'", '"', "("]
    ):
        return True
    return False


def should_join_table(prev_line, curr_line, ents_aligned):
    """
    Check if next line should be joined as a tr.
This makes no assumption if the current line is a table """ # print() # print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list) # check list of spaced words curr_line_ents = len(prev_line.visual_line.text_list) next_line_ents = len(curr_line.visual_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count) tab_match = ( prev_line.visual_line.tab_count == curr_line.visual_line.tab_count and curr_line.visual_line.tab_count > 0 ) # casing should also be the same same_case = ( prev_line.text[0].islower() == curr_line.text[0].islower() or prev_line.text[0].isupper() == curr_line.text[0].isupper() ) colon_check = ( prev_line.hit_colon and curr_line.hit_colon and prev_line and same_case and not prev_line.incomplete_line ) # if prev_line.hit_colon and curr_line.hit_colon: # print() # print("colon check") # print(prev_line.visual_line.text_list) # print(curr_line.visual_line.text_list) # col_check # print(tab_match, ent_match, colon_check) tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count return ( (tab_match and ent_match) or colon_check or (ents_aligned and ent_match and tab_check) ) def check_page_spacing(prev_line, curr_line, spacing_dict): # print("^"*50) # print("checking page stats") # print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text) # print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text) # print() diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y) # find best fs reference prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs} curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs} same_fs = prev_line_fs.intersection(curr_line_fs) fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs min_check = ( spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None ) max_check = ( spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None ) normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3 if min_check or normal_check or max_check: # get all fs in spacing dict # see if the diff top is a min # print("checking space dict") distance_list = [] for val in spacing_dict: if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2: distance_list.append((val, val[1])) # print(distance_list) val = min(distance_list) if len(distance_list) else [] if len(val): join_fs, join_top = val[0] if len(val): join_fs, join_top = val[0] if val[0] == (fs, diff_top): # or close # print("SHOULDJOIN") return True elif ( join_fs == fs and ((diff_top - 1) == join_top) or ((diff_top + 1) == join_top) ): return True return False def compute_overlap( start_x0: float, end_x0: float, start_x1: float, end_x1: float, divide_by_min=True, ) -> float: """ Computes the % of intersection (overlap) of two lines w.r.t. 
the shortest line """ width_x0 = abs(end_x0 - start_x0) width_x1 = abs(end_x1 - start_x1) if start_x0 <= start_x1 <= end_x0: intersect = min(abs(end_x0 - start_x1), width_x1) elif start_x0 <= end_x1 <= end_x0: intersect = min(abs(end_x1 - start_x0), width_x1) elif start_x1 <= start_x0 <= end_x0 <= end_x1: intersect = abs(end_x0 - start_x0) else: intersect = 0.0 if divide_by_min: intersect /= min(width_x0, width_x1) + 1e-5 else: intersect /= max(width_x0, width_x1) + 1e-5 return intersect def compute_overlap_top_bottom( start_x0: float, end_x0: float, start_x1: float, end_x1: float, ) -> float: """ This is different from the above function. Finds percentage overlap of top to bottom. Score of 100% is possible doesn't reference the shortest line """ width_x1 = abs(end_x1 - start_x1) if width_x1 == 0: return 0.0 if start_x0 <= start_x1: # measure from left to right if end_x1 <= end_x0: # if start and end both less, full in subset return 1.0 return (end_x1 - start_x0) / width_x1 else: # measure from bottom start if end_x1 <= start_x0: return 0.0 return (end_x1 - start_x0) / width_x1 def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1): """ This is different from the above function. Finds percentage overlap of top to bottom. Score of 100% is possible doesn't reference the shortest line """ # print(start_x0, end_x0) # print(start_x1, end_x1) if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line # print() # print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0)) return (end_x1 - start_x1) / (end_x0 - start_x0) # other conditions # elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line # return # else: #to the right of bottom line return 1.0 # header check for lines with similar font # header check for lines with similar font def visual_header_check(prev_line, curr_line, same_font): # check top overlap (small) if the font size is bigger # print() # print("visual_header check:") # print("prev", prev_line.text) # print("checking", curr_line.text) # top also has to be higher # print("prev_line.visual_line.start_y, prev_line.visual_line.end_y") # print(prev_line.visual_line.start_y, prev_line.visual_line.end_y) # print(prev_line.visual_line.start_y, curr_line.visual_line.start_y) if prev_line.visual_line.wrapped_page: return False if prev_line.visual_line.start_y < curr_line.visual_line.start_y: prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x # print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x") # print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x) # print("curr_line.visual_line.min_x, curr_line.visual_line.max_x") # print(curr_line.visual_line.min_x, curr_line.visual_line.max_x) # print("prev_line_width / curr_line_width") # print(prev_line_width / curr_line_width) # print("prev_line_width, curr_line_width") # print(prev_line_width, curr_line_width) if curr_line_width == 0: return False # print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x)) if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x): if round(prev_line_width) == round(curr_line_width): # print() # print("NOT A HEADER1") return False offset = 0 # print(prev_line.visual_line.min_x, curr_line.visual_line.min_x) # print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x) if prev_line.visual_line.min_x <= curr_line.visual_line.min_x: offset = 
curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset # print("(prev_line_width - offset) / curr_line_width") # print((prev_line_width - offset) / curr_line_width) overlap_percentage = (prev_line_width - offset) / curr_line_width different_font_style = ( prev_line.visual_line.fw != curr_line.visual_line.fw or prev_line.visual_line[1] != curr_line.visual_line[1] or prev_line.visual_line.fs > curr_line.visual_line.fs ) if ( overlap_percentage < 0.3 or (different_font_style and overlap_percentage < 0.6) or (prev_line.line_type == "header" and different_font_style) # or (prev_line.is_header and different_font_style) ): # print("HEADER INDENT", prev_line.is_header) # print("overlap rule::", (prev_line_width - offset) / curr_line_width) # print(True) return True # print(False) # print() # print("NOT A HEADER") return False def visual_header_from_stats(prev_line, curr_line, page_stats): prev_fs = prev_line.visual_line.fs curr_fs = curr_line.visual_line.fs median_val = round(page_stats["median_fs"]) max_val = round(max(page_stats["fs_list"])) max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True prev_fs_diff = round(prev_fs - median_val) curr_fs_diff = ( round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8 ) # curr_fs is the median varied_set = len(set(page_stats["fs_list"])) >= 4 rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]]) unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"]) prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff # print("prev_fs, curr_fs", prev_fs, curr_fs) # print("unique text") # print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) ) # print("visual_header check", len(set(page_stats["fs_list"]))) # print("varied_set", varied_set, "unique_text", unique_text) # print(rounded_fs_count) # print() # close from max or far enough from median bigger_text = max_val_diff or ( prev_curr_ratio_from_median > 2 ) # TODO text must also be relatively uncommon if varied_set and (unique_text <= 0.08): if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3: # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True # header join if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1): # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True return False # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): def check_tr_alignment(prev_line, curr_line): # print("-=" * 50) # print("check_tr_alignment!") # print(prev_line.text) # print(curr_line.text) # print() prev_ents = len(prev_line.visual_line.text_list) curr_ents = len(curr_line.visual_line.text_list) prev_positions = prev_line.visual_line.start_x_list curr_positions = curr_line.visual_line.start_x_list prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent # print(prev_line_start_ents) # print(curr_line_start_ents) same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1 if len(prev_line_start_ents) == len(curr_line_start_ents): prev_positions = prev_line_start_ents curr_positions = curr_line_start_ents if len(prev_line_start_ents) == len(curr_positions) and len( prev_line_start_ents, ) != len( prev_positions, ): # joined p_tags prev_positions = 
prev_line_start_ents if not same_ents: # print("check_tr_alignment False1") # print(prev_ents, curr_ents) return False # print("CHECKING POSITIONS") # print(prev_positions) # print(curr_positions) for p_x, c_x in zip(prev_positions, curr_positions): p_x = round(p_x) c_x = round(c_x) if abs(p_x - c_x) > 100: # print("False") # print("check_tr_alignment False3") return False # print("check_tr_alignment True") return True def check_layout(prev_line, curr_line, prev_above_curr): prev_line_width = range( int(prev_line.visual_line.min_x), int(prev_line.visual_line.max_x), ) # weird edge case if not prev_line_width: prev_line_width = range( int(prev_line.visual_line.max_x), int(prev_line.visual_line.min_x), ) curr_line_width = range( int(curr_line.visual_line.min_x), int(curr_line.visual_line.max_x), ) prev_line_width = set(prev_line_width) prev_curr_overlap = prev_line_width.intersection(curr_line_width) if prev_curr_overlap and not prev_above_curr: # print(prev_line.text) # print(curr_line.text) # print("misplaced text group") # print() return True return False def order_blocks(blocks): block_group_dict = defaultdict(list) for idx, block in enumerate(blocks): # print(idx, "block-group", block["group_id"], block["block_type"], block['block_text']) group_id = block["group_id"] block_group_dict[group_id].append(block) block_group_list = [] # list that holds tuples (group_id, y_pos) for block_group_id in block_group_dict: block_group_list.append( (block_group_id, block_group_dict[block_group_id][0]["y"]), ) # append starting y position of group block_group_list = sorted( block_group_list, key=lambda x: x[1], ) # sort block groups by y position # get list of ordered block group keys ordered_blocks = [] for block_group_id, y in block_group_list: ordered_blocks += block_group_dict[block_group_id] # for b in original_blocks: # re-index blocks and headers based off of new ordering header_idx = 0 for idx, block in enumerate(ordered_blocks): block["block_idx"] = idx if block["block_type"] == "header": header_idx = idx ordered_blocks[idx]["header_block_idx"] = header_idx return ordered_blocks def visual_clean_lines( lines, page_stats={}, page_info_dict={}, page_idx=0, line_set={}, ): page_blocks = [] header_block_idx = -1 block_idx = 0 # block_idx = page_idx style_dict = {} join_font_spacing = False prev_line = None text_list = [] prev_ents = 0 curr_ents = 0 is_incomplete = False colon_rule = False text_group_start = True text_group_start_idx = 0 prev_line = None next_line = None # for idx, line in enumerate(lines[12:14]): sentence_visual_end = False group_id = 0 for idx, line in enumerate(lines): # print(idx) line_str, style_dict, text_list = ( line["text"], line["style"], line["text_list"], ) line_str = " ".join(line_str.split()) if should_skip(line_str): continue if line_str in line_set: continue if len(line_str.split()) > 8: line_set.add(line_str) curr_line = line_parser.Line( line_str=line_str, style_dict=style_dict, text_list=text_list, page_details=page_stats, ) if prev_line is None: # initialize memory of previous line. 
# this will update with join decisions list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "list_char": list_char, "fs": curr_line.visual_line.start_fs, "text_group_start_idx": text_group_start_idx, "block_list": curr_line.visual_line.text_list, "line": curr_line, "y": curr_line.visual_line.start_y, "group_id": group_id, } prev_line = curr_line block_idx += 1 # if (idx <= 3) or (idx >= len(lines) - 3): # line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip() # if line_without_numbers: # # track block_idx for de-duplication # line_set[line_without_numbers].append((page_idx, block_idx)) page_blocks.append(block) continue # print("--" * 50) # print(prev_line.line_type, "\n", prev_line.text) # print(prev_ents) # print(prev_line.visual_line.fw_list) # print(prev_line.visual_line.font_family) # print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text) # print(prev_line.visual_line.mode_fs) # print(curr_line.line_type, "\n", curr_line.text) # print(curr_ents) # print() # print(curr_line.visual_line.font_family) # print(curr_line.visual_line.mode_fs) # print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text) if ( len(prev_line.text) > 1 and len(curr_line.text) > 1 and prev_line.text[:2] == curr_line.text[:2] and prev_line.text[1] == " " and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit()) and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha()) ): curr_line.line_type = "list_item" curr_line.is_list_item = True curr_line.is_list_or_row = True if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["block_type"] = "list_item" page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() same_start_fs = ( abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5 ) same_end_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5 ) same_end_start_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5 ) prev_above_curr = ( True if prev_line.visual_line.end_y < curr_line.visual_line.start_y else False ) y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y top_overlap = compute_overlap_top_bottom( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) bottom_overlap = compute_bottom_top_overlap( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) prev_overlap_curr = True if bottom_overlap or top_overlap else False use_visual_join = True if prev_above_curr and prev_overlap_curr else False if not use_visual_join and prev_line.incomplete_line: join_font_spacing = True if not (prev_line.is_table_row or curr_line.is_table_row): if page_stats["n_lines"] <= 3: join_font_spacing = True else: join_font_spacing = check_page_spacing( prev_line, curr_line, page_stats["fs_and_diff_next_y"], ) # if the font is different and font-family is different different_font_family = ( 
curr_line.visual_line.font_family != prev_line.visual_line.font_family ) different_common_fs = ( prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs ) different_font = ( different_font_family and different_common_fs and not join_font_spacing ) # start and end characters are same font or the mode of fonts of both lines is the same same_font = ( (prev_line.visual_line.fs == curr_line.visual_line.fs) or (same_start_fs and same_end_fs) or same_end_start_fs or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs ) and not different_font prev_ents = ( len(prev_line.visual_line.text_list) if not prev_line.line_type == "list_item" else 0 ) curr_ents = ( len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0 ) ents_aligned = check_tr_alignment(prev_line, curr_line) is_incomplete_sent = ( prev_line.incomplete_line and not prev_line.ends_with_period or prev_line.ends_with_comma ) # logic using line after curr if idx + 1 < len(lines): # this is inefficent as line_parser is called twice, # once for next_line and once for curr_line. next_line = lines[idx + 1] # print("NEXT LINE\n", next_line['text']) next_line_str, next_style_dict, next_text_list = ( next_line["text"], next_line["style"], next_line["text_list"], ) next_line = line_parser.Line( line_str=next_line_str, style_dict=next_style_dict, text_list=next_text_list, page_details=page_stats, ) # if the last line was not a table, check if the next line is a table to avoid single tr if prev_line.line_type != "table_row" and not ents_aligned: # check if the next line is a table and matches curr_line next_line_tr = next_line.line_type == "table_row" or should_join_table( curr_line, next_line, False, ) if not next_line_tr and curr_line.line_type == "table_row": curr_line.line_type = "para" # if the next line is joinable by visual stats but prev and curr are not # don't join the line (only true by x-span check and y is below for prev cur) # if this is not true ignore the rule prev_not_above_next = ( next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y ) next_line_join = False if next_line and check_layout(prev_line, next_line, prev_not_above_next): next_line_join = check_page_spacing( curr_line, next_line, page_stats["fs_and_diff_next_y"], ) # if the prev line is not visually joinable and the curr_next is # make sure the prev_line doesn't join the curr_line curr_next_visual_join = not join_font_spacing and next_line_join # print() # print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line") # print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line) # print("join_font_spacing:,", join_font_spacing) is_incomplete = ( is_incomplete_sent or (join_font_spacing and not sentence_visual_end) or curr_line.continuing_line ) # print("is_incomplete", is_incomplete) has_overlap_with_min = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=True, ) > 0.7 ) is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0 is_visually_apart = (has_overlap_with_min and not is_below) or ( not has_overlap_with_min and is_below ) above_bold_below_not = ( prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0 ) has_overlap_with_max = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, 
prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=False, ) > 0.3 ) is_not_header_over_para = True if ( above_bold_below_not and not has_overlap_with_max and prev_line.line_type == "header" and not prev_line.incomplete_line ): is_not_header_over_para = False # print("header over para check") # print("""above_bold_below_not # and not has_overlap_with_max # and prev_line.line_type == "header" # """) # print(above_bold_below_not) # print(has_overlap_with_max, j) # print(prev_line.line_type == "header") # print() # print(is_not_header_over_para) ########### # List item if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]): prev_line.line_type = "list_item" curr_line.line_type = "list_item" curr_line.is_list_item = True # change prev_line to list item if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() page_blocks[-1]["block_type"] = "list_item" close_text_y = ( curr_line.visual_line.start_y - curr_line.visual_line.mode_fs - prev_line.visual_line.start_y - prev_line.visual_line.mode_fs ) <= 0 aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x title_text = False if len(lines) < 10: title_text = top_overlap == 1.0 and close_text_y and aligned_text visual_header = visual_header_check(prev_line, curr_line, same_font) list_item_rule = curr_line.has_list_char or ( curr_line.numbered_line and not ( (prev_line.incomplete_line and curr_line.continuing_line) or join_font_spacing ) ) last_2_block_tr = False if len(page_blocks) >= 2: last_block_tr = ( page_blocks[-1]["block_type"] == "table_row" and page_blocks[-2]["block_type"] == "table_row" ) if not last_block_tr and curr_line.line_type == "para": # check to join if prev_line.incomplete_line and curr_line.continuing_line: last_2_block_tr = True no_space_join = prev_line.ends_with_period and curr_line.text[0] != " " visual_header_by_stats = visual_header_from_stats( prev_line, curr_line, page_stats, ) header_join = False common_list = curr_line.has_list_char or prev_line.has_list_char if ( visual_header_by_stats and curr_line.incomplete_line and same_font and not (prev_line.is_table_row or curr_line.is_table_row or common_list) ): header_join = True # print("LINEJOIN CHECK") # print("positive\n", "*" * 10) # print(f"\nsame_font:{same_font}", # f"\nis_incomplete:{is_incomplete}", # f"\nis_not_header_over_para:{is_not_header_over_para}") # print("join_font_spacing", join_font_spacing) # print("header join", header_join) # print() # print("negative\n", "*" * 10) # print(f"\nis_visually_apart:{is_visually_apart}", # f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}", # f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}", # f"\ncurr_line table {curr_line.line_type == 'table_row'}", # f"\ncurr_line list {curr_line.is_list_item}", # f"\nvisual_header {visual_header}", # f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}') if ( same_font and not should_join_table(prev_line, curr_line, ents_aligned) and not (curr_line.line_type == "table_row" or list_item_rule) and not (prev_line.line_type == "table_row" and not last_2_block_tr) and is_incomplete and not curr_next_visual_join # is_visually_apart and not visual_header or not check_parentheses(prev_line.text) and is_not_header_over_para and not no_space_join or title_text or header_join ): # print("JOIN") if not is_visually_apart and 
bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False if page_stats["n_lines"] <= 3: page_blocks[-1]["block_type"] = "header" elif ( not prev_line.line_type == "list_item" ): # and not curr_line.visual_line.is_header: page_blocks[-1]["block_type"] = "para" new_text = formatter.connect( prev_line.text.rstrip(), curr_line.text.lstrip(), ) new_text_list = ( prev_line.visual_line.text_list + curr_line.visual_line.text_list ) # print("Max ex min ex assignment") max_x = max(prev_line.visual_line.max_x, prev_line.visual_line.max_x) min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x) prev_line_type = prev_line.line_type page_blocks[-1]["block_text"] = new_text prev_start_y = prev_line.visual_line.start_y curr_start_y = curr_line.visual_line.start_y prev_end_y = prev_line.visual_line.end_y wrapped_page = prev_line.visual_line.wrapped_page # pass the line parser attributes prev_line = curr_line # add appended text and text_list, preserve the line type prev_line.text = new_text prev_line.visual_line.start_y = prev_start_y prev_line.visual_line.text_list = new_text_list prev_line.line_type = prev_line_type prev_line.visual_line.min_x = min_x prev_line.visual_line.max_x = max_x prev_line.visual_line.wrapped_page = wrapped_page if curr_start_y < prev_end_y: prev_line.visual_line.wrapped_page = True # print(prev_start_y) # print("Join") # print() # print("-" * 50) # print() # new block else: # print("NEW block") # print("*" * 50) if not is_visually_apart and bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False # print("-"*50) colon_rule = ( prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents ) # normal case tab_check_join = { prev_line.visual_line.tab_count_join, prev_line.visual_line.tab_count, } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count} tab_check = sum(tab_check_join) > 0 # print("-+" * 50) # print("TAB POSITIONS") # print(prev_line.text) # print(prev_line.visual_line.start_x_list) # print(prev_line.visual_line.start_x_list_single_ent) # print(prev_line.visual_line.tab_count) # print(prev_line.visual_line.tab_count_join) # # print(curr_line.text) # print(curr_line.visual_line.start_x_list) # print(curr_line.visual_line.start_x_list_single_ent) # print(curr_line.visual_line.tab_count) # print(curr_line.visual_line.tab_count_join) # print("tabcheck", tab_check) # print("ents_aligned", ents_aligned) # print(prev_ents, curr_ents) # print(curr_line.visual_line.text_list) # print("-+" * 50) if visual_header_by_stats and prev_line.line_type != "table_row": page_blocks[-1]["block_type"] = "header" elif ( colon_rule and prev_ents == 1 and prev_line.line_type != "list_item" and not (prev_line.incomplete_line and curr_line.continuing_line) ): # print("Table Conversion") # print() # print("colon check") # print(prev_line.text.split(":")) # print(curr_line.text.split(":")) # print("TR1") new_text_list = prev_line.text.split(":") new_text_list = [new_text_list[0] + ":", new_text_list[1:]] page_blocks[-1]["block_type"] = "table_row" page_blocks[-1]["block_list"]: new_text_list if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" curr_line.is_list_or_row = True # print("Table Conversion!") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR3") elif ( 
tab_check and ents_aligned and prev_line.line_type != "list_item" ) or (colon_rule and not prev_line.incomplete_line): # print("Table Conversion") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR2") page_blocks[-1]["block_type"] = "table_row" if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" else: text_group_start = True text_group_start_idx = -1 list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx if (visual_header or visual_header_by_stats) and not ( prev_line.line_type == "list_item" or prev_line.line_type == "numbered_list_item" ): page_blocks[-1]["block_type"] = "header" # print() # print("*" * 40) # print("NEW BLOCK") # print() # print("*" * 40) # print(curr_line.line_type, curr_line.text) # group attribute if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0: group_id += 1 block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "text_group_start_idx": text_group_start_idx, "list_char": list_char, "group_id": group_id, "fs": curr_line.visual_line.start_fs, "x": curr_line.visual_line.start_x, "y": curr_line.visual_line.start_y, "line": curr_line, "block_list": curr_line.visual_line.text_list, } # This is to account for when the headers get false positive #TODO improve header code prev_text = page_blocks[-1]["block_text"] if page_blocks[-1]["block_type"] == "header" and ( len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16 ): page_blocks[-1]["block_type"] = "para" prev_line = curr_line block_idx += 1 page_blocks.append(block) # not too many blocks there may be title text missed if len(page_blocks) <= 2: for idx, block in enumerate(page_blocks): if "." 
not in block["block_text"] and len(block["block_text"].split()) < 10: page_blocks[idx]["block_type"] = "header" page_blocks = order_blocks(page_blocks) return page_blocks, line_set def clean_line(line): line = line.replace("\n", " ") line = line.replace("\t", " ") line = line.strip() return line def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) def connect(prev, curr): has_space = prev.endswith(" ") result = prev + ("" if has_space else " ") + curr return result def get_numbers(line): # test = re.compile(r"[0-9]+\.?[0-9]?") regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$") return regex.search(line) def check_block_join(prev_block, block): prev_text = prev_block["block_text"] curr_text = block["block_text"] blocks_are_paras = ( prev_block["block_type"] == "para" and block["block_type"] == "para" ) if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras: prev_line = line_parser.Line(prev_block["block_text"]) curr_line = line_parser.Line(block["block_text"]) if prev_line.incomplete_line or curr_line.continuing_line: return True return False def join_blocks(page_blocks, blocks): prev_last_block = page_blocks[-1][-1] # update page blocks and blocks # prev_blocks = page_blocks[-1] # last_prev_block = prev_blocks[-1] # check to join last_prev_block with first blocks[0] # if it's a join, pop the block and join, subtract block indexes prev_last_block["block_text"] = ( prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip() ) prev_last_block["block_list"].append(blocks[0]["block_list"]) # print(prev_block) page_blocks[-1][-1] = prev_last_block for block in blocks[1:]: block["block_idx"] -= 1 return page_blocks, blocks[1:] <fim_middle>if len(line_buffer) > 0: # find out if previous line was a discontinous line prev_line = line_buffer[-1] logger.debug("========") logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n") logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n") # keep connecting lines as long as they seem incomplete is_incomplete = prev_line.incomplete_line or ( len(line_buffer) > 1 and not prev_line.ends_with_period ) logger.debug( f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}", ) if ( is_incomplete and not (curr_line.is_list_or_row or curr_line.line_type == "list_item") ) or curr_line.continuing_line: logger.debug("connecting..") running_line = formatter.connect(running_line, curr_line.text) line_buffer.append(curr_line) # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers if not line_type == "list_item": line_type = "para" else: # commit the line and start a new line # remove different types of bulletted list (for better formatting) but do not touch numbered line logger.debug("starting new line..") # if line_type == "list_item": # running_line = running_line[1:].lstrip() if line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) block_idx = block_idx + 1 running_line = curr_line.text line_buffer = [curr_line] line_type = curr_line.line_type logger.debug("========") else: running_line = curr_line.text line_type = curr_line.line_type line_buffer = [curr_line]
if len(line_buffer) > 0:
    # find out if previous line was a discontinuous line
    prev_line = line_buffer[-1]
    logger.debug("========")
    logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
    logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
    # keep connecting lines as long as they seem incomplete
    is_incomplete = prev_line.incomplete_line or (
        len(line_buffer) > 1 and not prev_line.ends_with_period
    )
    logger.debug(
        f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
    )
    if (
        is_incomplete
        and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
    ) or curr_line.continuing_line:
        logger.debug("connecting..")
        running_line = formatter.connect(running_line, curr_line.text)
        line_buffer.append(curr_line)
        # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
        if not line_type == "list_item":
            line_type = "para"
    else:
        # commit the line and start a new line
        # remove different types of bulleted list (for better formatting) but do not touch numbered line
        logger.debug("starting new line..")
        # if line_type == "list_item":
        #     running_line = running_line[1:].lstrip()
        if line_type == "header":
            header_block_idx = block_idx
        block = {
            "block_idx": block_idx,
            "block_text": running_line,
            "block_type": line_type,
            "text_group_start_idx": -1,
            "block_list": [],
            "header_block_idx": header_block_idx,
            "level": 0,
        }
        result.append(block)
        block_idx = block_idx + 1
        running_line = curr_line.text
        line_buffer = [curr_line]
        line_type = curr_line.line_type
    logger.debug("========")
else:
    running_line = curr_line.text
    line_type = curr_line.line_type
    line_buffer = [curr_line]
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
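The IF-block target above is the heart of clean_lines: buffer consecutive lines, keep connecting them while the previous line looks incomplete or the current line continues it, and commit a finished block otherwise. Here is a stripped-down sketch of that decision loop under simplified assumptions: punctuation-based looks_incomplete and lowercase-start looks_continuing tests stand in for the many signals line_parser.Line computes, and header/list handling is omitted.

def looks_incomplete(line: str) -> bool:
    # hypothetical stand-in for prev_line.incomplete_line
    return not line.endswith((".", "!", "?", ":"))

def looks_continuing(line: str) -> bool:
    # hypothetical stand-in for curr_line.continuing_line
    return line[:1].islower()

def join_wrapped_lines(lines):
    blocks = []
    running = ""
    for raw in lines:
        line = " ".join(raw.split())  # same whitespace normalization as clean_line
        if len(line) <= 2:            # same skip rule as should_skip
            continue
        if running and (looks_incomplete(running) or looks_continuing(line)):
            running = running + " " + line   # connect, like formatter.connect
        else:
            if running:
                blocks.append(running)       # commit the finished block
            running = line
    if running:
        blocks.append(running)
    return blocks

print(join_wrapped_lines([
    "The merger was approved by the",
    "board on March 3.",
    "New section starts here.",
]))
# ['The merger was approved by the board on March 3.', 'New section starts here.']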
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/text_ingestor.py def blocks_to_json(page_blocks): results = [] block_count = 0 for page_idx, blocks in enumerate(page_blocks): result = [] block_start = block_count header_block_idx = -1 header_block_text = "" for block_idx_in_page, block in enumerate(blocks): if block["block_text"]: block_sents = utils.sent_tokenize(block["block_text"]) # header_block_idx = block["header_block_idx"] if block["block_type"] == "header": header_block_idx = block["block_idx"] header_block_text = block["block_text"] result.append( { "block_text": block["block_text"], "block_idx": block["block_idx"], "block_sents": block_sents, "block_type": block["block_type"], "header_block_idx": block_start + header_block_idx, "page_idx": page_idx, "block_idx_in_page": block_start + block_idx_in_page, "header_text": header_block_text, "text_group_start_idx": block["text_group_start_idx"], "block_list": block["block_list"], "level":0, "block_class": block["block_class"] if "block_class" in block else {} }, ) block_count += 1 results.append(result) return results # nlm-ingestor/nlm_ingestor/ingestor/styling_utils.py def tops_2_dict(p_items): tops_2_info = defaultdict(list) idx_2_top = {} for p_idx, p_item in enumerate(p_items): if not p_item.text.strip(): continue style_str = p_item.attrs.get("style", "") if not style_str: continue # do not strip text as trailing white-space is used as a features text = unicodedata.normalize("NFKD", p_item.text) style = get_p_styling_dict(style_str) start_y = style["start_y"] tops_2_info[round(start_y, 0)].append((p_idx, text, style)) idx_2_top[p_idx] = round(start_y, 0) # print(tops_2_info) return tops_2_info, idx_2_top # nlm-ingestor/nlm_ingestor/ingestor/table_parser.py def __init__(self, infos): self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(logging.INFO) self.tables = {} self.two_column_table_idx = set() self.resolved_tables = set() if not infos: return table_infos = [] table_start_idx = None for idx, info in enumerate(infos): if info.get("is_table_start", False) and not info.get("has_merged_cells", False): self.logger.debug(f"Found table start from match_idx:{idx}") table_start_idx = idx table_infos.append(info) elif table_start_idx is not None and info.get("is_table_end", False): table_infos.append(info) self.logger.debug(f"Table ends with match_idx:{idx}") # resolve table try: df = self.resolve_table_from_infos(table_infos) if isinstance(df, pd.DataFrame): self.logger.info( f"Found table at match_idx:{idx} of shape {df.shape}", ) self.tables[table_start_idx] = df if ( df.shape[1] == 1 and df.columns[0] == "_UNKNOWN_COLUMN_1_" and df.index.name == "_UNKNOWN_COLUMN_0_" ): for info_idx in range(len(table_infos)): self.two_column_table_idx.add(idx - info_idx) self.resolved_tables.add(table_infos[0]["table_idx"]) else: self.logger.error( f"Found table at match_idx:{idx} but failed to parse\n{table_infos[:2]}", ) except Exception: self.logger.error( f"Failed to parse table:\n{table_infos[:2]}", exc_info=True, ) # reset table_infos = [] table_start_idx = None elif table_start_idx: table_infos.append(info) """ import logging import re from collections import Counter from collections import defaultdict from . import formatter from . import line_parser from . 
import patterns from nlm_ingestor.ingestor_utils import spell_utils from nlm_ingestor.ingestor_utils.utils import sent_tokenize logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) su = spell_utils.SpellUtil() def stem(line): line = line.replace("'s", "") line = line.replace("โ€™s", "") return line def check_parentheses(text): count = 0 for i in text: if i == "(": count += 1 elif i == ")": count -= 1 return count == 0 def nlm_tokenize(line): # print(line) tokens = [] if not line: line = "" line = line.lower() trans_table = line.maketrans("-/", " ") line = line.translate(trans_table) line = line.translate(str.maketrans("", "", "๏ฟฝ\\(*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\)'\"โ€”")) # line = patterns.num_unit.sub(r"100 \1", line) line = patterns.num_unit.sub(r"", line) line = stem(line) words = line.split() for word in words: if ( not word.isdigit() and not word.endswith("%") and not word.startswith("$") and not word.endswith("$") ): tokens.append(word) if len(tokens) == 0: tokens.append("unknown") return tokens # make sure that there is at least one word which is greater than two characters def find_floating_chars(line): words = line.split(" ") for word in words: if len(word) > 2: return False return True def is_table_row(line): line = line_parser.Line(line) return line.is_table_row def should_skip(line, xml=False): return len(line) <= 2 if not xml else len(line) == 0 def clean_lines(lines, xml=False): result = [] running_line = "" line_buffer = [] line_type = "para" header_block_idx = -1 block_idx = 0 line_set = set() for line_str in lines: # print(line_str) line_str = clean_line(line_str) if should_skip(line_str, xml=xml): continue line_without_numbers = re.sub(r"\d+", "", line_str) if line_without_numbers in line_set: continue else: line_set.add(line_without_numbers) curr_line = line_parser.Line(line_str) # this converst strings like 'e x e c u t i v e summary' to 'executive summary' if not xml and curr_line.has_spaced_characters: line_str = fix_spaced_characters(line_str) curr_line = line_parser.Line(line_str) if len(line_buffer) > 0: # find out if previous line was a discontinous line prev_line = line_buffer[-1] logger.debug("========") logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n") logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n") # keep connecting lines as long as they seem incomplete is_incomplete = prev_line.incomplete_line or ( len(line_buffer) > 1 and not prev_line.ends_with_period ) logger.debug( f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}", ) if ( is_incomplete and not (curr_line.is_list_or_row or curr_line.line_type == "list_item") ) or curr_line.continuing_line: logger.debug("connecting..") running_line = formatter.connect(running_line, curr_line.text) line_buffer.append(curr_line) # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers <fim_suffix> else: # commit the line and start a new line # remove different types of bulletted list (for better formatting) but do not touch numbered line logger.debug("starting new line..") # if line_type == "list_item": # running_line = running_line[1:].lstrip() if line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) block_idx = block_idx + 1 running_line = curr_line.text 
line_buffer = [curr_line] line_type = curr_line.line_type logger.debug("========") else: running_line = curr_line.text line_type = curr_line.line_type line_buffer = [curr_line] if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—": running_line = running_line[1:].lstrip() block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) return result def line_list_check(prev_line, curr_line, list_char): # if prev_line is list_item and list_char matches curr_line if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]: return True # same char is alpha if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha(): if len(prev_line.text) >= 2 and prev_line.text[1].isupper(): # spell check first word first_word = prev_line.text.split(" ")[0] first_word = first_word.replace("'", "") correct_word = su.segment(first_word) if first_word[1:] == correct_word: return True # same char is not alpha but not digit if prev_line.text[0] == curr_line.text[0] and not ( prev_line.text[0].isalpha() or prev_line.text[0].isdigit() or list_char not in ["”", "'", '"', "("] ): return True return False def should_join_table(prev_line, curr_line, ents_aligned): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # print() # print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list) # check list of spaced words curr_line_ents = len(prev_line.visual_line.text_list) next_line_ents = len(curr_line.visual_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count) tab_match = ( prev_line.visual_line.tab_count == curr_line.visual_line.tab_count and curr_line.visual_line.tab_count > 0 ) # casing should also be the same same_case = ( prev_line.text[0].islower() == curr_line.text[0].islower() or prev_line.text[0].isupper() == curr_line.text[0].isupper() ) colon_check = ( prev_line.hit_colon and curr_line.hit_colon and prev_ents and same_case and not prev_line.incomplete_line ) # if prev_line.hit_colon and curr_line.hit_colon: # print() # print("colon check") # print(prev_line.visual_line.text_list) # print(curr_line.visual_line.text_list) # col_check # print(tab_match, ent_match, colon_check) tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count return ( (tab_match and ent_match) or colon_check or (ents_aligned and ent_match and tab_check) ) def check_page_spacing(prev_line, curr_line, spacing_dict): # print("^"*50) # print("checking page stats") # print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text) # print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text) # print() diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y) # find best fs reference prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs} curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs} same_fs = prev_line_fs.intersection(curr_line_fs) fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs min_check = ( spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None ) max_check = ( spacing_dict[(fs,
diff_top + 1) in spacing_dict else None ) normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3 if min_check or normal_check or max_check: # get all fs in spacing dict # see if the diff top is a min # print("checking space dict") distance_list = [] for val in spacing_dict: if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2: distance_list.append((val, val[1])) # print(distance_list) val = min(distance_list) if len(distance_list) else [] if len(val): join_fs, join_top = val[0] if val[0] == (fs, diff_top): # or close # print("SHOULDJOIN") return True elif ( join_fs == fs and ((diff_top - 1) == join_top) or ((diff_top + 1) == join_top) ): return True return False def compute_overlap( start_x0: float, end_x0: float, start_x1: float, end_x1: float, divide_by_min=True, ) -> float: """ Computes the % of intersection (overlap) of two lines w.r.t. the shortest line """ width_x0 = abs(end_x0 - start_x0) width_x1 = abs(end_x1 - start_x1) if start_x0 <= start_x1 <= end_x0: intersect = min(abs(end_x0 - start_x1), width_x1) elif start_x0 <= end_x1 <= end_x0: intersect = min(abs(end_x1 - start_x0), width_x1) elif start_x1 <= start_x0 <= end_x0 <= end_x1: intersect = abs(end_x0 - start_x0) else: intersect = 0.0 if divide_by_min: intersect /= min(width_x0, width_x1) + 1e-5 else: intersect /= max(width_x0, width_x1) + 1e-5 return intersect def compute_overlap_top_bottom( start_x0: float, end_x0: float, start_x1: float, end_x1: float, ) -> float: """ This is different from the above function. Finds percentage overlap of top to bottom. A score of 100% is possible since it doesn't reference the shortest line """ width_x1 = abs(end_x1 - start_x1) if width_x1 == 0: return 0.0 if start_x0 <= start_x1: # measure from left to right if end_x1 <= end_x0: # if start and end both fall inside, fully a subset return 1.0 return (end_x1 - start_x0) / width_x1 else: # measure from bottom start if end_x1 <= start_x0: return 0.0 return (end_x1 - start_x0) / width_x1 def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1): """ This is different from the above function. Finds percentage overlap of top to bottom.
A score of 100% is possible since it doesn't reference the shortest line """ # print(start_x0, end_x0) # print(start_x1, end_x1) if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line # print() # print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0)) return (end_x1 - start_x1) / (end_x0 - start_x0) # other conditions # elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line # return # else: #to the right of bottom line return 1.0 # header check for lines with similar font def visual_header_check(prev_line, curr_line, same_font): # check top overlap (small) if the font size is bigger # print() # print("visual_header check:") # print("prev", prev_line.text) # print("checking", curr_line.text) # top also has to be higher # print("prev_line.visual_line.start_y, prev_line.visual_line.end_y") # print(prev_line.visual_line.start_y, prev_line.visual_line.end_y) # print(prev_line.visual_line.start_y, curr_line.visual_line.start_y) if prev_line.visual_line.wrapped_page: return False if prev_line.visual_line.start_y < curr_line.visual_line.start_y: prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x # print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x") # print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x) # print("curr_line.visual_line.min_x, curr_line.visual_line.max_x") # print(curr_line.visual_line.min_x, curr_line.visual_line.max_x) # print("prev_line_width / curr_line_width") # print(prev_line_width / curr_line_width) # print("prev_line_width, curr_line_width") # print(prev_line_width, curr_line_width) if curr_line_width == 0: return False # print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x)) if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x): if round(prev_line_width) == round(curr_line_width): # print() # print("NOT A HEADER1") return False offset = 0 # print(prev_line.visual_line.min_x, curr_line.visual_line.min_x) # print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x) if prev_line.visual_line.min_x <= curr_line.visual_line.min_x: offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset # print("(prev_line_width - offset) / curr_line_width") # print((prev_line_width - offset) / curr_line_width) overlap_percentage = (prev_line_width - offset) / curr_line_width different_font_style = ( prev_line.visual_line.fw != curr_line.visual_line.fw or prev_line.visual_line.font_style != curr_line.visual_line.font_style or prev_line.visual_line.fs > curr_line.visual_line.fs ) if ( overlap_percentage < 0.3 or (different_font_style and overlap_percentage < 0.6) or (prev_line.line_type == "header" and different_font_style) # or (prev_line.is_header and different_font_style) ): # print("HEADER INDENT", prev_line.is_header) # print("overlap rule::", (prev_line_width - offset) / curr_line_width) # print(True) return True # print(False) # print() # print("NOT A HEADER") return False def visual_header_from_stats(prev_line, curr_line, page_stats): prev_fs = prev_line.visual_line.fs curr_fs = curr_line.visual_line.fs median_val = round(page_stats["median_fs"]) max_val = round(max(page_stats["fs_list"])) max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True prev_fs_diff = round(prev_fs - median_val) curr_fs_diff = ( round(curr_fs - median_val) if round(curr_fs -
median_val) else 0.8 ) # curr_fs is the median varied_set = len(set(page_stats["fs_list"])) >= 4 rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]]) unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"]) prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff # print("prev_fs, curr_fs", prev_fs, curr_fs) # print("unique text") # print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) ) # print("visual_header check", len(set(page_stats["fs_list"]))) # print("varied_set", varied_set, "unique_text", unique_text) # print(rounded_fs_count) # print() # close from max or far enough from median bigger_text = max_val_diff or ( prev_curr_ratio_from_median > 2 ) # TODO text must also be relatively uncommon if varied_set and (unique_text <= 0.08): if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3: # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True # header join if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1): # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True return False # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): def check_tr_alignment(prev_line, curr_line): # print("-=" * 50) # print("check_tr_alignment!") # print(prev_line.text) # print(curr_line.text) # print() prev_ents = len(prev_line.visual_line.text_list) curr_ents = len(curr_line.visual_line.text_list) prev_positions = prev_line.visual_line.start_x_list curr_positions = curr_line.visual_line.start_x_list prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent # print(prev_line_start_ents) # print(curr_line_start_ents) same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1 if len(prev_line_start_ents) == len(curr_line_start_ents): prev_positions = prev_line_start_ents curr_positions = curr_line_start_ents if len(prev_line_start_ents) == len(curr_positions) and len( prev_line_start_ents, ) != len( prev_positions, ): # joined p_tags prev_positions = prev_line_start_ents if not same_ents: # print("check_tr_alignment False1") # print(prev_ents, curr_ents) return False # print("CHECKING POSITIONS") # print(prev_positions) # print(curr_positions) for p_x, c_x in zip(prev_positions, curr_positions): p_x = round(p_x) c_x = round(c_x) if abs(p_x - c_x) > 100: # print("False") # print("check_tr_alignment False3") return False # print("check_tr_alignment True") return True def check_layout(prev_line, curr_line, prev_above_curr): prev_line_width = range( int(prev_line.visual_line.min_x), int(prev_line.visual_line.max_x), ) # weird edge case if not prev_line_width: prev_line_width = range( int(prev_line.visual_line.max_x), int(prev_line.visual_line.min_x), ) curr_line_width = range( int(curr_line.visual_line.min_x), int(curr_line.visual_line.max_x), ) prev_line_width = set(prev_line_width) prev_curr_overlap = prev_line_width.intersection(curr_line_width) if prev_curr_overlap and not prev_above_curr: # print(prev_line.text) # print(curr_line.text) # print("misplaced text group") # print() return True return False def order_blocks(blocks): block_group_dict = defaultdict(list) for idx, block in enumerate(blocks): # print(idx, "block-group", block["group_id"], block["block_type"], block['block_text'])
group_id = block["group_id"] block_group_dict[group_id].append(block) block_group_list = [] # list that holds tuples (group_id, y_pos) for block_group_id in block_group_dict: block_group_list.append( (block_group_id, block_group_dict[block_group_id][0]["y"]), ) # append starting y position of group block_group_list = sorted( block_group_list, key=lambda x: x[1], ) # sort block groups by y position # get list of ordered block group keys ordered_blocks = [] for block_group_id, y in block_group_list: ordered_blocks += block_group_dict[block_group_id] # for b in original_blocks: # re-index blocks and headers based off of new ordering header_idx = 0 for idx, block in enumerate(ordered_blocks): block["block_idx"] = idx if block["block_type"] == "header": header_idx = idx ordered_blocks[idx]["header_block_idx"] = header_idx return ordered_blocks def visual_clean_lines( lines, page_stats={}, page_info_dict={}, page_idx=0, line_set={}, ): page_blocks = [] header_block_idx = -1 block_idx = 0 # block_idx = page_idx style_dict = {} join_font_spacing = False prev_line = None text_list = [] prev_ents = 0 curr_ents = 0 is_incomplete = False colon_rule = False text_group_start = True text_group_start_idx = 0 next_line = None # for idx, line in enumerate(lines[12:14]): sentence_visual_end = False group_id = 0 for idx, line in enumerate(lines): # print(idx) line_str, style_dict, text_list = ( line["text"], line["style"], line["text_list"], ) line_str = " ".join(line_str.split()) if should_skip(line_str): continue if line_str in line_set: continue if len(line_str.split()) > 8: line_set.add(line_str) curr_line = line_parser.Line( line_str=line_str, style_dict=style_dict, text_list=text_list, page_details=page_stats, ) if prev_line is None: # initialize memory of previous line.
# this will update with join decisions list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "list_char": list_char, "fs": curr_line.visual_line.start_fs, "text_group_start_idx": text_group_start_idx, "block_list": curr_line.visual_line.text_list, "line": curr_line, "y": curr_line.visual_line.start_y, "group_id": group_id, } prev_line = curr_line block_idx += 1 # if (idx <= 3) or (idx >= len(lines) - 3): # line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip() # if line_without_numbers: # # track block_idx for de-duplication # line_set[line_without_numbers].append((page_idx, block_idx)) page_blocks.append(block) continue # print("--" * 50) # print(prev_line.line_type, "\n", prev_line.text) # print(prev_ents) # print(prev_line.visual_line.fw_list) # print(prev_line.visual_line.font_family) # print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text) # print(prev_line.visual_line.mode_fs) # print(curr_line.line_type, "\n", curr_line.text) # print(curr_ents) # print() # print(curr_line.visual_line.font_family) # print(curr_line.visual_line.mode_fs) # print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text) if ( len(prev_line.text) > 1 and len(curr_line.text) > 1 and prev_line.text[:2] == curr_line.text[:2] and prev_line.text[1] == " " and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit()) and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha()) ): curr_line.line_type = "list_item" curr_line.is_list_item = True curr_line.is_list_or_row = True if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["block_type"] = "list_item" page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() same_start_fs = ( abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5 ) same_end_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5 ) same_end_start_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5 ) prev_above_curr = ( True if prev_line.visual_line.end_y < curr_line.visual_line.start_y else False ) y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y top_overlap = compute_overlap_top_bottom( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) bottom_overlap = compute_bottom_top_overlap( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) prev_overlap_curr = True if bottom_overlap or top_overlap else False use_visual_join = True if prev_above_curr and prev_overlap_curr else False if not use_visual_join and prev_line.incomplete_line: join_font_spacing = True if not (prev_line.is_table_row or curr_line.is_table_row): if page_stats["n_lines"] <= 3: join_font_spacing = True else: join_font_spacing = check_page_spacing( prev_line, curr_line, page_stats["fs_and_diff_next_y"], ) # if the font is different and font-family is different different_font_family = ( 
curr_line.visual_line.font_family != prev_line.visual_line.font_family ) different_common_fs = ( prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs ) different_font = ( different_font_family and different_common_fs and not join_font_spacing ) # start and end characters are same font or the mode of fonts of both lines is the same same_font = ( (prev_line.visual_line.fs == curr_line.visual_line.fs) or (same_start_fs and same_end_fs) or same_end_start_fs or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs ) and not different_font prev_ents = ( len(prev_line.visual_line.text_list) if not prev_line.line_type == "list_item" else 0 ) curr_ents = ( len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0 ) ents_aligned = check_tr_alignment(prev_line, curr_line) is_incomplete_sent = ( prev_line.incomplete_line and not prev_line.ends_with_period or prev_line.ends_with_comma ) # logic using line after curr if idx + 1 < len(lines): # this is inefficent as line_parser is called twice, # once for next_line and once for curr_line. next_line = lines[idx + 1] # print("NEXT LINE\n", next_line['text']) next_line_str, next_style_dict, next_text_list = ( next_line["text"], next_line["style"], next_line["text_list"], ) next_line = line_parser.Line( line_str=next_line_str, style_dict=next_style_dict, text_list=next_text_list, page_details=page_stats, ) # if the last line was not a table, check if the next line is a table to avoid single tr if prev_line.line_type != "table_row" and not ents_aligned: # check if the next line is a table and matches curr_line next_line_tr = next_line.line_type == "table_row" or should_join_table( curr_line, next_line, False, ) if not next_line_tr and curr_line.line_type == "table_row": curr_line.line_type = "para" # if the next line is joinable by visual stats but prev and curr are not # don't join the line (only true by x-span check and y is below for prev cur) # if this is not true ignore the rule prev_not_above_next = ( next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y ) next_line_join = False if next_line and check_layout(prev_line, next_line, prev_not_above_next): next_line_join = check_page_spacing( curr_line, next_line, page_stats["fs_and_diff_next_y"], ) # if the prev line is not visually joinable and the curr_next is # make sure the prev_line doesn't join the curr_line curr_next_visual_join = not join_font_spacing and next_line_join # print() # print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line") # print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line) # print("join_font_spacing:,", join_font_spacing) is_incomplete = ( is_incomplete_sent or (join_font_spacing and not sentence_visual_end) or curr_line.continuing_line ) # print("is_incomplete", is_incomplete) has_overlap_with_min = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=True, ) > 0.7 ) is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0 is_visually_apart = (has_overlap_with_min and not is_below) or ( not has_overlap_with_min and is_below ) above_bold_below_not = ( prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0 ) has_overlap_with_max = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, 
prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=False, ) > 0.3 ) is_not_header_over_para = True if ( above_bold_below_not and not has_overlap_with_max and prev_line.line_type == "header" and not prev_line.incomplete_line ): is_not_header_over_para = False # print("header over para check") # print("""above_bold_below_not # and not has_overlap_with_max # and prev_line.line_type == "header" # """) # print(above_bold_below_not) # print(has_overlap_with_max, j) # print(prev_line.line_type == "header") # print() # print(is_not_header_over_para) ########### # List item if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]): prev_line.line_type = "list_item" curr_line.line_type = "list_item" curr_line.is_list_item = True # change prev_line to list item if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() page_blocks[-1]["block_type"] = "list_item" close_text_y = ( curr_line.visual_line.start_y - curr_line.visual_line.mode_fs - prev_line.visual_line.start_y - prev_line.visual_line.mode_fs ) <= 0 aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x title_text = False if len(lines) < 10: title_text = top_overlap == 1.0 and close_text_y and aligned_text visual_header = visual_header_check(prev_line, curr_line, same_font) list_item_rule = curr_line.has_list_char or ( curr_line.numbered_line and not ( (prev_line.incomplete_line and curr_line.continuing_line) or join_font_spacing ) ) last_2_block_tr = False if len(page_blocks) >= 2: last_block_tr = ( page_blocks[-1]["block_type"] == "table_row" and page_blocks[-2]["block_type"] == "table_row" ) if not last_block_tr and curr_line.line_type == "para": # check to join if prev_line.incomplete_line and curr_line.continuing_line: last_2_block_tr = True no_space_join = prev_line.ends_with_period and curr_line.text[0] != " " visual_header_by_stats = visual_header_from_stats( prev_line, curr_line, page_stats, ) header_join = False common_list = curr_line.has_list_char or prev_line.has_list_char if ( visual_header_by_stats and curr_line.incomplete_line and same_font and not (prev_line.is_table_row or curr_line.is_table_row or common_list) ): header_join = True # print("LINEJOIN CHECK") # print("positive\n", "*" * 10) # print(f"\nsame_font:{same_font}", # f"\nis_incomplete:{is_incomplete}", # f"\nis_not_header_over_para:{is_not_header_over_para}") # print("join_font_spacing", join_font_spacing) # print("header join", header_join) # print() # print("negative\n", "*" * 10) # print(f"\nis_visually_apart:{is_visually_apart}", # f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}", # f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}", # f"\ncurr_line table {curr_line.line_type == 'table_row'}", # f"\ncurr_line list {curr_line.is_list_item}", # f"\nvisual_header {visual_header}", # f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}') if ( same_font and not should_join_table(prev_line, curr_line, ents_aligned) and not (curr_line.line_type == "table_row" or list_item_rule) and not (prev_line.line_type == "table_row" and not last_2_block_tr) and is_incomplete and not curr_next_visual_join # is_visually_apart and not visual_header or not check_parentheses(prev_line.text) and is_not_header_over_para and not no_space_join or title_text or header_join ): # print("JOIN") if not is_visually_apart and 
bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False if page_stats["n_lines"] <= 3: page_blocks[-1]["block_type"] = "header" elif ( not prev_line.line_type == "list_item" ): # and not curr_line.visual_line.is_header: page_blocks[-1]["block_type"] = "para" new_text = formatter.connect( prev_line.text.rstrip(), curr_line.text.lstrip(), ) new_text_list = ( prev_line.visual_line.text_list + curr_line.visual_line.text_list ) # max_x / min_x assignment max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x) min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x) prev_line_type = prev_line.line_type page_blocks[-1]["block_text"] = new_text prev_start_y = prev_line.visual_line.start_y curr_start_y = curr_line.visual_line.start_y prev_end_y = prev_line.visual_line.end_y wrapped_page = prev_line.visual_line.wrapped_page # pass the line parser attributes prev_line = curr_line # add appended text and text_list, preserve the line type prev_line.text = new_text prev_line.visual_line.start_y = prev_start_y prev_line.visual_line.text_list = new_text_list prev_line.line_type = prev_line_type prev_line.visual_line.min_x = min_x prev_line.visual_line.max_x = max_x prev_line.visual_line.wrapped_page = wrapped_page if curr_start_y < prev_end_y: prev_line.visual_line.wrapped_page = True # print(prev_start_y) # print("Join") # print() # print("-" * 50) # print() # new block else: # print("NEW block") # print("*" * 50) if not is_visually_apart and bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False # print("-"*50) colon_rule = ( prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents ) # normal case tab_check_join = { prev_line.visual_line.tab_count_join, prev_line.visual_line.tab_count, } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count} tab_check = sum(tab_check_join) > 0 # print("-+" * 50) # print("TAB POSITIONS") # print(prev_line.text) # print(prev_line.visual_line.start_x_list) # print(prev_line.visual_line.start_x_list_single_ent) # print(prev_line.visual_line.tab_count) # print(prev_line.visual_line.tab_count_join) # # print(curr_line.text) # print(curr_line.visual_line.start_x_list) # print(curr_line.visual_line.start_x_list_single_ent) # print(curr_line.visual_line.tab_count) # print(curr_line.visual_line.tab_count_join) # print("tabcheck", tab_check) # print("ents_aligned", ents_aligned) # print(prev_ents, curr_ents) # print(curr_line.visual_line.text_list) # print("-+" * 50) if visual_header_by_stats and prev_line.line_type != "table_row": page_blocks[-1]["block_type"] = "header" elif ( colon_rule and prev_ents == 1 and prev_line.line_type != "list_item" and not (prev_line.incomplete_line and curr_line.continuing_line) ): # print("Table Conversion") # print() # print("colon check") # print(prev_line.text.split(":")) # print(curr_line.text.split(":")) # print("TR1") new_text_list = prev_line.text.split(":") new_text_list = [new_text_list[0] + ":", new_text_list[1:]] page_blocks[-1]["block_type"] = "table_row" page_blocks[-1]["block_list"] = new_text_list if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" curr_line.is_list_or_row = True # print("Table Conversion!") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR3") elif (
tab_check and ents_aligned and prev_line.line_type != "list_item" ) or (colon_rule and not prev_line.incomplete_line): # print("Table Conversion") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR2") page_blocks[-1]["block_type"] = "table_row" if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" else: text_group_start = True text_group_start_idx = -1 list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx if (visual_header or visual_header_by_stats) and not ( prev_line.line_type == "list_item" or prev_line.line_type == "numbered_list_item" ): page_blocks[-1]["block_type"] = "header" # print() # print("*" * 40) # print("NEW BLOCK") # print() # print("*" * 40) # print(curr_line.line_type, curr_line.text) # group attribute if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0: group_id += 1 block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "text_group_start_idx": text_group_start_idx, "list_char": list_char, "group_id": group_id, "fs": curr_line.visual_line.start_fs, "x": curr_line.visual_line.start_x, "y": curr_line.visual_line.start_y, "line": curr_line, "block_list": curr_line.visual_line.text_list, } # This is to account for when the headers get false positive #TODO improve header code prev_text = page_blocks[-1]["block_text"] if page_blocks[-1]["block_type"] == "header" and ( len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16 ): page_blocks[-1]["block_type"] = "para" prev_line = curr_line block_idx += 1 page_blocks.append(block) # not too many blocks there may be title text missed if len(page_blocks) <= 2: for idx, block in enumerate(page_blocks): if "." 
not in block["block_text"] and len(block["block_text"].split()) < 10: page_blocks[idx]["block_type"] = "header" page_blocks = order_blocks(page_blocks) return page_blocks, line_set def clean_line(line): line = line.replace("\n", " ") line = line.replace("\t", " ") line = line.strip() return line def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) def connect(prev, curr): has_space = prev.endswith(" ") result = prev + ("" if has_space else " ") + curr return result def get_numbers(line): # test = re.compile(r"[0-9]+\.?[0-9]?") regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$") return regex.search(line) def check_block_join(prev_block, block): prev_text = prev_block["block_text"] curr_text = block["block_text"] blocks_are_paras = ( prev_block["block_type"] == "para" and block["block_type"] == "para" ) if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras: prev_line = line_parser.Line(prev_block["block_text"]) curr_line = line_parser.Line(block["block_text"]) if prev_line.incomplete_line or curr_line.continuing_line: return True return False def join_blocks(page_blocks, blocks): prev_last_block = page_blocks[-1][-1] # update page blocks and blocks # prev_blocks = page_blocks[-1] # last_prev_block = prev_blocks[-1] # check to join last_prev_block with first blocks[0] # if it's a join, pop the block and join, subtract block indexes prev_last_block["block_text"] = ( prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip() ) prev_last_block["block_list"].append(blocks[0]["block_list"]) # print(prev_block) page_blocks[-1][-1] = prev_last_block for block in blocks[1:]: block["block_idx"] -= 1 return page_blocks, blocks[1:] <fim_middle>if not line_type == "list_item": line_type = "para"
if not line_type == "list_item": line_type = "para"
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
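The row above follows the fill-in-the-middle layout used throughout this dump: the text between <fim_prefix> and <fim_suffix> is the code before the hole, the text between <fim_suffix> and <fim_middle> is the code after it, and targets holds the ground-truth completion (here, the IF block that defaults line_type to "para"). A minimal sketch of how a completed file can be spliced back together from one row; splice_fim_row is a hypothetical helper, not part of this dataset's tooling, and it assumes only the sentinel tags visible above:

def splice_fim_row(inputs: str, targets: str) -> str:
    # `inputs` looks like: <filename>...<fim_prefix>PREFIX<fim_suffix>SUFFIX<fim_middle>
    prefix = inputs.split("<fim_suffix>", 1)[0].split("<fim_prefix>", 1)[-1]
    suffix = inputs.split("<fim_suffix>", 1)[1].split("<fim_middle>", 1)[0]
    # the ground-truth middle fills the hole between prefix and suffix
    return prefix + targets + suffix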
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/spell_utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/processors.py def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) # nlm-ingestor/nlm_ingestor/ingestor_utils/word_splitter.py def best_match(i): candidates = enumerate(reversed(cost[max(0, i - self._maxword) : i])) return min( (c + self._word2cost.get(s[i - k - 1 : i].lower(), 9e999), k + 1) for k, c in candidates ) # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def make_header(str): header_text = str if "_" in header_text: header_text = " ".join(header_text.split("_")).title() elif header_text.islower(): header_text = header_text.capitalize() else: header_text = " ".join(XMLIngestor.camel_case_split(header_text)).title() return header_text """ import logging import os import string from symspellpy.symspellpy import SymSpell from symspellpy.symspellpy import Verbosity import nlm_ingestor.ingestor as ingestor from nlm_ingestor.ingestor import patterns logger = logging.getLogger(__name__) class SpellUtil: def __init__(self): self.sym_spell = SymSpell(2, 7) dictionary_path = os.path.join( os.path.dirname(os.path.abspath(ingestor.__file__)), "../ingestor_models/symspell/frequency_dictionary_en_82_765.txt", ) # bigram loading needs the bigram dictionary (assumed standard symspellpy asset), not the unigram file bigram_path = os.path.join( os.path.dirname(os.path.abspath(ingestor.__file__)), "../ingestor_models/symspell/frequency_bigramdictionary_en_243_342.txt", ) if not self.sym_spell.load_dictionary( dictionary_path, term_index=0, count_index=1, ): logging.error(f"Dictionary file not found: {dictionary_path}") return if not self.sym_spell.load_bigram_dictionary( bigram_path, term_index=0, count_index=2, ): logger.error(f"Bigram dictionary file not found: {bigram_path}") return def lookup_word(self, input_term): max_edit_distance_lookup = 2 suggestion_verbosity = Verbosity.CLOSEST # ignore_token = None ignore_token = "|".join(patterns.spell_check) suggestions = self.sym_spell.lookup( input_term, suggestion_verbosity, max_edit_distance_lookup, transfer_casing=False, ignore_token=ignore_token, ) # print(suggestions) # for suggestion in suggestions: # print("{}, {}, {}".format(suggestion.term, suggestion.distance, # suggestion.count)) if len(suggestions) > 0: return suggestions[0].term else: return input_term # def lookup_sentence(self, input_term): def lookup_compound(self, input_term): max_edit_distance_lookup = 2 suggestions = self.sym_spell.lookup_compound( input_term, max_edit_distance_lookup, transfer_casing=True, ignore_non_words=True, ) # for suggestion in suggestions: # print("{}, {}, {}".format(suggestion.term, suggestion.distance, # suggestion.count)) if len(suggestions) > 0: return suggestions[0].term else: return input_term def segment(self, input_term): is_mixed_case_term = not input_term.islower() if is_mixed_case_term: input_term = input_term.lower() suggestion = self.sym_spell.word_segmentation(input_term) corrected_string = suggestion.corrected_string <fim_suffix> return corrected_string <fim_middle>if is_mixed_case_term: corrected_string = string.capwords(corrected_string)
if is_mixed_case_term: corrected_string = string.capwords(corrected_string)
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
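The target above completes SpellUtil.segment: mixed-case input is lowercased before symspellpy's word_segmentation, then re-capitalized word by word. A standalone sketch of that case-restoring contract; restore_case is a hypothetical name for illustration, not a function in the repository:

import string

def restore_case(original_term: str, corrected: str) -> str:
    # mirror the completed block: only re-capitalize when the input was mixed-case
    if not original_term.islower():
        return string.capwords(corrected)
    return corrected

assert restore_case("ExecutiveSummary", "executive summary") == "Executive Summary"
assert restore_case("already lower", "already lower") == "already lower"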
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def get_row1(row): orignal_row = row words = row.split(" ") cells = [] try: row = processors_utils.super_replace(row, ["(", ")", ",", "$", "%"], "") tags = nltk.pos_tag(list(filter(None, row.split(" ")))) except Exception as e: logging.error(e) return [orignal_row] # "" strn = "" for i in range(len(tags)): # previous check tag = tags[i][1] word = words[i].lstrip().rstrip() proc_word = processors_utils.super_replace(word, ["(", ")", ",", "$", "%"], "") if len(word) & len(proc_word.replace(" ", "")): # print(proc_word) start_tag = nltk.pos_tag(proc_word[0])[0][1] end_tag = nltk.pos_tag(proc_word[-1])[0][1] else: start_tag = "CD" end_tag = "CD" if ((tag == "CD") | (tag == ":")) and ( (tag == ":") | ((start_tag == "CD") and (end_tag == "CD")) ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) strn = "" elif ( ((start_tag == "CD") and (end_tag == "CD")) & (word != "$") & (word == "%") ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) else: strn += word.lstrip().rstrip() + " " if type(cells) == str: cells = [cells] return cells # nlm-ingestor/nlm_ingestor/file_parser/tika_parser.py def find_tika_header(fp): try: with open(fp) as file: file_data = file.read() soup = BeautifulSoup(file_data, "html.parser") # print(str(soup.find_all('head')[0])) head = soup.find_all("head") return "org.apache.tika.parser" in str(head[0]) except Exception as e: logging.error(e) return False # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def format_tables(blocks_df): # columns block_text block_sents block_type # identify all tables in df table_indexes = blocks_df[blocks_df.block_type == "table_row"].index # if none are found if len(table_indexes) == 0: return blocks_df # group tables tables = group_tables(table_indexes) invalid = [] idx = [] for i in range(len(tables)): if len(tables[i]) < 2: invalid.append(i) else: idx.append(i) if len(invalid): blocks_df.loc[ np.concatenate(np.array(tables)[np.array(invalid)], axis=0), "block_type", ] = "para" table_rows = blocks_df[blocks_df.block_type == "table_row"] table_list = [] # print(table_rows) for table_idx in idx: table_idx = tables[table_idx] # print(table_rows.loc[table_idx].values,"\n") table = [] for row_idx, row in table_rows.loc[table_idx].iterrows(): table += [list(filter(None, get_row(row["block_text"].rstrip())))] # check if table is uniform table_cell_counts = [] if len(table) and (len(table[0])): table_cell_counts = [len(row) for row in table] try: cell_count = mode(table_cell_counts) except Exception as e: logging.error(e) cell_count = min(table_cell_counts) # non uniform row if (sum(table_cell_counts) % len(table[0])) and (cell_count): new_table = [] for row in table: # multiple rows in row if (len(row) > cell_count) and (len(row) % cell_count == 0): rows = int(len(row) / cell_count) for new_row in range(rows): new_row += 1 new_table_row = row[ new_row * cell_count - cell_count : new_row * cell_count ] new_table.append(new_table_row) else: new_table.append(row) table_list.append(new_table) else: table_list.append(table) else: table_list.append(table) replace = [] # check for valid tables if len(idx): for i in np.array(tables)[np.array(idx)]: replace.append(i) for i in range(len(replace)): blocks_df = blocks_df.drop(replace[i]) blocks_df.loc[replace[i][0]] = { "block_type": "table", "block_sents": 
table_list[i], "block_text": table_list[i], } return blocks_df.sort_index().reset_index(drop=True) else: return blocks_df """ import datetime import logging import math import re import string from nltk.corpus import stopwords from .patterns import abbreviations from .patterns import states from .patterns import states_abbreviations from .styling_utils import mode_of_list try: stop_words = set(stopwords.words("english")) except Exception as e: logging.error(e) import nltk stopwords = nltk.download("stopwords") stop_words = set(stopwords.words("english")) stop_words.add("per") continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~" list_chars = [ "โ€ข", "โžข", "*", "ฦ’", "๏‚ท", "๏‚ง", "๏ƒ˜", "๏ฎ", "ยป", "โ˜", "ยท", "๏ฟฝ", "โ–ช", "โ–ช", "โ—‹", "๔€ธ", "โ€“", ] list_types = { "โ€ข": "circle", "โžข": "wide_symbol_arrow", "*": "star", "ฦ’": "f", "๏‚ท": "clock", "๏‚ง": "small_square", "๏ƒ˜": "narrow_symbol_arrow", "๏ฎ": "large_square", "ยป": "double_arrow", "โ˜": "hollow_square", "ยท": "circle", "๏ฟฝ": "special_char", "โ–ช": "very_small_square", "โ–ช": "very_small_square", "โ—‹": "hollow_circle", "๔€ธ": "hollow_squere", "โ€“": "dash", "โ€’": "another-dash", "ฬถ": "underscore", } unicode_list_types = { "\\uf0b7": "โ€ข", "\\uf0fc": "๏ƒผ", } footnote_types = { "ยฉ" } ambiguous_list_chars = ["+", "-"] units = ["acres", "miles", "-"] # - could represent a null value in a row punctuations = string.punctuation + "โ€œ" start_quotations = ["'", '"', "โ€œ"] end_quotations = ["'", '"', "โ€"] """ Quote Pattern details: \\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly. ["โ€œ\'] ==> Quote patterns (?!\\D\\s) ==> Negative Lookahead for single character following the quote. Helps in removing words like Macy's, don't ... (?!\\d+) ==> Negative Lookahead for one or more digits following the pattern. Helps in removing words like '19, '2019 (.*?)[,;.]?[โ€"\'] ==> Match all other data. """ # Add / Modify Quotation pattern in ingestor_utils/utils.py also. 
quote_pattern = re.compile( r'(?:(?<=\W)|(?<=^))["โ€œโ€˜โ€™\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[โ€"โ€˜โ€™\']+', ) # (r'["โ€œ\'](.*?)[,;.]?[โ€"\']') single_char_pattern = re.compile(r'[a-zA-Z]') multi_char_pattern = re.compile(r'[a-zA-Z]+') roman_number_pattern = re.compile(r'[ixvIXV]+$') ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"โ€œโ€˜โ€™โ€\'\s]*$") conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"] class Word: def __init__(self, token): self.text = token self.is_percent = False self.is_number = False self.is_year = False # year does not count as a number self.is_dollar = False self.is_million = False self.is_billion = False self.is_thousand = False self.is_date_entry = False self.is_negative = False self.length = len(self.text) self.is_stop_word = self.text.lower() in stop_words self.is_number_range = False self.parts = [] text_without_punct = self.text while ( len(text_without_punct) > 1 and (text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations) ): text_without_punct = text_without_punct[0:-1] # remove leading unbalancced punctuations while ( len(text_without_punct) > 1 and (text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations) ): text_without_punct = text_without_punct[1:] self.text_without_punct = text_without_punct self.is_noun = self.text_without_punct[0].isupper() n = self.check_numeric() self.check_date() try: <fim_suffix> except Exception as e: logging.error(e) self.num_digits = 0 def check_date(self): if "/" in self.text or "-" in self.text: text = self.text.replace("/", "-") date_patterns = [ "%b-%d", "%B-%d", "%B-%d-%y", "%B-%d-%Y", "%b-%d-%Y", "%b-%d-%y", "%m-%d", "%m-%d-%y", "%m-%d-%Y", ] for pat in date_patterns: try: datetime.datetime.strptime(text, pat) self.is_date_entry = True return except ValueError: pass else: self.is_date_entry = False def check_numeric(self): word = self.text.lower() if not word.isalpha(): if word.isprintable(): if not word.isnumeric(): if word.startswith("(") and word.endswith(")"): word = word[1:-1] if word.startswith("-"): self.is_negative = True word = word[1:] if word.startswith("$"): self.is_dollar = True word = word[1:] elif word.endswith("$"): self.is_dollar = True word = word[0:-1] elif word.endswith("%"): self.is_percent = True word = word[0:-1] elif word.endswith("m"): self.is_million = True elif word.endswith("bn"): self.is_billion = True if word.startswith("(") and word.endswith(")"): word = word[1:-1] word = word.replace(",", "") if word.isnumeric() or word.replace(".", "", 1).isnumeric(): self.is_number = True parts = word.split("-") if ( len(parts) == 2 and parts[0].isnumeric() and parts[1].isnumeric() ): self.is_number_range = True self.parts = parts else: self.is_number = True if self.is_number: numeric_part = word return numeric_part class Line: def __init__( self, line_str, text_list=[], style_dict={}, page_details={}, noun_chunk_ending_tokens=[], ): self.text = line_str.strip() self.visual_line = VisualLine(text_list, style_dict, page_details) self.words = [] self.is_independent = False self.is_header = False self.is_header_without_comma = False self.noun_chunks = [] self.quoted_words = quote_pattern.findall(self.text) self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens} self.parse_line() def check_header(self): # Section X, Article Y, Note 1 etc. 
first_word_header = self.first_word.lower() in ["section", "article", "note"] # If there are a certain percentage of title words (first letter capitalize) title_ratio = ( self.title_word_count / self.eff_word_count if self.eff_word_count > 0 else 1.0 ) # print(self.title_word_count, self.eff_word_count, title_ratio) # Section 1 is a header but Section 1: Hello 3 is not has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10 has_header_structure = ( (first_word_header or has_enough_titles) and self.number_count == 1 ) or self.numbered_line or self.text.isupper() # has_header_structure = has_header_structure and self.eff_word_count < last_word_number = ( self.last_word.lower() in units or self.last_word_number and not has_header_structure ) last_word_date = self.last_word_date and not has_header_structure # Find lines ending with sentence delimiter. But exclude text like "L.P." ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None sentence_structure = self.ends_with_period and not ( has_header_structure and title_ratio > 0.9 ) and ends_with_delim last_letter_is_punctuation = ( self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and ends_with_delim ) self.is_header_without_comma = ( not sentence_structure and not self.has_list_char and not self.first_char in footnote_types and has_enough_titles and not last_word_number and ( self.number_count == 0 or (has_header_structure and self.number_count <= 1) ) and not self.has_continuing_chars and not last_word_date and self.first_word_title and not self.last_word_is_stop_word and not self.is_zipcode_or_po and not last_letter_is_punctuation and not "://" in self.text # url pattern ) self.is_header = self.is_header_without_comma and \ ((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True) def check_ends_with_period(self): # punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.'] last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."] self.ends_with_period = self.last_char in ["."] and not last_word_is_title def check_table_row(self): if not self.is_header: value_count = ( self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ") ) word_symbols = self.word_count - self.dollar_sign_count if word_symbols == 0: word_symbols = 1 word_ratio = ( value_count + self.title_word_count + self.date_entry_count ) / word_symbols self.is_table_row = ( ( (value_count > 0 or self.date_entry_count > 0) and word_ratio > 0.7 and not self.ends_with_period and not self.is_zipcode_or_po ) and not self.last_word_is_stop_word or ("...." in self.text) ) else: self.is_table_row = False def check_list_item(self): text = self.text.strip() self.has_list_char = text[0] in list_types.keys() # if not self.has_list_char and text[0] in ambiguous_list_chars: # self.has_list_char = text[1:].strip()[0].isalpha() self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$" if self.is_list_item: self.list_type = list_types[text[0]] # matches 1.1 1.2.1 1 etc. 
def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # check if line is part of address def check_zipcode_or_pobox(self): # check if line matches format P.O. 
box xxxxx pobox = ( self.word_count == 3 and self.last_word_number and self.first_word.lower() in ["po", "p.o", "p.o."] ) # check if line is last part of address, matching format "city, state zipcode" zipcode = ( self.word_count < 7 # ensure line is standalone address, not part of larger sentence and ( self.contains_state # line contains comma followed by state name or abbreviation # line ends in zipcode, with format xxxxx or xxxxx-xxxx and ( (self.last_word_number or self.last_word[-4:].isdigit()) and ( (len(self.last_word) == 10 and self.last_word[-5] == "-") or len(self.last_word) == 5 ) ) and not self.ends_with_period ) ) self.is_zipcode_or_po = pobox or zipcode def set_line_type(self): line_type = "para" if self.is_table_row: line_type = "table_row" elif self.is_header: line_type = "header" elif self.is_list_item or self.numbered_line: line_type = "list_item" else: line_type = "para" self.line_type = line_type def parse_line(self): self.words = [] self.title_word_count = 0 self.alpha_count = 0 self.list_type = "" self.integer_numbered_line = False self.roman_numbered_line = False self.dot_numbered_line = False self.numbered_line = False self.stop_word_count = 0 self.dollar_count = 0 self.pct_count = 0 self.number_count = 0 self.last_word_number = False self.first_word_title = False self.letter_numbered_line = False self.ends_with_hyphen = False self.last_word_date = False self.is_reference_author_name = False self.date_entry_count = 0 self.last_word_is_stop_word = False # self.last_word in self.stopwords self.hit_colon = False self.is_zipcode_or_po = False self.contains_state = False self.addresses = [] # todo - this is a stopgap solution, need to make it more efficient tokens = self.text.split() self.length = len(self.text) self.word_count = len(tokens) self.dollar_sign_count = tokens.count("$") last_idx = self.word_count - 1 first_alpha_found = False prev_token_comma = False self.eff_length = 0 single_letter_word_count = 0 noun_chunk_buf = [] if self.length == 0: return for idx, token in enumerate(tokens): if token in unicode_list_types.keys(): token = unicode_list_types[token] if token.__contains__(":"): self.hit_colon = True # remove punctuation unless (word) or unless it is the first token or if it has colon last_char = token[-1] # remove punctuation unless (word) or unless it is the first token if ( (token[-1] in string.punctuation or token[-1] in end_quotations) and not (token[0] in string.punctuation or token[0] in start_quotations) and (not idx == 0 or token[-1] == ":") ): token = token[0:-1] if len(token) == 0: continue # if prev token contained comma, check if current token is state name if prev_token_comma and ( token.lower() in states or token.lower() in states_abbreviations ): self.contains_state = True prev_token_comma = False if prev_token_comma: prev_token_comma = False if last_char == ",": prev_token_comma = True if idx == 0 and not token.lower() == "i" and not token.lower() == "a": self.check_numbered_line(token) if token.istitle() or token.isupper(): # and not self.hit_colon: self.title_word_count = self.title_word_count + 1 if token.isalpha(): # if not self.hit_colon: self.alpha_count = self.alpha_count + 1 if not first_alpha_found: first_alpha_found = True if idx == 0: self.first_word_title = token[0].isupper() word = Word(token) if word.is_number: self.number_count = self.number_count + 1 if idx == last_idx: self.last_word_number = True if word.is_date_entry: self.date_entry_count += 1 if idx == last_idx: self.last_word_date = True if word.is_dollar: 
self.dollar_count = self.dollar_count + 1 if idx == last_idx: self.last_word_number = True if word.is_percent: self.pct_count = self.pct_count + 1 if idx == last_idx: self.last_word_number = True self.eff_length += word.length if word.length == 1: single_letter_word_count += 1 if word.is_stop_word: if not self.hit_colon: self.stop_word_count = self.stop_word_count + 1 if idx == last_idx and len(token) != 1 and not token.isupper(): self.last_word_is_stop_word = True if word.is_noun or word.text == "&": noun = word.text_without_punct prev_word = self.words[-1] if len(self.words) > 0 else None if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf: noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway if noun.endswith("'s"): noun = noun[0:-2] noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] elif ( "".join([x.lower() for x in noun if x not in {".", ","}]) in self.noun_chunk_ending_tokens ): noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] else: noun_chunk_buf.append(noun) elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]: noun_chunk_buf.append(word.text_without_punct) elif len(noun_chunk_buf): self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] self.words.append(word) if len(noun_chunk_buf) > 0: self.noun_chunks.append(" ".join(noun_chunk_buf)) self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks)))) self.first_word = tokens[0] self.last_word = tokens[-1] self.last_char = self.text[-1] self.ends_with_period = self.last_char == "." self.ends_with_comma = self.last_char == "," self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "." 
self.eff_word_count = self.alpha_count - self.stop_word_count self.check_ends_with_period() self.first_char = self.text[0] self.has_continuing_chars = not self.numbered_line and ( self.first_char.islower() or self.first_char in continuing_chars ) self.last_continuing_char = self.last_char in continuing_chars self.check_zipcode_or_pobox() self.check_list_item() self.check_header() self.check_table_row() self.separate_line = ( self.is_header or self.is_table_row or self.is_list_item or self.is_zipcode_or_po ) self.is_list_or_row = self.is_table_row or self.is_list_item self.is_header_or_row = ( self.is_header or self.is_table_row or self.is_zipcode_or_po ) self.ends_with_abbreviation = self.ends_with_period and ( (self.last_word.find(".") != len(self.last_word) - 1) or self.last_word.lower() in abbreviations or len(self.last_word) <= 3 ) self.incomplete_line = not self.is_header_or_row and ( not self.ends_with_period or self.ends_with_abbreviation or self.end_with_period_single_char ) self.continuing_line = self.has_continuing_chars and not self.separate_line self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8 self.set_line_type() if self.is_header or self.is_header_without_comma: if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2: self.is_reference_author_name = True self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list # print(self.separate_line) # self.continuing_line = not self.separate_line and def to_json(self): json_lp = dict(self.__dict__) del json_lp["visual_line"] words = [] for word in self.words: words.append(word.__dict__) json_lp["words"] = words return json_lp class VisualLine: def __init__(self, text_list=[], style_dict={}, page_stats={}): self.text_list = text_list self.start_x = None self.start_y = None self.end_x = None self.end_y = None self.fs = None self.fw = None self.start_fs = None self.end_fs = None self.diff_prev_y = None self.diff_next_y = None self.is_comparably_sized = False self.is_comparably_bolded = False self.is_prev_space_smallest = False self.is_next_space_smallest = False self.wrapped_page = False self.text = " ".join(self.text_list) if style_dict: self.start_x = style_dict["start_x"][0] self.start_y = style_dict["start_y"][0] self.end_x = style_dict["end_x"][-1] self.end_y = style_dict["end_y"][-1] self.fs = style_dict["line_fs"][0] self.fw = style_dict["line_fw"][0] self.diff_prev_y = style_dict["diff_prev_y"][0] self.diff_next_y = style_dict["diff_next_y"][0] self.font_family = ( style_dict["font_family"][0] if len(style_dict["font_family"]) else None ) self.font_style = ( style_dict["font_style"][0] if len(style_dict["font_style"]) else None ) self.min_x = ( self.start_x ) # these variables are adjustable during line joins for line width self.max_x = self.end_x self.start_x_list = style_dict["start_x"] # joined ents self.end_x_list = style_dict["end_x"] # joined ents self.start_x_list_single_ent = style_dict["start_x_list"][0] self.end_x_list_single_ent = style_dict["end_x_list"][0] self.mode_fs = mode_of_list(style_dict["line_fs"]) self.tab_count = 0 # calculates tabs for when tika misses word split if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent): self.start_end_list = list( zip(self.start_x_list_single_ent, self.end_x_list_single_ent), ) for word_x, next_word_x in zip( self.start_end_list[:-1], self.start_end_list[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = 
next_word_start_x - word_end_x if word_distance > 20: self.tab_count += 1 else: self.start_end_list = [] self.tab_count_join = 0 # tab count after join in ptolines # calculates tabs for when tika misses word split if len(self.start_x_list) == len(self.end_x_list): self.start_end_list_join = list( zip(self.start_x_list, self.end_x_list), ) for word_x, next_word_x in zip( self.start_end_list_join[:-1], self.start_end_list_join[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = next_word_start_x - word_end_x if word_distance > 20: self.tab_count_join += 1 else: self.start_end_list_join = [] if len(self.text.split()) == 2 and self.tab_count == 1: self.text_list = self.text.split() # Count tabs in text list, Eventually make it a function of font size self.start_fs = round(style_dict["start_fs"][0], 1) self.end_fs = round(style_dict["end_fs"][-1], 1) self.compute_visual_features(page_stats) def compute_visual_features(self, page_stats): # compute font size relative to most common font font_sizes_mode = page_stats["mode_fs"] if self.fs > (4 / 3) * font_sizes_mode: self.is_comparably_sized = True else: self.is_comparably_sized = False # compute font weight relative to 600.0 which has generally # been observed to correspond to bolding of some sort font_weights_mode = page_stats["mode_fw"] if font_weights_mode >= 600.0: self.is_comparably_bolded = False elif self.fw > 600.0: self.is_comparably_bolded = True # compare line height for similar type (same font) lines if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2: for k, v in page_stats["fs_and_diff_prev_y"].items(): if k == self.fs and 0 <= v < self.diff_prev_y: break else: self.is_prev_space_smallest = True if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2: for k, v in page_stats["fs_and_diff_next_y"].items(): if k == self.fs and 0 <= v < self.diff_next_y: break else: self.is_next_space_smallest = True def should_join_table(self, next_line): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # check list of spaced words curr_line_ents = len(self.text_list) next_line_ents = len(next_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # compare alignment of elements in both lists if ent_match: return return False def should_join_para(self): return False def should_join_header(self): return False def __str__(self): output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest}," output_str += f"\nfont_style = {self.font_style}" return output_str <fim_middle>if n: n = round(float(n)) if n > 0: digits = int(math.log10(n)) + 1 elif n == 0: digits = 1 else: digits = int(math.log10(-n)) + 2 self.num_digits = digits if digits == 4 and self.text.replace(",", "") == self.text: self.is_year = True self.is_number = False else: self.num_digits = 0
if n:
    n = round(float(n))
    if n > 0:
        digits = int(math.log10(n)) + 1
    elif n == 0:
        digits = 1
    else:
        digits = int(math.log10(-n)) + 2
    self.num_digits = digits
    if digits == 4 and self.text.replace(",", "") == self.text:
        self.is_year = True
        self.is_number = False
else:
    self.num_digits = 0
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
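The target above implements the digit-count and four-digit-year heuristic. A minimal standalone sketch of the same logic (the helper name digit_stats is hypothetical, not part of the dataset or of nlm-ingestor):

import math

def digit_stats(numeric_str):
    # Mirrors the completion: count digits via log10 and flag bare
    # four-digit values (no thousands separator) as years.
    if not numeric_str:
        return 0, False
    n = round(float(numeric_str.replace(",", "")))
    if n > 0:
        digits = int(math.log10(n)) + 1
    elif n == 0:
        digits = 1
    else:
        digits = int(math.log10(-n)) + 2  # one extra slot for the minus sign
    is_year = digits == 4 and "," not in numeric_str
    return digits, is_year

# digit_stats("2019") -> (4, True); digit_stats("2,019") -> (4, False)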
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/text_ingestor.py def blocks_to_json(page_blocks): results = [] block_count = 0 for page_idx, blocks in enumerate(page_blocks): result = [] block_start = block_count header_block_idx = -1 header_block_text = "" for block_idx_in_page, block in enumerate(blocks): if block["block_text"]: block_sents = utils.sent_tokenize(block["block_text"]) # header_block_idx = block["header_block_idx"] if block["block_type"] == "header": header_block_idx = block["block_idx"] header_block_text = block["block_text"] result.append( { "block_text": block["block_text"], "block_idx": block["block_idx"], "block_sents": block_sents, "block_type": block["block_type"], "header_block_idx": block_start + header_block_idx, "page_idx": page_idx, "block_idx_in_page": block_start + block_idx_in_page, "header_text": header_block_text, "text_group_start_idx": block["text_group_start_idx"], "block_list": block["block_list"], "level":0, "block_class": block["block_class"] if "block_class" in block else {} }, ) block_count += 1 results.append(result) return results # nlm-ingestor/nlm_ingestor/ingestor/styling_utils.py def tops_2_dict(p_items): tops_2_info = defaultdict(list) idx_2_top = {} for p_idx, p_item in enumerate(p_items): if not p_item.text.strip(): continue style_str = p_item.attrs.get("style", "") if not style_str: continue # do not strip text as trailing white-space is used as a features text = unicodedata.normalize("NFKD", p_item.text) style = get_p_styling_dict(style_str) start_y = style["start_y"] tops_2_info[round(start_y, 0)].append((p_idx, text, style)) idx_2_top[p_idx] = round(start_y, 0) # print(tops_2_info) return tops_2_info, idx_2_top # nlm-ingestor/nlm_ingestor/ingestor/table_parser.py def __init__(self, infos): self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(logging.INFO) self.tables = {} self.two_column_table_idx = set() self.resolved_tables = set() if not infos: return table_infos = [] table_start_idx = None for idx, info in enumerate(infos): if info.get("is_table_start", False) and not info.get("has_merged_cells", False): self.logger.debug(f"Found table start from match_idx:{idx}") table_start_idx = idx table_infos.append(info) elif table_start_idx is not None and info.get("is_table_end", False): table_infos.append(info) self.logger.debug(f"Table ends with match_idx:{idx}") # resolve table try: df = self.resolve_table_from_infos(table_infos) if isinstance(df, pd.DataFrame): self.logger.info( f"Found table at match_idx:{idx} of shape {df.shape}", ) self.tables[table_start_idx] = df if ( df.shape[1] == 1 and df.columns[0] == "_UNKNOWN_COLUMN_1_" and df.index.name == "_UNKNOWN_COLUMN_0_" ): for info_idx in range(len(table_infos)): self.two_column_table_idx.add(idx - info_idx) self.resolved_tables.add(table_infos[0]["table_idx"]) else: self.logger.error( f"Found table at match_idx:{idx} but failed to parse\n{table_infos[:2]}", ) except Exception: self.logger.error( f"Failed to parse table:\n{table_infos[:2]}", exc_info=True, ) # reset table_infos = [] table_start_idx = None elif table_start_idx: table_infos.append(info) """ import logging import re from collections import Counter from collections import defaultdict from . import formatter from . import line_parser from . 
import patterns from nlm_ingestor.ingestor_utils import spell_utils from nlm_ingestor.ingestor_utils.utils import sent_tokenize logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) su = spell_utils.SpellUtil() def stem(line): line = line.replace("'s", "") line = line.replace("โ€™s", "") return line def check_parentheses(text): count = 0 for i in text: if i == "(": count += 1 elif i == ")": count -= 1 return count == 0 def nlm_tokenize(line): # print(line) tokens = [] if not line: line = "" line = line.lower() trans_table = line.maketrans("-/", " ") line = line.translate(trans_table) line = line.translate(str.maketrans("", "", "๏ฟฝ\\(*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\)'\"โ€”")) # line = patterns.num_unit.sub(r"100 \1", line) line = patterns.num_unit.sub(r"", line) line = stem(line) words = line.split() for word in words: if ( not word.isdigit() and not word.endswith("%") and not word.startswith("$") and not word.endswith("$") ): tokens.append(word) if len(tokens) == 0: tokens.append("unknown") return tokens # make sure that there is at least one word which is greater than two characters def find_floating_chars(line): words = line.split(" ") for word in words: if len(word) > 2: return False return True def is_table_row(line): line = line_parser.Line(line) return line.is_table_row def should_skip(line, xml=False): return len(line) <= 2 if not xml else len(line) == 0 def clean_lines(lines, xml=False): result = [] running_line = "" line_buffer = [] line_type = "para" header_block_idx = -1 block_idx = 0 line_set = set() for line_str in lines: # print(line_str) line_str = clean_line(line_str) if should_skip(line_str, xml=xml): continue line_without_numbers = re.sub(r"\d+", "", line_str) if line_without_numbers in line_set: continue else: line_set.add(line_without_numbers) curr_line = line_parser.Line(line_str) # this converst strings like 'e x e c u t i v e summary' to 'executive summary' if not xml and curr_line.has_spaced_characters: line_str = fix_spaced_characters(line_str) curr_line = line_parser.Line(line_str) if len(line_buffer) > 0: # find out if previous line was a discontinous line prev_line = line_buffer[-1] logger.debug("========") logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n") logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n") # keep connecting lines as long as they seem incomplete is_incomplete = prev_line.incomplete_line or ( len(line_buffer) > 1 and not prev_line.ends_with_period ) logger.debug( f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}", ) <fim_suffix> logger.debug("========") else: running_line = curr_line.text line_type = curr_line.line_type line_buffer = [curr_line] if line_type == "list_item" and running_line[0] in "๏ฟฝ\\*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\'\"โ€”": running_line = running_line[1:].lstrip() block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) return result def line_list_check(prev_line, curr_line, list_char): # if prev_line is list_item and list_char matches curr_line if list_char == curr_line.text[0] and list_char not in ["โ€", "'", '"', "("]: return True # same char is alpha if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha(): if len(prev_line.text) >= 2 and prev_line.text[1].isupper(): # spell check first word first_word = prev_line.text.split(" ")[0] first_word = 
first_word.replace("'", "") correct_word = su.segment(first_word) if first_word[1:] == correct_word: return True # same char is not alpha but not digit if prev_line.text[0] == curr_line.text[0] and not ( prev_line.text[0].isalpha() or prev_line.text[0].isdigit() or list_char not in ["โ€", "'", '"', "("] ): return True return False def should_join_table(prev_line, curr_line, ents_aligned): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # print() # print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list) # check list of spaced words curr_line_ents = len(prev_line.visual_line.text_list) next_line_ents = len(curr_line.visual_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count) tab_match = ( prev_line.visual_line.tab_count == curr_line.visual_line.tab_count and curr_line.visual_line.tab_count > 0 ) # casing should also be the same same_case = ( prev_line.text[0].islower() == curr_line.text[0].islower() or prev_line.text[0].isupper() == curr_line.text[0].isupper() ) colon_check = ( prev_line.hit_colon and curr_line.hit_colon and prev_line and same_case and not prev_line.incomplete_line ) # if prev_line.hit_colon and curr_line.hit_colon: # print() # print("colon check") # print(prev_line.visual_line.text_list) # print(curr_line.visual_line.text_list) # col_check # print(tab_match, ent_match, colon_check) tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count return ( (tab_match and ent_match) or colon_check or (ents_aligned and ent_match and tab_check) ) def check_page_spacing(prev_line, curr_line, spacing_dict): # print("^"*50) # print("checking page stats") # print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text) # print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text) # print() diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y) # find best fs reference prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs} curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs} same_fs = prev_line_fs.intersection(curr_line_fs) fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs min_check = ( spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None ) max_check = ( spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None ) normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3 if min_check or normal_check or max_check: # get all fs in spacing dict # see if the diff top is a min # print("checking space dict") distance_list = [] for val in spacing_dict: if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2: distance_list.append((val, val[1])) # print(distance_list) val = min(distance_list) if len(distance_list) else [] if len(val): join_fs, join_top = val[0] if len(val): join_fs, join_top = val[0] if val[0] == (fs, diff_top): # or close # print("SHOULDJOIN") return True elif ( join_fs == fs and ((diff_top - 1) == join_top) or ((diff_top + 1) == join_top) ): return True return False def compute_overlap( start_x0: float, end_x0: float, start_x1: float, end_x1: float, divide_by_min=True, ) -> float: """ Computes the % of intersection (overlap) of two lines w.r.t. 
the shortest line """ width_x0 = abs(end_x0 - start_x0) width_x1 = abs(end_x1 - start_x1) if start_x0 <= start_x1 <= end_x0: intersect = min(abs(end_x0 - start_x1), width_x1) elif start_x0 <= end_x1 <= end_x0: intersect = min(abs(end_x1 - start_x0), width_x1) elif start_x1 <= start_x0 <= end_x0 <= end_x1: intersect = abs(end_x0 - start_x0) else: intersect = 0.0 if divide_by_min: intersect /= min(width_x0, width_x1) + 1e-5 else: intersect /= max(width_x0, width_x1) + 1e-5 return intersect def compute_overlap_top_bottom( start_x0: float, end_x0: float, start_x1: float, end_x1: float, ) -> float: """ This is different from the above function. Finds percentage overlap of top to bottom. Score of 100% is possible doesn't reference the shortest line """ width_x1 = abs(end_x1 - start_x1) if width_x1 == 0: return 0.0 if start_x0 <= start_x1: # measure from left to right if end_x1 <= end_x0: # if start and end both less, full in subset return 1.0 return (end_x1 - start_x0) / width_x1 else: # measure from bottom start if end_x1 <= start_x0: return 0.0 return (end_x1 - start_x0) / width_x1 def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1): """ This is different from the above function. Finds percentage overlap of top to bottom. Score of 100% is possible doesn't reference the shortest line """ # print(start_x0, end_x0) # print(start_x1, end_x1) if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line # print() # print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0)) return (end_x1 - start_x1) / (end_x0 - start_x0) # other conditions # elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line # return # else: #to the right of bottom line return 1.0 # header check for lines with similar font # header check for lines with similar font def visual_header_check(prev_line, curr_line, same_font): # check top overlap (small) if the font size is bigger # print() # print("visual_header check:") # print("prev", prev_line.text) # print("checking", curr_line.text) # top also has to be higher # print("prev_line.visual_line.start_y, prev_line.visual_line.end_y") # print(prev_line.visual_line.start_y, prev_line.visual_line.end_y) # print(prev_line.visual_line.start_y, curr_line.visual_line.start_y) if prev_line.visual_line.wrapped_page: return False if prev_line.visual_line.start_y < curr_line.visual_line.start_y: prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x # print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x") # print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x) # print("curr_line.visual_line.min_x, curr_line.visual_line.max_x") # print(curr_line.visual_line.min_x, curr_line.visual_line.max_x) # print("prev_line_width / curr_line_width") # print(prev_line_width / curr_line_width) # print("prev_line_width, curr_line_width") # print(prev_line_width, curr_line_width) if curr_line_width == 0: return False # print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x)) if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x): if round(prev_line_width) == round(curr_line_width): # print() # print("NOT A HEADER1") return False offset = 0 # print(prev_line.visual_line.min_x, curr_line.visual_line.min_x) # print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x) if prev_line.visual_line.min_x <= curr_line.visual_line.min_x: offset = 
curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset # print("(prev_line_width - offset) / curr_line_width") # print((prev_line_width - offset) / curr_line_width) overlap_percentage = (prev_line_width - offset) / curr_line_width different_font_style = ( prev_line.visual_line.fw != curr_line.visual_line.fw or prev_line.visual_line[1] != curr_line.visual_line[1] or prev_line.visual_line.fs > curr_line.visual_line.fs ) if ( overlap_percentage < 0.3 or (different_font_style and overlap_percentage < 0.6) or (prev_line.line_type == "header" and different_font_style) # or (prev_line.is_header and different_font_style) ): # print("HEADER INDENT", prev_line.is_header) # print("overlap rule::", (prev_line_width - offset) / curr_line_width) # print(True) return True # print(False) # print() # print("NOT A HEADER") return False def visual_header_from_stats(prev_line, curr_line, page_stats): prev_fs = prev_line.visual_line.fs curr_fs = curr_line.visual_line.fs median_val = round(page_stats["median_fs"]) max_val = round(max(page_stats["fs_list"])) max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True prev_fs_diff = round(prev_fs - median_val) curr_fs_diff = ( round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8 ) # curr_fs is the median varied_set = len(set(page_stats["fs_list"])) >= 4 rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]]) unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"]) prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff # print("prev_fs, curr_fs", prev_fs, curr_fs) # print("unique text") # print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) ) # print("visual_header check", len(set(page_stats["fs_list"]))) # print("varied_set", varied_set, "unique_text", unique_text) # print(rounded_fs_count) # print() # close from max or far enough from median bigger_text = max_val_diff or ( prev_curr_ratio_from_median > 2 ) # TODO text must also be relatively uncommon if varied_set and (unique_text <= 0.08): if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3: # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True # header join if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1): # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True return False # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): def check_tr_alignment(prev_line, curr_line): # print("-=" * 50) # print("check_tr_alignment!") # print(prev_line.text) # print(curr_line.text) # print() prev_ents = len(prev_line.visual_line.text_list) curr_ents = len(curr_line.visual_line.text_list) prev_positions = prev_line.visual_line.start_x_list curr_positions = curr_line.visual_line.start_x_list prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent # print(prev_line_start_ents) # print(curr_line_start_ents) same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1 if len(prev_line_start_ents) == len(curr_line_start_ents): prev_positions = prev_line_start_ents curr_positions = curr_line_start_ents if len(prev_line_start_ents) == len(curr_positions) and len( prev_line_start_ents, ) != len( prev_positions, ): # joined p_tags prev_positions = 
prev_line_start_ents if not same_ents: # print("check_tr_alignment False1") # print(prev_ents, curr_ents) return False # print("CHECKING POSITIONS") # print(prev_positions) # print(curr_positions) for p_x, c_x in zip(prev_positions, curr_positions): p_x = round(p_x) c_x = round(c_x) if abs(p_x - c_x) > 100: # print("False") # print("check_tr_alignment False3") return False # print("check_tr_alignment True") return True def check_layout(prev_line, curr_line, prev_above_curr): prev_line_width = range( int(prev_line.visual_line.min_x), int(prev_line.visual_line.max_x), ) # weird edge case if not prev_line_width: prev_line_width = range( int(prev_line.visual_line.max_x), int(prev_line.visual_line.min_x), ) curr_line_width = range( int(curr_line.visual_line.min_x), int(curr_line.visual_line.max_x), ) prev_line_width = set(prev_line_width) prev_curr_overlap = prev_line_width.intersection(curr_line_width) if prev_curr_overlap and not prev_above_curr: # print(prev_line.text) # print(curr_line.text) # print("misplaced text group") # print() return True return False def order_blocks(blocks): block_group_dict = defaultdict(list) for idx, block in enumerate(blocks): # print(idx, "block-group", block["group_id"], block["block_type"], block['block_text']) group_id = block["group_id"] block_group_dict[group_id].append(block) block_group_list = [] # list that holds tuples (group_id, y_pos) for block_group_id in block_group_dict: block_group_list.append( (block_group_id, block_group_dict[block_group_id][0]["y"]), ) # append starting y position of group block_group_list = sorted( block_group_list, key=lambda x: x[1], ) # sort block groups by y position # get list of ordered block group keys ordered_blocks = [] for block_group_id, y in block_group_list: ordered_blocks += block_group_dict[block_group_id] # for b in original_blocks: # re-index blocks and headers based off of new ordering header_idx = 0 for idx, block in enumerate(ordered_blocks): block["block_idx"] = idx if block["block_type"] == "header": header_idx = idx ordered_blocks[idx]["header_block_idx"] = header_idx return ordered_blocks def visual_clean_lines( lines, page_stats={}, page_info_dict={}, page_idx=0, line_set={}, ): page_blocks = [] header_block_idx = -1 block_idx = 0 # block_idx = page_idx style_dict = {} join_font_spacing = False prev_line = None text_list = [] prev_ents = 0 curr_ents = 0 is_incomplete = False colon_rule = False text_group_start = True text_group_start_idx = 0 prev_line = None next_line = None # for idx, line in enumerate(lines[12:14]): sentence_visual_end = False group_id = 0 for idx, line in enumerate(lines): # print(idx) line_str, style_dict, text_list = ( line["text"], line["style"], line["text_list"], ) line_str = " ".join(line_str.split()) if should_skip(line_str): continue if line_str in line_set: continue if len(line_str.split()) > 8: line_set.add(line_str) curr_line = line_parser.Line( line_str=line_str, style_dict=style_dict, text_list=text_list, page_details=page_stats, ) if prev_line is None: # initialize memory of previous line. 
# this will update with join decisions list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "list_char": list_char, "fs": curr_line.visual_line.start_fs, "text_group_start_idx": text_group_start_idx, "block_list": curr_line.visual_line.text_list, "line": curr_line, "y": curr_line.visual_line.start_y, "group_id": group_id, } prev_line = curr_line block_idx += 1 # if (idx <= 3) or (idx >= len(lines) - 3): # line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip() # if line_without_numbers: # # track block_idx for de-duplication # line_set[line_without_numbers].append((page_idx, block_idx)) page_blocks.append(block) continue # print("--" * 50) # print(prev_line.line_type, "\n", prev_line.text) # print(prev_ents) # print(prev_line.visual_line.fw_list) # print(prev_line.visual_line.font_family) # print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text) # print(prev_line.visual_line.mode_fs) # print(curr_line.line_type, "\n", curr_line.text) # print(curr_ents) # print() # print(curr_line.visual_line.font_family) # print(curr_line.visual_line.mode_fs) # print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text) if ( len(prev_line.text) > 1 and len(curr_line.text) > 1 and prev_line.text[:2] == curr_line.text[:2] and prev_line.text[1] == " " and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit()) and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha()) ): curr_line.line_type = "list_item" curr_line.is_list_item = True curr_line.is_list_or_row = True if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["block_type"] = "list_item" page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() same_start_fs = ( abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5 ) same_end_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5 ) same_end_start_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5 ) prev_above_curr = ( True if prev_line.visual_line.end_y < curr_line.visual_line.start_y else False ) y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y top_overlap = compute_overlap_top_bottom( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) bottom_overlap = compute_bottom_top_overlap( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) prev_overlap_curr = True if bottom_overlap or top_overlap else False use_visual_join = True if prev_above_curr and prev_overlap_curr else False if not use_visual_join and prev_line.incomplete_line: join_font_spacing = True if not (prev_line.is_table_row or curr_line.is_table_row): if page_stats["n_lines"] <= 3: join_font_spacing = True else: join_font_spacing = check_page_spacing( prev_line, curr_line, page_stats["fs_and_diff_next_y"], ) # if the font is different and font-family is different different_font_family = ( 
curr_line.visual_line.font_family != prev_line.visual_line.font_family ) different_common_fs = ( prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs ) different_font = ( different_font_family and different_common_fs and not join_font_spacing ) # start and end characters are same font or the mode of fonts of both lines is the same same_font = ( (prev_line.visual_line.fs == curr_line.visual_line.fs) or (same_start_fs and same_end_fs) or same_end_start_fs or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs ) and not different_font prev_ents = ( len(prev_line.visual_line.text_list) if not prev_line.line_type == "list_item" else 0 ) curr_ents = ( len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0 ) ents_aligned = check_tr_alignment(prev_line, curr_line) is_incomplete_sent = ( prev_line.incomplete_line and not prev_line.ends_with_period or prev_line.ends_with_comma ) # logic using line after curr if idx + 1 < len(lines): # this is inefficent as line_parser is called twice, # once for next_line and once for curr_line. next_line = lines[idx + 1] # print("NEXT LINE\n", next_line['text']) next_line_str, next_style_dict, next_text_list = ( next_line["text"], next_line["style"], next_line["text_list"], ) next_line = line_parser.Line( line_str=next_line_str, style_dict=next_style_dict, text_list=next_text_list, page_details=page_stats, ) # if the last line was not a table, check if the next line is a table to avoid single tr if prev_line.line_type != "table_row" and not ents_aligned: # check if the next line is a table and matches curr_line next_line_tr = next_line.line_type == "table_row" or should_join_table( curr_line, next_line, False, ) if not next_line_tr and curr_line.line_type == "table_row": curr_line.line_type = "para" # if the next line is joinable by visual stats but prev and curr are not # don't join the line (only true by x-span check and y is below for prev cur) # if this is not true ignore the rule prev_not_above_next = ( next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y ) next_line_join = False if next_line and check_layout(prev_line, next_line, prev_not_above_next): next_line_join = check_page_spacing( curr_line, next_line, page_stats["fs_and_diff_next_y"], ) # if the prev line is not visually joinable and the curr_next is # make sure the prev_line doesn't join the curr_line curr_next_visual_join = not join_font_spacing and next_line_join # print() # print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line") # print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line) # print("join_font_spacing:,", join_font_spacing) is_incomplete = ( is_incomplete_sent or (join_font_spacing and not sentence_visual_end) or curr_line.continuing_line ) # print("is_incomplete", is_incomplete) has_overlap_with_min = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=True, ) > 0.7 ) is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0 is_visually_apart = (has_overlap_with_min and not is_below) or ( not has_overlap_with_min and is_below ) above_bold_below_not = ( prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0 ) has_overlap_with_max = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, 
prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=False, ) > 0.3 ) is_not_header_over_para = True if ( above_bold_below_not and not has_overlap_with_max and prev_line.line_type == "header" and not prev_line.incomplete_line ): is_not_header_over_para = False # print("header over para check") # print("""above_bold_below_not # and not has_overlap_with_max # and prev_line.line_type == "header" # """) # print(above_bold_below_not) # print(has_overlap_with_max, j) # print(prev_line.line_type == "header") # print() # print(is_not_header_over_para) ########### # List item if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]): prev_line.line_type = "list_item" curr_line.line_type = "list_item" curr_line.is_list_item = True # change prev_line to list item if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() page_blocks[-1]["block_type"] = "list_item" close_text_y = ( curr_line.visual_line.start_y - curr_line.visual_line.mode_fs - prev_line.visual_line.start_y - prev_line.visual_line.mode_fs ) <= 0 aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x title_text = False if len(lines) < 10: title_text = top_overlap == 1.0 and close_text_y and aligned_text visual_header = visual_header_check(prev_line, curr_line, same_font) list_item_rule = curr_line.has_list_char or ( curr_line.numbered_line and not ( (prev_line.incomplete_line and curr_line.continuing_line) or join_font_spacing ) ) last_2_block_tr = False if len(page_blocks) >= 2: last_block_tr = ( page_blocks[-1]["block_type"] == "table_row" and page_blocks[-2]["block_type"] == "table_row" ) if not last_block_tr and curr_line.line_type == "para": # check to join if prev_line.incomplete_line and curr_line.continuing_line: last_2_block_tr = True no_space_join = prev_line.ends_with_period and curr_line.text[0] != " " visual_header_by_stats = visual_header_from_stats( prev_line, curr_line, page_stats, ) header_join = False common_list = curr_line.has_list_char or prev_line.has_list_char if ( visual_header_by_stats and curr_line.incomplete_line and same_font and not (prev_line.is_table_row or curr_line.is_table_row or common_list) ): header_join = True # print("LINEJOIN CHECK") # print("positive\n", "*" * 10) # print(f"\nsame_font:{same_font}", # f"\nis_incomplete:{is_incomplete}", # f"\nis_not_header_over_para:{is_not_header_over_para}") # print("join_font_spacing", join_font_spacing) # print("header join", header_join) # print() # print("negative\n", "*" * 10) # print(f"\nis_visually_apart:{is_visually_apart}", # f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}", # f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}", # f"\ncurr_line table {curr_line.line_type == 'table_row'}", # f"\ncurr_line list {curr_line.is_list_item}", # f"\nvisual_header {visual_header}", # f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}') if ( same_font and not should_join_table(prev_line, curr_line, ents_aligned) and not (curr_line.line_type == "table_row" or list_item_rule) and not (prev_line.line_type == "table_row" and not last_2_block_tr) and is_incomplete and not curr_next_visual_join # is_visually_apart and not visual_header or not check_parentheses(prev_line.text) and is_not_header_over_para and not no_space_join or title_text or header_join ): # print("JOIN") if not is_visually_apart and 
bottom_overlap < 0.5:
        # this would signify end of paragraph
        sentence_visual_end = True
    else:
        sentence_visual_end = False
    if page_stats["n_lines"] <= 3:
        page_blocks[-1]["block_type"] = "header"
    elif (
        not prev_line.line_type == "list_item"
    ):  # and not curr_line.visual_line.is_header:
        page_blocks[-1]["block_type"] = "para"
    new_text = formatter.connect(
        prev_line.text.rstrip(),
        curr_line.text.lstrip(),
    )
    new_text_list = (
        prev_line.visual_line.text_list + curr_line.visual_line.text_list
    )
    # print("Max ex min ex assignment")
    max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
    min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
    prev_line_type = prev_line.line_type
    page_blocks[-1]["block_text"] = new_text
    prev_start_y = prev_line.visual_line.start_y
    curr_start_y = curr_line.visual_line.start_y
    prev_end_y = prev_line.visual_line.end_y
    wrapped_page = prev_line.visual_line.wrapped_page
    # pass the line parser attributes
    prev_line = curr_line
    # add appended text and text_list, preserve the line type
    prev_line.text = new_text
    prev_line.visual_line.start_y = prev_start_y
    prev_line.visual_line.text_list = new_text_list
    prev_line.line_type = prev_line_type
    prev_line.visual_line.min_x = min_x
    prev_line.visual_line.max_x = max_x
    prev_line.visual_line.wrapped_page = wrapped_page
    if curr_start_y < prev_end_y:
        prev_line.visual_line.wrapped_page = True
    # print(prev_start_y)
    # print("Join")
    # print()
    # print("-" * 50)
    # print()
# new block
else:
    # print("NEW block")
    # print("*" * 50)
    if not is_visually_apart and bottom_overlap < 0.5:
        # this would signify end of paragraph
        sentence_visual_end = True
    else:
        sentence_visual_end = False
    # print("-"*50)
    colon_rule = (
        prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
    )
    # normal case
    tab_check_join = {
        prev_line.visual_line.tab_count_join,
        prev_line.visual_line.tab_count,
    } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}
    tab_check = sum(tab_check_join) > 0
    # print("-+" * 50)
    # print("TAB POSITIONS")
    # print(prev_line.text)
    # print(prev_line.visual_line.start_x_list)
    # print(prev_line.visual_line.start_x_list_single_ent)
    # print(prev_line.visual_line.tab_count)
    # print(prev_line.visual_line.tab_count_join)
    #
    # print(curr_line.text)
    # print(curr_line.visual_line.start_x_list)
    # print(curr_line.visual_line.start_x_list_single_ent)
    # print(curr_line.visual_line.tab_count)
    # print(curr_line.visual_line.tab_count_join)
    # print("tabcheck", tab_check)
    # print("ents_aligned", ents_aligned)
    # print(prev_ents, curr_ents)
    # print(curr_line.visual_line.text_list)
    # print("-+" * 50)
    if visual_header_by_stats and prev_line.line_type != "table_row":
        page_blocks[-1]["block_type"] = "header"
    elif (
        colon_rule
        and prev_ents == 1
        and prev_line.line_type != "list_item"
        and not (prev_line.incomplete_line and curr_line.continuing_line)
    ):
        # print("Table Conversion")
        # print()
        # print("colon check")
        # print(prev_line.text.split(":"))
        # print(curr_line.text.split(":"))
        # print("TR1")
        new_text_list = prev_line.text.split(":")
        new_text_list = [new_text_list[0] + ":"] + new_text_list[1:]
        page_blocks[-1]["block_type"] = "table_row"
        page_blocks[-1]["block_list"] = new_text_list
        if text_group_start:
            text_group_start = False
            text_group_start_idx = page_blocks[-1]["block_idx"]
        page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
        curr_line.line_type = "table_row"
        curr_line.is_list_or_row = True
        # print("Table Conversion!")
        # print(prev_ents, curr_ents)
        # print(page_blocks[-1]["block_text"])
        # print("TR3")
    elif (
tab_check and ents_aligned and prev_line.line_type != "list_item" ) or (colon_rule and not prev_line.incomplete_line): # print("Table Conversion") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR2") page_blocks[-1]["block_type"] = "table_row" if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" else: text_group_start = True text_group_start_idx = -1 list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx if (visual_header or visual_header_by_stats) and not ( prev_line.line_type == "list_item" or prev_line.line_type == "numbered_list_item" ): page_blocks[-1]["block_type"] = "header" # print() # print("*" * 40) # print("NEW BLOCK") # print() # print("*" * 40) # print(curr_line.line_type, curr_line.text) # group attribute if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0: group_id += 1 block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "text_group_start_idx": text_group_start_idx, "list_char": list_char, "group_id": group_id, "fs": curr_line.visual_line.start_fs, "x": curr_line.visual_line.start_x, "y": curr_line.visual_line.start_y, "line": curr_line, "block_list": curr_line.visual_line.text_list, } # This is to account for when the headers get false positive #TODO improve header code prev_text = page_blocks[-1]["block_text"] if page_blocks[-1]["block_type"] == "header" and ( len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16 ): page_blocks[-1]["block_type"] = "para" prev_line = curr_line block_idx += 1 page_blocks.append(block) # not too many blocks there may be title text missed if len(page_blocks) <= 2: for idx, block in enumerate(page_blocks): if "." 
not in block["block_text"] and len(block["block_text"].split()) < 10: page_blocks[idx]["block_type"] = "header" page_blocks = order_blocks(page_blocks) return page_blocks, line_set def clean_line(line): line = line.replace("\n", " ") line = line.replace("\t", " ") line = line.strip() return line def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) def connect(prev, curr): has_space = prev.endswith(" ") result = prev + ("" if has_space else " ") + curr return result def get_numbers(line): # test = re.compile(r"[0-9]+\.?[0-9]?") regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$") return regex.search(line) def check_block_join(prev_block, block): prev_text = prev_block["block_text"] curr_text = block["block_text"] blocks_are_paras = ( prev_block["block_type"] == "para" and block["block_type"] == "para" ) if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras: prev_line = line_parser.Line(prev_block["block_text"]) curr_line = line_parser.Line(block["block_text"]) if prev_line.incomplete_line or curr_line.continuing_line: return True return False def join_blocks(page_blocks, blocks): prev_last_block = page_blocks[-1][-1] # update page blocks and blocks # prev_blocks = page_blocks[-1] # last_prev_block = prev_blocks[-1] # check to join last_prev_block with first blocks[0] # if it's a join, pop the block and join, subtract block indexes prev_last_block["block_text"] = ( prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip() ) prev_last_block["block_list"].append(blocks[0]["block_list"]) # print(prev_block) page_blocks[-1][-1] = prev_last_block for block in blocks[1:]: block["block_idx"] -= 1 return page_blocks, blocks[1:] <fim_middle>if ( is_incomplete and not (curr_line.is_list_or_row or curr_line.line_type == "list_item") ) or curr_line.continuing_line: logger.debug("connecting..") running_line = formatter.connect(running_line, curr_line.text) line_buffer.append(curr_line) # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers if not line_type == "list_item": line_type = "para" else: # commit the line and start a new line # remove different types of bulletted list (for better formatting) but do not touch numbered line logger.debug("starting new line..") # if line_type == "list_item": # running_line = running_line[1:].lstrip() if line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) block_idx = block_idx + 1 running_line = curr_line.text line_buffer = [curr_line] line_type = curr_line.line_type
if (
    is_incomplete
    and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
) or curr_line.continuing_line:
    logger.debug("connecting..")
    running_line = formatter.connect(running_line, curr_line.text)
    line_buffer.append(curr_line)
    # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
    if not line_type == "list_item":
        line_type = "para"
else:
    # commit the line and start a new line
    # remove different types of bulleted lists (for better formatting) but do not touch numbered lines
    logger.debug("starting new line..")
    # if line_type == "list_item":
    #     running_line = running_line[1:].lstrip()
    if line_type == "header":
        header_block_idx = block_idx
    block = {
        "block_idx": block_idx,
        "block_text": running_line,
        "block_type": line_type,
        "text_group_start_idx": -1,
        "block_list": [],
        "header_block_idx": header_block_idx,
        "level": 0,
    }
    result.append(block)
    block_idx = block_idx + 1
    running_line = curr_line.text
    line_buffer = [curr_line]
    line_type = curr_line.line_type
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
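The target above is the join-or-commit branch of clean_lines. A stripped-down sketch of that control flow (fold_lines and the SimpleNamespace stand-ins are hypothetical; the real code also consults the line_buffer length and ends_with_period):

from types import SimpleNamespace

def fold_lines(parsed_lines):
    # Join a line into the running block when the previous line looks
    # incomplete (and the current one is not a list/table row) or when the
    # current line visibly continues it; otherwise commit a block.
    blocks, running, line_type, prev = [], "", "para", None
    for curr in parsed_lines:
        join = prev is not None and (
            (prev.incomplete_line
             and not (curr.is_list_or_row or curr.line_type == "list_item"))
            or curr.continuing_line
        )
        if join:
            running = running.rstrip() + " " + curr.text.lstrip()
            if line_type != "list_item":
                line_type = "para"  # joined lines can no longer be headers
        else:
            if running:
                blocks.append({"block_text": running, "block_type": line_type})
            running, line_type = curr.text, curr.line_type
        prev = curr
    if running:
        blocks.append({"block_text": running, "block_type": line_type})
    return blocks

lines = [
    SimpleNamespace(text="The agreement shall remain", incomplete_line=True,
                    continuing_line=False, is_list_or_row=False, line_type="para"),
    SimpleNamespace(text="in force until terminated.", incomplete_line=False,
                    continuing_line=True, is_list_or_row=False, line_type="para"),
]
# fold_lines(lines) -> one "para" block:
# "The agreement shall remain in force until terminated."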
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def get_row1(row): orignal_row = row words = row.split(" ") cells = [] try: row = processors_utils.super_replace(row, ["(", ")", ",", "$", "%"], "") tags = nltk.pos_tag(list(filter(None, row.split(" ")))) except Exception as e: logging.error(e) return [orignal_row] # "" strn = "" for i in range(len(tags)): # previous check tag = tags[i][1] word = words[i].lstrip().rstrip() proc_word = processors_utils.super_replace(word, ["(", ")", ",", "$", "%"], "") if len(word) & len(proc_word.replace(" ", "")): # print(proc_word) start_tag = nltk.pos_tag(proc_word[0])[0][1] end_tag = nltk.pos_tag(proc_word[-1])[0][1] else: start_tag = "CD" end_tag = "CD" if ((tag == "CD") | (tag == ":")) and ( (tag == ":") | ((start_tag == "CD") and (end_tag == "CD")) ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) strn = "" elif ( ((start_tag == "CD") and (end_tag == "CD")) & (word != "$") & (word == "%") ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) else: strn += word.lstrip().rstrip() + " " if type(cells) == str: cells = [cells] return cells # nlm-ingestor/nlm_ingestor/file_parser/tika_parser.py def find_tika_header(fp): try: with open(fp) as file: file_data = file.read() soup = BeautifulSoup(file_data, "html.parser") # print(str(soup.find_all('head')[0])) head = soup.find_all("head") return "org.apache.tika.parser" in str(head[0]) except Exception as e: logging.error(e) return False # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def format_tables(blocks_df): # columns block_text block_sents block_type # identify all tables in df table_indexes = blocks_df[blocks_df.block_type == "table_row"].index # if none are found if len(table_indexes) == 0: return blocks_df # group tables tables = group_tables(table_indexes) invalid = [] idx = [] for i in range(len(tables)): if len(tables[i]) < 2: invalid.append(i) else: idx.append(i) if len(invalid): blocks_df.loc[ np.concatenate(np.array(tables)[np.array(invalid)], axis=0), "block_type", ] = "para" table_rows = blocks_df[blocks_df.block_type == "table_row"] table_list = [] # print(table_rows) for table_idx in idx: table_idx = tables[table_idx] # print(table_rows.loc[table_idx].values,"\n") table = [] for row_idx, row in table_rows.loc[table_idx].iterrows(): table += [list(filter(None, get_row(row["block_text"].rstrip())))] # check if table is uniform table_cell_counts = [] if len(table) and (len(table[0])): table_cell_counts = [len(row) for row in table] try: cell_count = mode(table_cell_counts) except Exception as e: logging.error(e) cell_count = min(table_cell_counts) # non uniform row if (sum(table_cell_counts) % len(table[0])) and (cell_count): new_table = [] for row in table: # multiple rows in row if (len(row) > cell_count) and (len(row) % cell_count == 0): rows = int(len(row) / cell_count) for new_row in range(rows): new_row += 1 new_table_row = row[ new_row * cell_count - cell_count : new_row * cell_count ] new_table.append(new_table_row) else: new_table.append(row) table_list.append(new_table) else: table_list.append(table) else: table_list.append(table) replace = [] # check for valid tables if len(idx): for i in np.array(tables)[np.array(idx)]: replace.append(i) for i in range(len(replace)): blocks_df = blocks_df.drop(replace[i]) blocks_df.loc[replace[i][0]] = { "block_type": "table", "block_sents": 
table_list[i], "block_text": table_list[i], } return blocks_df.sort_index().reset_index(drop=True) else: return blocks_df """ import datetime import logging import math import re import string from nltk.corpus import stopwords from .patterns import abbreviations from .patterns import states from .patterns import states_abbreviations from .styling_utils import mode_of_list try: stop_words = set(stopwords.words("english")) except Exception as e: logging.error(e) import nltk stopwords = nltk.download("stopwords") stop_words = set(stopwords.words("english")) stop_words.add("per") continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~" list_chars = [ "โ€ข", "โžข", "*", "ฦ’", "๏‚ท", "๏‚ง", "๏ƒ˜", "๏ฎ", "ยป", "โ˜", "ยท", "๏ฟฝ", "โ–ช", "โ–ช", "โ—‹", "๔€ธ", "โ€“", ] list_types = { "โ€ข": "circle", "โžข": "wide_symbol_arrow", "*": "star", "ฦ’": "f", "๏‚ท": "clock", "๏‚ง": "small_square", "๏ƒ˜": "narrow_symbol_arrow", "๏ฎ": "large_square", "ยป": "double_arrow", "โ˜": "hollow_square", "ยท": "circle", "๏ฟฝ": "special_char", "โ–ช": "very_small_square", "โ–ช": "very_small_square", "โ—‹": "hollow_circle", "๔€ธ": "hollow_squere", "โ€“": "dash", "โ€’": "another-dash", "ฬถ": "underscore", } unicode_list_types = { "\\uf0b7": "โ€ข", "\\uf0fc": "๏ƒผ", } footnote_types = { "ยฉ" } ambiguous_list_chars = ["+", "-"] units = ["acres", "miles", "-"] # - could represent a null value in a row punctuations = string.punctuation + "โ€œ" start_quotations = ["'", '"', "โ€œ"] end_quotations = ["'", '"', "โ€"] """ Quote Pattern details: \\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly. ["โ€œ\'] ==> Quote patterns (?!\\D\\s) ==> Negative Lookahead for single character following the quote. Helps in removing words like Macy's, don't ... (?!\\d+) ==> Negative Lookahead for one or more digits following the pattern. Helps in removing words like '19, '2019 (.*?)[,;.]?[โ€"\'] ==> Match all other data. """ # Add / Modify Quotation pattern in ingestor_utils/utils.py also. 
quote_pattern = re.compile( r'(?:(?<=\W)|(?<=^))["โ€œโ€˜โ€™\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[โ€"โ€˜โ€™\']+', ) # (r'["โ€œ\'](.*?)[,;.]?[โ€"\']') single_char_pattern = re.compile(r'[a-zA-Z]') multi_char_pattern = re.compile(r'[a-zA-Z]+') roman_number_pattern = re.compile(r'[ixvIXV]+$') ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"โ€œโ€˜โ€™โ€\'\s]*$") conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"] class Word: def __init__(self, token): self.text = token self.is_percent = False self.is_number = False self.is_year = False # year does not count as a number self.is_dollar = False self.is_million = False self.is_billion = False self.is_thousand = False self.is_date_entry = False self.is_negative = False self.length = len(self.text) self.is_stop_word = self.text.lower() in stop_words self.is_number_range = False self.parts = [] text_without_punct = self.text while ( len(text_without_punct) > 1 and (text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations) ): text_without_punct = text_without_punct[0:-1] # remove leading unbalancced punctuations while ( len(text_without_punct) > 1 and (text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations) ): text_without_punct = text_without_punct[1:] self.text_without_punct = text_without_punct self.is_noun = self.text_without_punct[0].isupper() n = self.check_numeric() self.check_date() try: if n: n = round(float(n)) <fim_suffix> self.num_digits = digits if digits == 4 and self.text.replace(",", "") == self.text: self.is_year = True self.is_number = False else: self.num_digits = 0 except Exception as e: logging.error(e) self.num_digits = 0 def check_date(self): if "/" in self.text or "-" in self.text: text = self.text.replace("/", "-") date_patterns = [ "%b-%d", "%B-%d", "%B-%d-%y", "%B-%d-%Y", "%b-%d-%Y", "%b-%d-%y", "%m-%d", "%m-%d-%y", "%m-%d-%Y", ] for pat in date_patterns: try: datetime.datetime.strptime(text, pat) self.is_date_entry = True return except ValueError: pass else: self.is_date_entry = False def check_numeric(self): word = self.text.lower() if not word.isalpha(): if word.isprintable(): if not word.isnumeric(): if word.startswith("(") and word.endswith(")"): word = word[1:-1] if word.startswith("-"): self.is_negative = True word = word[1:] if word.startswith("$"): self.is_dollar = True word = word[1:] elif word.endswith("$"): self.is_dollar = True word = word[0:-1] elif word.endswith("%"): self.is_percent = True word = word[0:-1] elif word.endswith("m"): self.is_million = True elif word.endswith("bn"): self.is_billion = True if word.startswith("(") and word.endswith(")"): word = word[1:-1] word = word.replace(",", "") if word.isnumeric() or word.replace(".", "", 1).isnumeric(): self.is_number = True parts = word.split("-") if ( len(parts) == 2 and parts[0].isnumeric() and parts[1].isnumeric() ): self.is_number_range = True self.parts = parts else: self.is_number = True if self.is_number: numeric_part = word return numeric_part class Line: def __init__( self, line_str, text_list=[], style_dict={}, page_details={}, noun_chunk_ending_tokens=[], ): self.text = line_str.strip() self.visual_line = VisualLine(text_list, style_dict, page_details) self.words = [] self.is_independent = False self.is_header = False self.is_header_without_comma = False self.noun_chunks = [] self.quoted_words = quote_pattern.findall(self.text) self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens} 
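# Usage sketch (hypothetical call, for illustration only):
#   Line("Section 3. Compensation").line_type == "header"
# because parse_line() below finds a recognised first word ("Section"),
# exactly one number, only title-cased content words and no sentence-ending
# delimiter, so check_header() classifies the line as a header.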
self.parse_line() def check_header(self): # Section X, Article Y, Note 1 etc. first_word_header = self.first_word.lower() in ["section", "article", "note"] # If there are a certain percentage of title words (first letter capitalize) title_ratio = ( self.title_word_count / self.eff_word_count if self.eff_word_count > 0 else 1.0 ) # print(self.title_word_count, self.eff_word_count, title_ratio) # Section 1 is a header but Section 1: Hello 3 is not has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10 has_header_structure = ( (first_word_header or has_enough_titles) and self.number_count == 1 ) or self.numbered_line or self.text.isupper() # has_header_structure = has_header_structure and self.eff_word_count < last_word_number = ( self.last_word.lower() in units or self.last_word_number and not has_header_structure ) last_word_date = self.last_word_date and not has_header_structure # Find lines ending with sentence delimiter. But exclude text like "L.P." ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None sentence_structure = self.ends_with_period and not ( has_header_structure and title_ratio > 0.9 ) and ends_with_delim last_letter_is_punctuation = ( self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and ends_with_delim ) self.is_header_without_comma = ( not sentence_structure and not self.has_list_char and not self.first_char in footnote_types and has_enough_titles and not last_word_number and ( self.number_count == 0 or (has_header_structure and self.number_count <= 1) ) and not self.has_continuing_chars and not last_word_date and self.first_word_title and not self.last_word_is_stop_word and not self.is_zipcode_or_po and not last_letter_is_punctuation and not "://" in self.text # url pattern ) self.is_header = self.is_header_without_comma and \ ((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True) def check_ends_with_period(self): # punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.'] last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."] self.ends_with_period = self.last_char in ["."] and not last_word_is_title def check_table_row(self): if not self.is_header: value_count = ( self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ") ) word_symbols = self.word_count - self.dollar_sign_count if word_symbols == 0: word_symbols = 1 word_ratio = ( value_count + self.title_word_count + self.date_entry_count ) / word_symbols self.is_table_row = ( ( (value_count > 0 or self.date_entry_count > 0) and word_ratio > 0.7 and not self.ends_with_period and not self.is_zipcode_or_po ) and not self.last_word_is_stop_word or ("...." in self.text) ) else: self.is_table_row = False def check_list_item(self): text = self.text.strip() self.has_list_char = text[0] in list_types.keys() # if not self.has_list_char and text[0] in ambiguous_list_chars: # self.has_list_char = text[1:].strip()[0].isalpha() self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$" if self.is_list_item: self.list_type = list_types[text[0]] # matches 1.1 1.2.1 1 etc. 
def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # check if line is part of address def check_zipcode_or_pobox(self): # check if line matches format P.O. 
box xxxxx pobox = ( self.word_count == 3 and self.last_word_number and self.first_word.lower() in ["po", "p.o", "p.o."] ) # check if line is last part of address, matching format "city, state zipcode" zipcode = ( self.word_count < 7 # ensure line is standalone address, not part of larger sentence and ( self.contains_state # line contains comma followed by state name or abbreviation # line ends in zipcode, with format xxxxx or xxxxx-xxxx and ( (self.last_word_number or self.last_word[-4:].isdigit()) and ( (len(self.last_word) == 10 and self.last_word[-5] == "-") or len(self.last_word) == 5 ) ) and not self.ends_with_period ) ) self.is_zipcode_or_po = pobox or zipcode def set_line_type(self): line_type = "para" if self.is_table_row: line_type = "table_row" elif self.is_header: line_type = "header" elif self.is_list_item or self.numbered_line: line_type = "list_item" else: line_type = "para" self.line_type = line_type def parse_line(self): self.words = [] self.title_word_count = 0 self.alpha_count = 0 self.list_type = "" self.integer_numbered_line = False self.roman_numbered_line = False self.dot_numbered_line = False self.numbered_line = False self.stop_word_count = 0 self.dollar_count = 0 self.pct_count = 0 self.number_count = 0 self.last_word_number = False self.first_word_title = False self.letter_numbered_line = False self.ends_with_hyphen = False self.last_word_date = False self.is_reference_author_name = False self.date_entry_count = 0 self.last_word_is_stop_word = False # self.last_word in self.stopwords self.hit_colon = False self.is_zipcode_or_po = False self.contains_state = False self.addresses = [] # todo - this is a stopgap solution, need to make it more efficient tokens = self.text.split() self.length = len(self.text) self.word_count = len(tokens) self.dollar_sign_count = tokens.count("$") last_idx = self.word_count - 1 first_alpha_found = False prev_token_comma = False self.eff_length = 0 single_letter_word_count = 0 noun_chunk_buf = [] if self.length == 0: return for idx, token in enumerate(tokens): if token in unicode_list_types.keys(): token = unicode_list_types[token] if token.__contains__(":"): self.hit_colon = True # remove punctuation unless (word) or unless it is the first token or if it has colon last_char = token[-1] # remove punctuation unless (word) or unless it is the first token if ( (token[-1] in string.punctuation or token[-1] in end_quotations) and not (token[0] in string.punctuation or token[0] in start_quotations) and (not idx == 0 or token[-1] == ":") ): token = token[0:-1] if len(token) == 0: continue # if prev token contained comma, check if current token is state name if prev_token_comma and ( token.lower() in states or token.lower() in states_abbreviations ): self.contains_state = True prev_token_comma = False if prev_token_comma: prev_token_comma = False if last_char == ",": prev_token_comma = True if idx == 0 and not token.lower() == "i" and not token.lower() == "a": self.check_numbered_line(token) if token.istitle() or token.isupper(): # and not self.hit_colon: self.title_word_count = self.title_word_count + 1 if token.isalpha(): # if not self.hit_colon: self.alpha_count = self.alpha_count + 1 if not first_alpha_found: first_alpha_found = True if idx == 0: self.first_word_title = token[0].isupper() word = Word(token) if word.is_number: self.number_count = self.number_count + 1 if idx == last_idx: self.last_word_number = True if word.is_date_entry: self.date_entry_count += 1 if idx == last_idx: self.last_word_date = True if word.is_dollar: 
self.dollar_count = self.dollar_count + 1 if idx == last_idx: self.last_word_number = True if word.is_percent: self.pct_count = self.pct_count + 1 if idx == last_idx: self.last_word_number = True self.eff_length += word.length if word.length == 1: single_letter_word_count += 1 if word.is_stop_word: if not self.hit_colon: self.stop_word_count = self.stop_word_count + 1 if idx == last_idx and len(token) != 1 and not token.isupper(): self.last_word_is_stop_word = True if word.is_noun or word.text == "&": noun = word.text_without_punct prev_word = self.words[-1] if len(self.words) > 0 else None if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf: noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway if noun.endswith("'s"): noun = noun[0:-2] noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] elif ( "".join([x.lower() for x in noun if x not in {".", ","}]) in self.noun_chunk_ending_tokens ): noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] else: noun_chunk_buf.append(noun) elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]: noun_chunk_buf.append(word.text_without_punct) elif len(noun_chunk_buf): self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] self.words.append(word) if len(noun_chunk_buf) > 0: self.noun_chunks.append(" ".join(noun_chunk_buf)) self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks)))) self.first_word = tokens[0] self.last_word = tokens[-1] self.last_char = self.text[-1] self.ends_with_period = self.last_char == "." self.ends_with_comma = self.last_char == "," self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "." 
self.eff_word_count = self.alpha_count - self.stop_word_count self.check_ends_with_period() self.first_char = self.text[0] self.has_continuing_chars = not self.numbered_line and ( self.first_char.islower() or self.first_char in continuing_chars ) self.last_continuing_char = self.last_char in continuing_chars self.check_zipcode_or_pobox() self.check_list_item() self.check_header() self.check_table_row() self.separate_line = ( self.is_header or self.is_table_row or self.is_list_item or self.is_zipcode_or_po ) self.is_list_or_row = self.is_table_row or self.is_list_item self.is_header_or_row = ( self.is_header or self.is_table_row or self.is_zipcode_or_po ) self.ends_with_abbreviation = self.ends_with_period and ( (self.last_word.find(".") != len(self.last_word) - 1) or self.last_word.lower() in abbreviations or len(self.last_word) <= 3 ) self.incomplete_line = not self.is_header_or_row and ( not self.ends_with_period or self.ends_with_abbreviation or self.end_with_period_single_char ) self.continuing_line = self.has_continuing_chars and not self.separate_line self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8 self.set_line_type() if self.is_header or self.is_header_without_comma: if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2: self.is_reference_author_name = True self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list # print(self.separate_line) # self.continuing_line = not self.separate_line and def to_json(self): json_lp = dict(self.__dict__) del json_lp["visual_line"] words = [] for word in self.words: words.append(word.__dict__) json_lp["words"] = words return json_lp class VisualLine: def __init__(self, text_list=[], style_dict={}, page_stats={}): self.text_list = text_list self.start_x = None self.start_y = None self.end_x = None self.end_y = None self.fs = None self.fw = None self.start_fs = None self.end_fs = None self.diff_prev_y = None self.diff_next_y = None self.is_comparably_sized = False self.is_comparably_bolded = False self.is_prev_space_smallest = False self.is_next_space_smallest = False self.wrapped_page = False self.text = " ".join(self.text_list) if style_dict: self.start_x = style_dict["start_x"][0] self.start_y = style_dict["start_y"][0] self.end_x = style_dict["end_x"][-1] self.end_y = style_dict["end_y"][-1] self.fs = style_dict["line_fs"][0] self.fw = style_dict["line_fw"][0] self.diff_prev_y = style_dict["diff_prev_y"][0] self.diff_next_y = style_dict["diff_next_y"][0] self.font_family = ( style_dict["font_family"][0] if len(style_dict["font_family"]) else None ) self.font_style = ( style_dict["font_style"][0] if len(style_dict["font_style"]) else None ) self.min_x = ( self.start_x ) # these variables are adjustable during line joins for line width self.max_x = self.end_x self.start_x_list = style_dict["start_x"] # joined ents self.end_x_list = style_dict["end_x"] # joined ents self.start_x_list_single_ent = style_dict["start_x_list"][0] self.end_x_list_single_ent = style_dict["end_x_list"][0] self.mode_fs = mode_of_list(style_dict["line_fs"]) self.tab_count = 0 # calculates tabs for when tika misses word split if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent): self.start_end_list = list( zip(self.start_x_list_single_ent, self.end_x_list_single_ent), ) for word_x, next_word_x in zip( self.start_end_list[:-1], self.start_end_list[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = 
next_word_start_x - word_end_x if word_distance > 20: self.tab_count += 1 else: self.start_end_list = [] self.tab_count_join = 0 # tab count after join in ptolines # calculates tabs for when tika misses word split if len(self.start_x_list) == len(self.end_x_list): self.start_end_list_join = list( zip(self.start_x_list, self.end_x_list), ) for word_x, next_word_x in zip( self.start_end_list_join[:-1], self.start_end_list_join[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = next_word_start_x - word_end_x if word_distance > 20: self.tab_count_join += 1 else: self.start_end_list_join = [] if len(self.text.split()) == 2 and self.tab_count == 1: self.text_list = self.text.split() # Count tabs in text list, Eventually make it a function of font size self.start_fs = round(style_dict["start_fs"][0], 1) self.end_fs = round(style_dict["end_fs"][-1], 1) self.compute_visual_features(page_stats) def compute_visual_features(self, page_stats): # compute font size relative to most common font font_sizes_mode = page_stats["mode_fs"] if self.fs > (4 / 3) * font_sizes_mode: self.is_comparably_sized = True else: self.is_comparably_sized = False # compute font weight relative to 600.0 which has generally # been observed to correspond to bolding of some sort font_weights_mode = page_stats["mode_fw"] if font_weights_mode >= 600.0: self.is_comparably_bolded = False elif self.fw > 600.0: self.is_comparably_bolded = True # compare line height for similar type (same font) lines if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2: for k, v in page_stats["fs_and_diff_prev_y"].items(): if k == self.fs and 0 <= v < self.diff_prev_y: break else: self.is_prev_space_smallest = True if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2: for k, v in page_stats["fs_and_diff_next_y"].items(): if k == self.fs and 0 <= v < self.diff_next_y: break else: self.is_next_space_smallest = True def should_join_table(self, next_line): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # check list of spaced words curr_line_ents = len(self.text_list) next_line_ents = len(next_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # compare alignment of elements in both lists if ent_match: return return False def should_join_para(self): return False def should_join_header(self): return False def __str__(self): output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest}," output_str += f"\nfont_style = {self.font_style}" return output_str <fim_middle>if n > 0: digits = int(math.log10(n)) + 1 elif n == 0: digits = 1 else: digits = int(math.log10(-n)) + 2
if n > 0:
    digits = int(math.log10(n)) + 1
elif n == 0:
    digits = 1
else:
    digits = int(math.log10(-n)) + 2
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
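The completion above derives a digit count from math.log10, with special cases for zero and for negatives (where the minus sign counts as an extra character); in Word.check_numeric a four-digit value without commas is then reclassified as a year. A minimal standalone sketch of that formula, with illustrative asserts (the count_digits name is ours, not the repo's):

    import math

    def count_digits(n: int) -> int:
        # Mirrors the num_digits logic: log10 gives the order of magnitude,
        # zero is one digit, and negatives gain one character for the sign.
        if n > 0:
            return int(math.log10(n)) + 1
        if n == 0:
            return 1
        return int(math.log10(-n)) + 2

    assert count_digits(2024) == 4   # four digits -> candidate year
    assert count_digits(0) == 1
    assert count_digits(-42) == 3    # "-42" is three characters
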
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def check_number_type(num): dollar = ( re.search(r"^[\(]*\$\d[\d\.\,)]*$", num) is not None or re.search(r"^[\(]*\d[\d\.\,)]*\$$", num) is not None ) percentage = ( re.search(r"^[\(]*\%\d[\d\.\,)]*$", num) is not None or re.search(r"^[\(]*\d[\d\.\,)]*\%$", num) is not None ) if dollar: return "dollar" if percentage: return "percent" else: return "num" # nlm-ingestor/nlm_ingestor/ingestor_utils/spell_utils.py def lookup_word(self, input_term): max_edit_distance_lookup = 2 suggestion_verbosity = Verbosity.CLOSEST # ignore_token = None ignore_token = "|".join(patterns.spell_check) suggestions = self.sym_spell.lookup( input_term, suggestion_verbosity, max_edit_distance_lookup, transfer_casing=False, ignore_token=ignore_token, ) # print(suggestions) # for suggestion in suggestions: # print("{}, {}, {}".format(suggestion.term, suggestion.distance, # suggestion.count)) if len(suggestions) > 0: return suggestions[0].term else: return input_term # nlm-ingestor/nlm_ingestor/ingestor/visual_ingestor/visual_ingestor.py def should_ignore_line(self, all_p, is_page_footer, is_page_header, last_line_counts, line_idx, loc_key, lp_line, p, page_footers, page_headers, page_idx, box_style, page_visual_lines): if box_style[4] < 1: # Really small text return True, False if line_idx > len(all_p) - 2 or line_idx < 2: num_only = not_a_number_pattern.sub("", p.text).strip() do_ignore = True if line_idx < 2: remove_whole_numbers = integer_pattern.sub("", p.text).strip() if (len(remove_whole_numbers) == len(p.text) or not p.text.lower().startswith("page")) \ and lp_line.word_count > 1: do_ignore = False if 0 < len(num_only) < 4 and lp_line.alpha_count < 2 and not lp_line.dot_numbered_line and do_ignore: return True, False else: text_only = text_only_pattern.sub("", p.text).strip() if text_only in last_line_counts and last_line_counts[text_only] > 2 and \ not lp_line.last_word_is_stop_word and line_idx > len(all_p) - 2: return True, False if p.text: if p.text.startswith("Source:"): return True, False elif not len(single_char_pattern.sub("", p.text).strip()) and box_style[1] < 5: # Get rid of single letter text, which might be a water mark return True, False ignore, ignore_all_after = self.should_ignore( p.text, "header" if lp_line.is_header else None, ) if ignore: return True, False if ignore_all_after: # fix this return True, False if is_page_header and loc_key in page_headers: do_continue = True if len(page_visual_lines) > 0: # Sort using top sorted_vls = sorted(page_visual_lines, key=lambda vl: vl['box_style'][0]) if sorted_vls[0]['box_style'][0] < box_style[0]: # Check top # We have added a VL before this to the group, so don't discard this Header. do_continue = False if do_continue: page_idxs = page_headers[loc_key] if len(page_idxs) > 1 and page_idx > 0 and page_idx in page_idxs and not lp_line.dot_numbered_line: if HF_DEBUG: print(f"skipping header : {p.text}, {loc_key}, {page_idxs}") return True, False elif is_page_footer and loc_key in page_footers and not lp_line.dot_numbered_line: page_idxs = page_footers[loc_key] if len(page_idxs) > 1: if HF_DEBUG: print(f"skipping footer : {p.text}, {loc_key}") return True, False if box_style[4] < 1: # Check height # We are referring to some really small text here. if LINE_DEBUG: print(f"Ignoring really small line {p.text}.. 
", box_style) return True, False if p.text in filter_out_pattern_list: return False, True else: return False, False """ import datetime import logging import math import re import string from nltk.corpus import stopwords from .patterns import abbreviations from .patterns import states from .patterns import states_abbreviations from .styling_utils import mode_of_list try: stop_words = set(stopwords.words("english")) except Exception as e: logging.error(e) import nltk stopwords = nltk.download("stopwords") stop_words = set(stopwords.words("english")) stop_words.add("per") continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~" list_chars = [ "โ€ข", "โžข", "*", "ฦ’", "๏‚ท", "๏‚ง", "๏ƒ˜", "๏ฎ", "ยป", "โ˜", "ยท", "๏ฟฝ", "โ–ช", "โ–ช", "โ—‹", "๔€ธ", "โ€“", ] list_types = { "โ€ข": "circle", "โžข": "wide_symbol_arrow", "*": "star", "ฦ’": "f", "๏‚ท": "clock", "๏‚ง": "small_square", "๏ƒ˜": "narrow_symbol_arrow", "๏ฎ": "large_square", "ยป": "double_arrow", "โ˜": "hollow_square", "ยท": "circle", "๏ฟฝ": "special_char", "โ–ช": "very_small_square", "โ–ช": "very_small_square", "โ—‹": "hollow_circle", "๔€ธ": "hollow_squere", "โ€“": "dash", "โ€’": "another-dash", "ฬถ": "underscore", } unicode_list_types = { "\\uf0b7": "โ€ข", "\\uf0fc": "๏ƒผ", } footnote_types = { "ยฉ" } ambiguous_list_chars = ["+", "-"] units = ["acres", "miles", "-"] # - could represent a null value in a row punctuations = string.punctuation + "โ€œ" start_quotations = ["'", '"', "โ€œ"] end_quotations = ["'", '"', "โ€"] """ Quote Pattern details: \\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly. ["โ€œ\'] ==> Quote patterns (?!\\D\\s) ==> Negative Lookahead for single character following the quote. Helps in removing words like Macy's, don't ... (?!\\d+) ==> Negative Lookahead for one or more digits following the pattern. Helps in removing words like '19, '2019 (.*?)[,;.]?[โ€"\'] ==> Match all other data. """ # Add / Modify Quotation pattern in ingestor_utils/utils.py also. 
quote_pattern = re.compile( r'(?:(?<=\W)|(?<=^))["โ€œโ€˜โ€™\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[โ€"โ€˜โ€™\']+', ) # (r'["โ€œ\'](.*?)[,;.]?[โ€"\']') single_char_pattern = re.compile(r'[a-zA-Z]') multi_char_pattern = re.compile(r'[a-zA-Z]+') roman_number_pattern = re.compile(r'[ixvIXV]+$') ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"โ€œโ€˜โ€™โ€\'\s]*$") conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"] class Word: def __init__(self, token): self.text = token self.is_percent = False self.is_number = False self.is_year = False # year does not count as a number self.is_dollar = False self.is_million = False self.is_billion = False self.is_thousand = False self.is_date_entry = False self.is_negative = False self.length = len(self.text) self.is_stop_word = self.text.lower() in stop_words self.is_number_range = False self.parts = [] text_without_punct = self.text while ( len(text_without_punct) > 1 and (text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations) ): text_without_punct = text_without_punct[0:-1] # remove leading unbalancced punctuations while ( len(text_without_punct) > 1 and (text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations) ): text_without_punct = text_without_punct[1:] self.text_without_punct = text_without_punct self.is_noun = self.text_without_punct[0].isupper() n = self.check_numeric() self.check_date() try: if n: n = round(float(n)) if n > 0: digits = int(math.log10(n)) + 1 elif n == 0: digits = 1 else: digits = int(math.log10(-n)) + 2 self.num_digits = digits if digits == 4 and self.text.replace(",", "") == self.text: self.is_year = True self.is_number = False else: self.num_digits = 0 except Exception as e: logging.error(e) self.num_digits = 0 def check_date(self): if "/" in self.text or "-" in self.text: text = self.text.replace("/", "-") date_patterns = [ "%b-%d", "%B-%d", "%B-%d-%y", "%B-%d-%Y", "%b-%d-%Y", "%b-%d-%y", "%m-%d", "%m-%d-%y", "%m-%d-%Y", ] for pat in date_patterns: try: datetime.datetime.strptime(text, pat) self.is_date_entry = True return except ValueError: pass else: self.is_date_entry = False def check_numeric(self): word = self.text.lower() if not word.isalpha(): if word.isprintable(): if not word.isnumeric(): if word.startswith("(") and word.endswith(")"): word = word[1:-1] if word.startswith("-"): self.is_negative = True word = word[1:] if word.startswith("$"): self.is_dollar = True word = word[1:] elif word.endswith("$"): self.is_dollar = True word = word[0:-1] elif word.endswith("%"): self.is_percent = True word = word[0:-1] elif word.endswith("m"): self.is_million = True elif word.endswith("bn"): self.is_billion = True if word.startswith("(") and word.endswith(")"): word = word[1:-1] word = word.replace(",", "") if word.isnumeric() or word.replace(".", "", 1).isnumeric(): self.is_number = True parts = word.split("-") if ( len(parts) == 2 and parts[0].isnumeric() and parts[1].isnumeric() ): self.is_number_range = True self.parts = parts else: self.is_number = True if self.is_number: numeric_part = word return numeric_part class Line: def __init__( self, line_str, text_list=[], style_dict={}, page_details={}, noun_chunk_ending_tokens=[], ): self.text = line_str.strip() self.visual_line = VisualLine(text_list, style_dict, page_details) self.words = [] self.is_independent = False self.is_header = False self.is_header_without_comma = False self.noun_chunks = [] self.quoted_words = 
quote_pattern.findall(self.text) self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens} self.parse_line() def check_header(self): # Section X, Article Y, Note 1 etc. first_word_header = self.first_word.lower() in ["section", "article", "note"] # If there are a certain percentage of title words (first letter capitalize) title_ratio = ( self.title_word_count / self.eff_word_count if self.eff_word_count > 0 else 1.0 ) # print(self.title_word_count, self.eff_word_count, title_ratio) # Section 1 is a header but Section 1: Hello 3 is not has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10 has_header_structure = ( (first_word_header or has_enough_titles) and self.number_count == 1 ) or self.numbered_line or self.text.isupper() # has_header_structure = has_header_structure and self.eff_word_count < last_word_number = ( self.last_word.lower() in units or self.last_word_number and not has_header_structure ) last_word_date = self.last_word_date and not has_header_structure # Find lines ending with sentence delimiter. But exclude text like "L.P." ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None sentence_structure = self.ends_with_period and not ( has_header_structure and title_ratio > 0.9 ) and ends_with_delim last_letter_is_punctuation = ( self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and ends_with_delim ) self.is_header_without_comma = ( not sentence_structure and not self.has_list_char and not self.first_char in footnote_types and has_enough_titles and not last_word_number and ( self.number_count == 0 or (has_header_structure and self.number_count <= 1) ) and not self.has_continuing_chars and not last_word_date and self.first_word_title and not self.last_word_is_stop_word and not self.is_zipcode_or_po and not last_letter_is_punctuation and not "://" in self.text # url pattern ) self.is_header = self.is_header_without_comma and \ ((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True) def check_ends_with_period(self): # punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.'] last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."] self.ends_with_period = self.last_char in ["."] and not last_word_is_title def check_table_row(self): <fim_suffix> def check_list_item(self): text = self.text.strip() self.has_list_char = text[0] in list_types.keys() # if not self.has_list_char and text[0] in ambiguous_list_chars: # self.has_list_char = text[1:].strip()[0].isalpha() self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$" if self.is_list_item: self.list_type = list_types[text[0]] # matches 1.1 1.2.1 1 etc. 
def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # check if line is part of address def check_zipcode_or_pobox(self): # check if line matches format P.O. 
box xxxxx pobox = ( self.word_count == 3 and self.last_word_number and self.first_word.lower() in ["po", "p.o", "p.o."] ) # check if line is last part of address, matching format "city, state zipcode" zipcode = ( self.word_count < 7 # ensure line is standalone address, not part of larger sentence and ( self.contains_state # line contains comma followed by state name or abbreviation # line ends in zipcode, with format xxxxx or xxxxx-xxxx and ( (self.last_word_number or self.last_word[-4:].isdigit()) and ( (len(self.last_word) == 10 and self.last_word[-5] == "-") or len(self.last_word) == 5 ) ) and not self.ends_with_period ) ) self.is_zipcode_or_po = pobox or zipcode def set_line_type(self): line_type = "para" if self.is_table_row: line_type = "table_row" elif self.is_header: line_type = "header" elif self.is_list_item or self.numbered_line: line_type = "list_item" else: line_type = "para" self.line_type = line_type def parse_line(self): self.words = [] self.title_word_count = 0 self.alpha_count = 0 self.list_type = "" self.integer_numbered_line = False self.roman_numbered_line = False self.dot_numbered_line = False self.numbered_line = False self.stop_word_count = 0 self.dollar_count = 0 self.pct_count = 0 self.number_count = 0 self.last_word_number = False self.first_word_title = False self.letter_numbered_line = False self.ends_with_hyphen = False self.last_word_date = False self.is_reference_author_name = False self.date_entry_count = 0 self.last_word_is_stop_word = False # self.last_word in self.stopwords self.hit_colon = False self.is_zipcode_or_po = False self.contains_state = False self.addresses = [] # todo - this is a stopgap solution, need to make it more efficient tokens = self.text.split() self.length = len(self.text) self.word_count = len(tokens) self.dollar_sign_count = tokens.count("$") last_idx = self.word_count - 1 first_alpha_found = False prev_token_comma = False self.eff_length = 0 single_letter_word_count = 0 noun_chunk_buf = [] if self.length == 0: return for idx, token in enumerate(tokens): if token in unicode_list_types.keys(): token = unicode_list_types[token] if token.__contains__(":"): self.hit_colon = True # remove punctuation unless (word) or unless it is the first token or if it has colon last_char = token[-1] # remove punctuation unless (word) or unless it is the first token if ( (token[-1] in string.punctuation or token[-1] in end_quotations) and not (token[0] in string.punctuation or token[0] in start_quotations) and (not idx == 0 or token[-1] == ":") ): token = token[0:-1] if len(token) == 0: continue # if prev token contained comma, check if current token is state name if prev_token_comma and ( token.lower() in states or token.lower() in states_abbreviations ): self.contains_state = True prev_token_comma = False if prev_token_comma: prev_token_comma = False if last_char == ",": prev_token_comma = True if idx == 0 and not token.lower() == "i" and not token.lower() == "a": self.check_numbered_line(token) if token.istitle() or token.isupper(): # and not self.hit_colon: self.title_word_count = self.title_word_count + 1 if token.isalpha(): # if not self.hit_colon: self.alpha_count = self.alpha_count + 1 if not first_alpha_found: first_alpha_found = True if idx == 0: self.first_word_title = token[0].isupper() word = Word(token) if word.is_number: self.number_count = self.number_count + 1 if idx == last_idx: self.last_word_number = True if word.is_date_entry: self.date_entry_count += 1 if idx == last_idx: self.last_word_date = True if word.is_dollar: 
self.dollar_count = self.dollar_count + 1 if idx == last_idx: self.last_word_number = True if word.is_percent: self.pct_count = self.pct_count + 1 if idx == last_idx: self.last_word_number = True self.eff_length += word.length if word.length == 1: single_letter_word_count += 1 if word.is_stop_word: if not self.hit_colon: self.stop_word_count = self.stop_word_count + 1 if idx == last_idx and len(token) != 1 and not token.isupper(): self.last_word_is_stop_word = True if word.is_noun or word.text == "&": noun = word.text_without_punct prev_word = self.words[-1] if len(self.words) > 0 else None if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf: noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway if noun.endswith("'s"): noun = noun[0:-2] noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] elif ( "".join([x.lower() for x in noun if x not in {".", ","}]) in self.noun_chunk_ending_tokens ): noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] else: noun_chunk_buf.append(noun) elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]: noun_chunk_buf.append(word.text_without_punct) elif len(noun_chunk_buf): self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] self.words.append(word) if len(noun_chunk_buf) > 0: self.noun_chunks.append(" ".join(noun_chunk_buf)) self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks)))) self.first_word = tokens[0] self.last_word = tokens[-1] self.last_char = self.text[-1] self.ends_with_period = self.last_char == "." self.ends_with_comma = self.last_char == "," self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "." 
self.eff_word_count = self.alpha_count - self.stop_word_count self.check_ends_with_period() self.first_char = self.text[0] self.has_continuing_chars = not self.numbered_line and ( self.first_char.islower() or self.first_char in continuing_chars ) self.last_continuing_char = self.last_char in continuing_chars self.check_zipcode_or_pobox() self.check_list_item() self.check_header() self.check_table_row() self.separate_line = ( self.is_header or self.is_table_row or self.is_list_item or self.is_zipcode_or_po ) self.is_list_or_row = self.is_table_row or self.is_list_item self.is_header_or_row = ( self.is_header or self.is_table_row or self.is_zipcode_or_po ) self.ends_with_abbreviation = self.ends_with_period and ( (self.last_word.find(".") != len(self.last_word) - 1) or self.last_word.lower() in abbreviations or len(self.last_word) <= 3 ) self.incomplete_line = not self.is_header_or_row and ( not self.ends_with_period or self.ends_with_abbreviation or self.end_with_period_single_char ) self.continuing_line = self.has_continuing_chars and not self.separate_line self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8 self.set_line_type() if self.is_header or self.is_header_without_comma: if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2: self.is_reference_author_name = True self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list # print(self.separate_line) # self.continuing_line = not self.separate_line and def to_json(self): json_lp = dict(self.__dict__) del json_lp["visual_line"] words = [] for word in self.words: words.append(word.__dict__) json_lp["words"] = words return json_lp class VisualLine: def __init__(self, text_list=[], style_dict={}, page_stats={}): self.text_list = text_list self.start_x = None self.start_y = None self.end_x = None self.end_y = None self.fs = None self.fw = None self.start_fs = None self.end_fs = None self.diff_prev_y = None self.diff_next_y = None self.is_comparably_sized = False self.is_comparably_bolded = False self.is_prev_space_smallest = False self.is_next_space_smallest = False self.wrapped_page = False self.text = " ".join(self.text_list) if style_dict: self.start_x = style_dict["start_x"][0] self.start_y = style_dict["start_y"][0] self.end_x = style_dict["end_x"][-1] self.end_y = style_dict["end_y"][-1] self.fs = style_dict["line_fs"][0] self.fw = style_dict["line_fw"][0] self.diff_prev_y = style_dict["diff_prev_y"][0] self.diff_next_y = style_dict["diff_next_y"][0] self.font_family = ( style_dict["font_family"][0] if len(style_dict["font_family"]) else None ) self.font_style = ( style_dict["font_style"][0] if len(style_dict["font_style"]) else None ) self.min_x = ( self.start_x ) # these variables are adjustable during line joins for line width self.max_x = self.end_x self.start_x_list = style_dict["start_x"] # joined ents self.end_x_list = style_dict["end_x"] # joined ents self.start_x_list_single_ent = style_dict["start_x_list"][0] self.end_x_list_single_ent = style_dict["end_x_list"][0] self.mode_fs = mode_of_list(style_dict["line_fs"]) self.tab_count = 0 # calculates tabs for when tika misses word split if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent): self.start_end_list = list( zip(self.start_x_list_single_ent, self.end_x_list_single_ent), ) for word_x, next_word_x in zip( self.start_end_list[:-1], self.start_end_list[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = 
next_word_start_x - word_end_x if word_distance > 20: self.tab_count += 1 else: self.start_end_list = [] self.tab_count_join = 0 # tab count after join in ptolines # calculates tabs for when tika misses word split if len(self.start_x_list) == len(self.end_x_list): self.start_end_list_join = list( zip(self.start_x_list, self.end_x_list), ) for word_x, next_word_x in zip( self.start_end_list_join[:-1], self.start_end_list_join[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = next_word_start_x - word_end_x if word_distance > 20: self.tab_count_join += 1 else: self.start_end_list_join = [] if len(self.text.split()) == 2 and self.tab_count == 1: self.text_list = self.text.split() # Count tabs in text list, Eventually make it a function of font size self.start_fs = round(style_dict["start_fs"][0], 1) self.end_fs = round(style_dict["end_fs"][-1], 1) self.compute_visual_features(page_stats) def compute_visual_features(self, page_stats): # compute font size relative to most common font font_sizes_mode = page_stats["mode_fs"] if self.fs > (4 / 3) * font_sizes_mode: self.is_comparably_sized = True else: self.is_comparably_sized = False # compute font weight relative to 600.0 which has generally # been observed to correspond to bolding of some sort font_weights_mode = page_stats["mode_fw"] if font_weights_mode >= 600.0: self.is_comparably_bolded = False elif self.fw > 600.0: self.is_comparably_bolded = True # compare line height for similar type (same font) lines if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2: for k, v in page_stats["fs_and_diff_prev_y"].items(): if k == self.fs and 0 <= v < self.diff_prev_y: break else: self.is_prev_space_smallest = True if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2: for k, v in page_stats["fs_and_diff_next_y"].items(): if k == self.fs and 0 <= v < self.diff_next_y: break else: self.is_next_space_smallest = True def should_join_table(self, next_line): """ Check if next line should be joined as a tr. 
This makes no assumption if the current line is a table """ # check list of spaced words curr_line_ents = len(self.text_list) next_line_ents = len(next_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # compare alignment of elements in both lists if ent_match: return return False def should_join_para(self): return False def should_join_header(self): return False def __str__(self): output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest}," output_str += f"\nfont_style = {self.font_style}" return output_str <fim_middle>if not self.is_header: value_count = ( self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ") ) word_symbols = self.word_count - self.dollar_sign_count if word_symbols == 0: word_symbols = 1 word_ratio = ( value_count + self.title_word_count + self.date_entry_count ) / word_symbols self.is_table_row = ( ( (value_count > 0 or self.date_entry_count > 0) and word_ratio > 0.7 and not self.ends_with_period and not self.is_zipcode_or_po ) and not self.last_word_is_stop_word or ("...." in self.text) ) else: self.is_table_row = False
if not self.is_header:
    value_count = (
        self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ")
    )
    word_symbols = self.word_count - self.dollar_sign_count
    if word_symbols == 0:
        word_symbols = 1
    word_ratio = (
        value_count + self.title_word_count + self.date_entry_count
    ) / word_symbols
    self.is_table_row = (
        (
            (value_count > 0 or self.date_entry_count > 0)
            and word_ratio > 0.7
            and not self.ends_with_period
            and not self.is_zipcode_or_po
        )
        and not self.last_word_is_stop_word
        or ("...." in self.text)
    )
else:
    self.is_table_row = False
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
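The check_table_row completion above is a ratio heuristic: count value-like tokens (numbers, dollar amounts, percents, " - " null cells) against word-like tokens, and flag the line as a table row when values dominate and the line does not read like a sentence. A simplified, self-contained restatement of that test (the function name and flat arguments are illustrative, not the class's API):

    def looks_like_table_row(text, number_count, dollar_count, pct_count,
                             date_entry_count, title_word_count, word_count,
                             dollar_sign_count, ends_with_period=False,
                             is_zipcode_or_po=False, last_word_is_stop_word=False):
        # Value-like tokens: numbers, $ amounts, percents, and " - " null cells.
        value_count = number_count + dollar_count + pct_count + text.count(" - ")
        # Avoid dividing by zero when the line is only "$" symbols.
        word_symbols = max(word_count - dollar_sign_count, 1)
        word_ratio = (value_count + title_word_count + date_entry_count) / word_symbols
        return (
            (value_count > 0 or date_entry_count > 0)
            and word_ratio > 0.7
            and not ends_with_period
            and not is_zipcode_or_po
            and not last_word_is_stop_word
        ) or "...." in text  # dotted leaders are a strong table signal

A row like "Revenue $1,200 $1,350 12%" scores well above the 0.7 threshold, while an ordinary prose sentence (mostly stop words, ends with a period) does not.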
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/line_parser.py def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def traverse(parent, level, blocks): for child in parent: # handle cases when there's only a <country /> tag if not child.text: continue if len(list(child)) > 0: # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader blocks.append(header_block) traverse(child, level + 1, blocks) else: # print("\t"*(level + 1), child.text) if not title and child.tag.lower().find("title") != -1: self.title = child.text if child.tag != "textblock": # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) # header_text = " ".join(child.tag.split("_")).title() header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader 
blocks.append(header_block) else: level -= 1 lines = child.text.split("\n") # print("\t" * (level + 1), "======") # for line in lines: # print("\t" * (level + 1), line) # print("\t" * (level + 1), "======") col_blocks = processors.clean_lines(lines, xml=True) header_text = blocks[-1]["block_text"] has_header = False for block in col_blocks: # print("\t" * (level + 1), block["block_text"]) inline_header = has_header and block["block_type"] == "para" block["header_text"] = para_header if inline_header else header_text indent_offset = 2 if inline_header else 1 block["level"] = level + indent_offset block["block_idx"] = len(blocks) block["page_idx"] = 0 block["block_sents"] = sent_tokenize(block["block_text"]) block["block_class"] = "nlm-text-body" block["level_chain"] = ( [title, header_text] if title else [header_text] ) if len(col_blocks) == 1: block["block_type"] = "para" blocks.append(block) if block["block_type"] == "header": has_header = True para_header = block["block_text"] # nlm-ingestor/nlm_ingestor/ingestor_utils/parsing_utils.py def find_potential_gaps(gap_count): """ This function checks if a table row can be formed from the current table row spacing scheme. This is for edge cases when tika doesn't properly chunk the cells of a line """ possible_gaps = 0 min_gap = min(gap_count) gap_threshold = [] for gap_size in gap_count: if gap_size > (min_gap * 3): gap_threshold.append(gap_size) possible_gaps += gap_count[gap_size] if len(gap_threshold): return possible_gaps, min(gap_threshold) # suggested splits return [], 0 """ import json import re import numpy as np from nltk import load from nltk import PunktSentenceTokenizer nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) if isinstance(obj, np.floating): return float(obj) if isinstance(obj, np.ndarray): return obj.tolist() return super(NpEncoder, self).default(obj) nlm_abbs = { "u.s", "u.s.a", "n.w", "p.o", "po", "st", "ave", "blvd", "ctr", "cir", "ct", "dr", "mtn", "apt", "hwy", "esq", "fig", "no", "sec", "n.a", "s.a.b", "non-u.s", "cap", 'u.s.c', "ste", } nlm_special_abbs = { "inc", } abbs = nltk_abbs | nlm_abbs nltk_tokenzier = PunktSentenceTokenizer() rules = [] for abb in abbs: # match start of the sentence pattern = fr"^{abb}.\s" replaced = f"{abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match token in sentence pattern = fr"\s{abb}.\s" replaced = f" {abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) for abb in nlm_special_abbs: pattern = fr"{abb}\." 
replaced = f"{abb}_" rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match content inside brackets # (?<=\() ==> starts with "(" # ([^)]+) ==> repeat not ")" # (?=\))") ==> ends with ")" bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))") space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.') quotation_pattern = re.compile(r'[โ€โ€œ"โ€˜โ€™\']') def sent_tokenize(org_texts): if not org_texts: return org_texts sents = [] # in case org_texts has \n, break it into multiple paragraph # edge case for html and markdown <fim_suffix> if len(sents) >= 2 and re.match(r"^.\.$", sents[0]): sents[1] = sents[0] + " " + sents[1] sents = sents[1:] return sents def divide_list_into_chunks(lst, n): # looping till length l for i in range(0, len(lst), n): yield lst[i : i + n] def normalize(X): norms = np.einsum("ij,ij->i", X, X) np.sqrt(norms, norms) X /= norms[:, np.newaxis] return X def detect_block_center_aligned(block, page_width): center_location = block["box_style"][1] + block["box_style"][3] / 2 center_aligned = abs(center_location - page_width / 2) < page_width * 0.01 width_check = block["box_style"][3] * 2 < page_width return center_aligned and width_check def detect_block_center_of_page(block, page_height): bottom = block["box_style"][0] + block["box_style"][4] center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3) return center_of_page def check_char_is_word_boundary(c): if c.isalnum(): return False if c in ['-', '_']: return False return True def blocks_to_sents(blocks, flatten_merged_table=False, debug=False): block_texts = [] block_info = [] header_block_idx = -1 header_match_idx = -1 header_match_idx_offset = -1 header_block_text = "" is_rendering_table = False is_rendering_merged_cells = False table_idx = 0 levels = [] prev_header = None block_idx = 0 for block_idx, block in enumerate(blocks): block_type = block["block_type"] if block_type == "header": if debug: print("---", block["level"], block["block_text"]) header_block_text = block["block_text"] header_block_idx = block["block_idx"] header_match_idx = header_match_idx_offset + 1 if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0: while len(levels) > 0 and levels[-1]["level"] >= block["level"]: if debug: print("<<", levels[-1]["level"], levels[-1]["block_text"]) levels.pop(-1) if debug: print(">>", block["block_text"]) levels.append(block) prev_header = block if debug: print("-", [str(level['level']) + "-" + level['block_text'] for level in levels]) block["header_text"] = header_block_text block["header_block_idx"] = header_block_idx block["header_match_idx"] = header_match_idx block["block_idx"] = block_idx level_chain = [] for level in levels: level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]}) # remove a level for header if block_type == "header": level_chain = level_chain[:-1] level_chain.reverse() block["level_chain"] = level_chain # if block_type == "header" or block_type == "table_row": if ( block_type == "header" and not is_rendering_table and 'is_table_start' not in block ): block_texts.append(block["block_text"]) # append text from next block to header block # TODO: something happened here, it messed up the match_text # if block_type == "header" and block_idx + 1 < len(blocks): # block[ # "block_text" # ] += blocks[block_idx+1]['block_text'] block_info.append(block) header_match_idx_offset += 1 elif ( block_type == "list_item" or block_type == "para" or block_type == 
"numbered_list_item" ) and not is_rendering_table: block_sents = block["block_sents"] header_match_idx_offset += len(block_sents) for sent in block_sents: block_texts.append(sent) block_info.append(block) elif 'is_table_start' in block: is_rendering_table = True if 'has_merged_cells' in block: is_rendering_merged_cells = True elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row": block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if is_rendering_table: if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table: eff_header_block = block["effective_header"] eff_para_block = block["effective_para"] eff_header_block["header_text"] = block["header_text"] eff_header_block["header_block_idx"] = block["block_idx"] eff_header_block["header_match_idx"] = header_match_idx_offset + 1 eff_header_block["level"] = block["level"] + 1 eff_header_block["level_chain"] = block["level_chain"] eff_para_block["header_block_idx"] = block["block_idx"] eff_para_block["header_match_idx"] = header_match_idx_offset + 1 eff_para_block["level"] = block["level"] + 2 eff_para_block["level_chain"] = [ { "block_idx": eff_header_block["block_idx"], "block_text": eff_header_block["block_text"], }, ] + eff_header_block["level_chain"] header_match_idx_offset += 1 block_info.append(block["effective_header"]) block_texts.append(block["effective_header"]["block_text"]) for sent in block["effective_para"]["block_sents"]: block_texts.append(sent) block_info.append(block["effective_para"]) header_match_idx_offset += len(block["effective_para"]["block_sents"]) else: block["table_idx"] = table_idx block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if 'is_table_end' in block: is_rendering_table = False table_idx += 1 return block_texts, block_info def get_block_texts(blocks): block_texts = [] block_info = [] for block in blocks: block_type = block["block_type"] if ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" or block_type == "header" ): block_texts.append(block["block_text"]) block_info.append(block) return block_texts, block_info<fim_middle>for org_text in org_texts.split("\n"): org_text = space_rule.sub(r'\1', org_text) modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925 orig_offset = abs(len(org_text) - len(modified_text)) # do not break bracket for span_group in bracket_rule.finditer(modified_text): start_byte, end_byte = span_group.span() span = modified_text[start_byte:end_byte] # skip this logic when span is too big? disabled for now # if len(span.split()) >= 10: # continue modified_text = modified_text.replace( f"({span})", f"_{span.replace('.','_')}_", ) for rule, replaced in rules: modified_text = rule.sub(replaced, modified_text) # Normalize all the quotation. modified_text = quotation_pattern.sub("\"", modified_text) modified_sents = nltk_tokenzier.tokenize(modified_text) offset = orig_offset sent_idx = 0 while offset < len(modified_text) and sent_idx < len(modified_sents): if modified_text[offset] == " ": offset += 1 continue # cut org_text based on lengths of modified_sent modified_sent = modified_sents[sent_idx] sents.append(org_text[offset: offset + len(modified_sent)]) offset += len(modified_sent) sent_idx += 1
for org_text in org_texts.split("\n"):
    org_text = space_rule.sub(r'\1', org_text)
    modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text)
    # To handle bug https://github.com/nltk/nltk/issues/2925
    orig_offset = abs(len(org_text) - len(modified_text))
    # do not break bracket
    for span_group in bracket_rule.finditer(modified_text):
        start_byte, end_byte = span_group.span()
        span = modified_text[start_byte:end_byte]
        # skip this logic when span is too big? disabled for now
        # if len(span.split()) >= 10:
        #     continue
        modified_text = modified_text.replace(
            f"({span})",
            f"_{span.replace('.','_')}_",
        )
    for rule, replaced in rules:
        modified_text = rule.sub(replaced, modified_text)
    # Normalize all the quotation.
    modified_text = quotation_pattern.sub("\"", modified_text)
    modified_sents = nltk_tokenzier.tokenize(modified_text)
    offset = orig_offset
    sent_idx = 0
    while offset < len(modified_text) and sent_idx < len(modified_sents):
        if modified_text[offset] == " ":
            offset += 1
            continue
        # cut org_text based on lengths of modified_sent
        modified_sent = modified_sents[sent_idx]
        sents.append(org_text[offset: offset + len(modified_sent)])
        offset += len(modified_sent)
        sent_idx += 1
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
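The sent_tokenize completion above works by masking periods that must not end a sentence (abbreviations, bracketed spans) with same-length substitutes, running the Punkt tokenizer on the masked text, and then slicing the original string by the masked sentence lengths so the returned sentences keep their original characters. A minimal sketch of that masking idea with a single illustrative abbreviation rule (the source additionally merges NLTK's pretrained english abbreviation list; this sketch uses a fresh tokenizer for brevity):

    import re
    from nltk import PunktSentenceTokenizer

    tokenizer = PunktSentenceTokenizer()

    # Replace "u.s." with a same-length token so Punkt ignores its periods;
    # equal length keeps character offsets valid in the original string.
    abb_rule = re.compile(r"\bu\.s\.(?=\s)", re.IGNORECASE)

    text = "Revenue grew in the U.S. last year. Costs fell."
    masked = abb_rule.sub("u_s_", text)
    sents = [text[start:end] for start, end in tokenizer.span_tokenize(masked)]
    # -> ['Revenue grew in the U.S. last year.', 'Costs fell.']

Because every substitution preserves length, span offsets computed on the masked text can be applied directly to the untouched original.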
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/line_parser.py def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def traverse(parent, level, blocks): for child in parent: # handle cases when there's only a <country /> tag if not child.text: continue if len(list(child)) > 0: # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader blocks.append(header_block) traverse(child, level + 1, blocks) else: # print("\t"*(level + 1), child.text) if not title and child.tag.lower().find("title") != -1: self.title = child.text if child.tag != "textblock": # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) # header_text = " ".join(child.tag.split("_")).title() header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader 
blocks.append(header_block) else: level -= 1 lines = child.text.split("\n") # print("\t" * (level + 1), "======") # for line in lines: # print("\t" * (level + 1), line) # print("\t" * (level + 1), "======") col_blocks = processors.clean_lines(lines, xml=True) header_text = blocks[-1]["block_text"] has_header = False for block in col_blocks: # print("\t" * (level + 1), block["block_text"]) inline_header = has_header and block["block_type"] == "para" block["header_text"] = para_header if inline_header else header_text indent_offset = 2 if inline_header else 1 block["level"] = level + indent_offset block["block_idx"] = len(blocks) block["page_idx"] = 0 block["block_sents"] = sent_tokenize(block["block_text"]) block["block_class"] = "nlm-text-body" block["level_chain"] = ( [title, header_text] if title else [header_text] ) if len(col_blocks) == 1: block["block_type"] = "para" blocks.append(block) if block["block_type"] == "header": has_header = True para_header = block["block_text"] # nlm-ingestor/nlm_ingestor/ingestor_utils/parsing_utils.py def find_potential_gaps(gap_count): """ This function checks if a table row can be formed from the current table row spacing scheme. This is for edge cases when tika doesn't properly chunk the cells of a line """ possible_gaps = 0 min_gap = min(gap_count) gap_threshold = [] for gap_size in gap_count: if gap_size > (min_gap * 3): gap_threshold.append(gap_size) possible_gaps += gap_count[gap_size] if len(gap_threshold): return possible_gaps, min(gap_threshold) # suggested splits return [], 0 """ import json import re import numpy as np from nltk import load from nltk import PunktSentenceTokenizer nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) if isinstance(obj, np.floating): return float(obj) if isinstance(obj, np.ndarray): return obj.tolist() return super(NpEncoder, self).default(obj) nlm_abbs = { "u.s", "u.s.a", "n.w", "p.o", "po", "st", "ave", "blvd", "ctr", "cir", "ct", "dr", "mtn", "apt", "hwy", "esq", "fig", "no", "sec", "n.a", "s.a.b", "non-u.s", "cap", 'u.s.c', "ste", } nlm_special_abbs = { "inc", } abbs = nltk_abbs | nlm_abbs nltk_tokenzier = PunktSentenceTokenizer() rules = [] for abb in abbs: # match start of the sentence pattern = fr"^{abb}.\s" replaced = f"{abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match token in sentence pattern = fr"\s{abb}.\s" replaced = f" {abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) for abb in nlm_special_abbs: pattern = fr"{abb}\." 
replaced = f"{abb}_" rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match content inside brackets # (?<=\() ==> starts with "(" # ([^)]+) ==> repeat not ")" # (?=\))") ==> ends with ")" bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))") space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.') quotation_pattern = re.compile(r'[โ€โ€œ"โ€˜โ€™\']') def sent_tokenize(org_texts): if not org_texts: return org_texts sents = [] # in case org_texts has \n, break it into multiple paragraph # edge case for html and markdown for org_text in org_texts.split("\n"): org_text = space_rule.sub(r'\1', org_text) modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925 orig_offset = abs(len(org_text) - len(modified_text)) # do not break bracket for span_group in bracket_rule.finditer(modified_text): start_byte, end_byte = span_group.span() span = modified_text[start_byte:end_byte] # skip this logic when span is too big? disabled for now # if len(span.split()) >= 10: # continue modified_text = modified_text.replace( f"({span})", f"_{span.replace('.','_')}_", ) <fim_suffix> # Normalize all the quotation. modified_text = quotation_pattern.sub("\"", modified_text) modified_sents = nltk_tokenzier.tokenize(modified_text) offset = orig_offset sent_idx = 0 while offset < len(modified_text) and sent_idx < len(modified_sents): if modified_text[offset] == " ": offset += 1 continue # cut org_text based on lengths of modified_sent modified_sent = modified_sents[sent_idx] sents.append(org_text[offset: offset + len(modified_sent)]) offset += len(modified_sent) sent_idx += 1 if len(sents) >= 2 and re.match(r"^.\.$", sents[0]): sents[1] = sents[0] + " " + sents[1] sents = sents[1:] return sents def divide_list_into_chunks(lst, n): # looping till length l for i in range(0, len(lst), n): yield lst[i : i + n] def normalize(X): norms = np.einsum("ij,ij->i", X, X) np.sqrt(norms, norms) X /= norms[:, np.newaxis] return X def detect_block_center_aligned(block, page_width): center_location = block["box_style"][1] + block["box_style"][3] / 2 center_aligned = abs(center_location - page_width / 2) < page_width * 0.01 width_check = block["box_style"][3] * 2 < page_width return center_aligned and width_check def detect_block_center_of_page(block, page_height): bottom = block["box_style"][0] + block["box_style"][4] center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3) return center_of_page def check_char_is_word_boundary(c): if c.isalnum(): return False if c in ['-', '_']: return False return True def blocks_to_sents(blocks, flatten_merged_table=False, debug=False): block_texts = [] block_info = [] header_block_idx = -1 header_match_idx = -1 header_match_idx_offset = -1 header_block_text = "" is_rendering_table = False is_rendering_merged_cells = False table_idx = 0 levels = [] prev_header = None block_idx = 0 for block_idx, block in enumerate(blocks): block_type = block["block_type"] if block_type == "header": if debug: print("---", block["level"], block["block_text"]) header_block_text = block["block_text"] header_block_idx = block["block_idx"] header_match_idx = header_match_idx_offset + 1 if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0: while len(levels) > 0 and levels[-1]["level"] >= block["level"]: if debug: print("<<", levels[-1]["level"], levels[-1]["block_text"]) levels.pop(-1) if debug: print(">>", block["block_text"]) levels.append(block) prev_header 
= block if debug: print("-", [str(level['level']) + "-" + level['block_text'] for level in levels]) block["header_text"] = header_block_text block["header_block_idx"] = header_block_idx block["header_match_idx"] = header_match_idx block["block_idx"] = block_idx level_chain = [] for level in levels: level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]}) # remove a level for header if block_type == "header": level_chain = level_chain[:-1] level_chain.reverse() block["level_chain"] = level_chain # if block_type == "header" or block_type == "table_row": if ( block_type == "header" and not is_rendering_table and 'is_table_start' not in block ): block_texts.append(block["block_text"]) # append text from next block to header block # TODO: something happened here, it messed up the match_text # if block_type == "header" and block_idx + 1 < len(blocks): # block[ # "block_text" # ] += blocks[block_idx+1]['block_text'] block_info.append(block) header_match_idx_offset += 1 elif ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" ) and not is_rendering_table: block_sents = block["block_sents"] header_match_idx_offset += len(block_sents) for sent in block_sents: block_texts.append(sent) block_info.append(block) elif 'is_table_start' in block: is_rendering_table = True if 'has_merged_cells' in block: is_rendering_merged_cells = True elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row": block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if is_rendering_table: if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table: eff_header_block = block["effective_header"] eff_para_block = block["effective_para"] eff_header_block["header_text"] = block["header_text"] eff_header_block["header_block_idx"] = block["block_idx"] eff_header_block["header_match_idx"] = header_match_idx_offset + 1 eff_header_block["level"] = block["level"] + 1 eff_header_block["level_chain"] = block["level_chain"] eff_para_block["header_block_idx"] = block["block_idx"] eff_para_block["header_match_idx"] = header_match_idx_offset + 1 eff_para_block["level"] = block["level"] + 2 eff_para_block["level_chain"] = [ { "block_idx": eff_header_block["block_idx"], "block_text": eff_header_block["block_text"], }, ] + eff_header_block["level_chain"] header_match_idx_offset += 1 block_info.append(block["effective_header"]) block_texts.append(block["effective_header"]["block_text"]) for sent in block["effective_para"]["block_sents"]: block_texts.append(sent) block_info.append(block["effective_para"]) header_match_idx_offset += len(block["effective_para"]["block_sents"]) else: block["table_idx"] = table_idx block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if 'is_table_end' in block: is_rendering_table = False table_idx += 1 return block_texts, block_info def get_block_texts(blocks): block_texts = [] block_info = [] for block in blocks: block_type = block["block_type"] if ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" or block_type == "header" ): block_texts.append(block["block_text"]) block_info.append(block) return block_texts, block_info<fim_middle>for rule, replaced in rules: modified_text = rule.sub(replaced, modified_text)
for rule, replaced in rules:
    modified_text = rule.sub(replaced, modified_text)
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
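The completed middle in the row above applies the precompiled abbreviation rules to the text before Punkt tokenization. A minimal, self-contained sketch of that masking step follows; the toy abbreviation set and sample sentence are illustrative assumptions, and this variant escapes the period (\.) where the row's original patterns use a bare ".".

# Sketch of the abbreviation-masking idea behind sent_tokenize above:
# rewriting "abbr." as "abbr_" keeps the Punkt tokenizer from treating
# the abbreviation's period as a sentence boundary.
import re
from nltk import PunktSentenceTokenizer

abbs = {"fig", "no"}  # assumed toy abbreviation set
rules = []
for abb in abbs:
    # abbreviation followed by a period and whitespace
    pattern = fr"\b{abb}\.\s"
    rules.append((re.compile(pattern, re.IGNORECASE), f"{abb}_ "))

modified_text = "See fig. 2, item no. 5. The result follows."
for rule, replaced in rules:
    modified_text = rule.sub(replaced, modified_text)

print(PunktSentenceTokenizer().tokenize(modified_text))
# "fig." and "no." survive as "fig_" / "no_", so only the boundary
# after "5." splits the text into two sentences.

The real sent_tokenize then maps the tokenizer's output back onto the original string by offset, so the sentences it returns are verbatim spans of the unmasked input.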
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/text_ingestor.py def blocks_to_json(page_blocks): results = [] block_count = 0 for page_idx, blocks in enumerate(page_blocks): result = [] block_start = block_count header_block_idx = -1 header_block_text = "" for block_idx_in_page, block in enumerate(blocks): if block["block_text"]: block_sents = utils.sent_tokenize(block["block_text"]) # header_block_idx = block["header_block_idx"] if block["block_type"] == "header": header_block_idx = block["block_idx"] header_block_text = block["block_text"] result.append( { "block_text": block["block_text"], "block_idx": block["block_idx"], "block_sents": block_sents, "block_type": block["block_type"], "header_block_idx": block_start + header_block_idx, "page_idx": page_idx, "block_idx_in_page": block_start + block_idx_in_page, "header_text": header_block_text, "text_group_start_idx": block["text_group_start_idx"], "block_list": block["block_list"], "level":0, "block_class": block["block_class"] if "block_class" in block else {} }, ) block_count += 1 results.append(result) return results # nlm-ingestor/nlm_ingestor/ingestor/styling_utils.py def tops_2_dict(p_items): tops_2_info = defaultdict(list) idx_2_top = {} for p_idx, p_item in enumerate(p_items): if not p_item.text.strip(): continue style_str = p_item.attrs.get("style", "") if not style_str: continue # do not strip text as trailing white-space is used as a features text = unicodedata.normalize("NFKD", p_item.text) style = get_p_styling_dict(style_str) start_y = style["start_y"] tops_2_info[round(start_y, 0)].append((p_idx, text, style)) idx_2_top[p_idx] = round(start_y, 0) # print(tops_2_info) return tops_2_info, idx_2_top # nlm-ingestor/nlm_ingestor/ingestor/table_parser.py def __init__(self, infos): self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(logging.INFO) self.tables = {} self.two_column_table_idx = set() self.resolved_tables = set() if not infos: return table_infos = [] table_start_idx = None for idx, info in enumerate(infos): if info.get("is_table_start", False) and not info.get("has_merged_cells", False): self.logger.debug(f"Found table start from match_idx:{idx}") table_start_idx = idx table_infos.append(info) elif table_start_idx is not None and info.get("is_table_end", False): table_infos.append(info) self.logger.debug(f"Table ends with match_idx:{idx}") # resolve table try: df = self.resolve_table_from_infos(table_infos) if isinstance(df, pd.DataFrame): self.logger.info( f"Found table at match_idx:{idx} of shape {df.shape}", ) self.tables[table_start_idx] = df if ( df.shape[1] == 1 and df.columns[0] == "_UNKNOWN_COLUMN_1_" and df.index.name == "_UNKNOWN_COLUMN_0_" ): for info_idx in range(len(table_infos)): self.two_column_table_idx.add(idx - info_idx) self.resolved_tables.add(table_infos[0]["table_idx"]) else: self.logger.error( f"Found table at match_idx:{idx} but failed to parse\n{table_infos[:2]}", ) except Exception: self.logger.error( f"Failed to parse table:\n{table_infos[:2]}", exc_info=True, ) # reset table_infos = [] table_start_idx = None elif table_start_idx: table_infos.append(info) """ import logging import re from collections import Counter from collections import defaultdict from . import formatter from . import line_parser from . 
import patterns from nlm_ingestor.ingestor_utils import spell_utils from nlm_ingestor.ingestor_utils.utils import sent_tokenize logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) su = spell_utils.SpellUtil() def stem(line): line = line.replace("'s", "") line = line.replace("โ€™s", "") return line def check_parentheses(text): count = 0 for i in text: if i == "(": count += 1 elif i == ")": count -= 1 return count == 0 def nlm_tokenize(line): # print(line) tokens = [] if not line: line = "" line = line.lower() trans_table = line.maketrans("-/", " ") line = line.translate(trans_table) line = line.translate(str.maketrans("", "", "๏ฟฝ\\(*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\)'\"โ€”")) # line = patterns.num_unit.sub(r"100 \1", line) line = patterns.num_unit.sub(r"", line) line = stem(line) words = line.split() for word in words: if ( not word.isdigit() and not word.endswith("%") and not word.startswith("$") and not word.endswith("$") ): tokens.append(word) if len(tokens) == 0: tokens.append("unknown") return tokens # make sure that there is at least one word which is greater than two characters def find_floating_chars(line): words = line.split(" ") for word in words: if len(word) > 2: return False return True def is_table_row(line): line = line_parser.Line(line) return line.is_table_row def should_skip(line, xml=False): return len(line) <= 2 if not xml else len(line) == 0 def clean_lines(lines, xml=False): result = [] running_line = "" line_buffer = [] line_type = "para" header_block_idx = -1 block_idx = 0 line_set = set() <fim_suffix> if line_type == "list_item" and running_line[0] in "๏ฟฝ\\*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\'\"โ€”": running_line = running_line[1:].lstrip() block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) return result def line_list_check(prev_line, curr_line, list_char): # if prev_line is list_item and list_char matches curr_line if list_char == curr_line.text[0] and list_char not in ["โ€", "'", '"', "("]: return True # same char is alpha if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha(): if len(prev_line.text) >= 2 and prev_line.text[1].isupper(): # spell check first word first_word = prev_line.text.split(" ")[0] first_word = first_word.replace("'", "") correct_word = su.segment(first_word) if first_word[1:] == correct_word: return True # same char is not alpha but not digit if prev_line.text[0] == curr_line.text[0] and not ( prev_line.text[0].isalpha() or prev_line.text[0].isdigit() or list_char not in ["โ€", "'", '"', "("] ): return True return False def should_join_table(prev_line, curr_line, ents_aligned): """ Check if next line should be joined as a tr. 
This makes no assumption if the current line is a table """ # print() # print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list) # check list of spaced words curr_line_ents = len(prev_line.visual_line.text_list) next_line_ents = len(curr_line.visual_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count) tab_match = ( prev_line.visual_line.tab_count == curr_line.visual_line.tab_count and curr_line.visual_line.tab_count > 0 ) # casing should also be the same same_case = ( prev_line.text[0].islower() == curr_line.text[0].islower() or prev_line.text[0].isupper() == curr_line.text[0].isupper() ) colon_check = ( prev_line.hit_colon and curr_line.hit_colon and prev_line and same_case and not prev_line.incomplete_line ) # if prev_line.hit_colon and curr_line.hit_colon: # print() # print("colon check") # print(prev_line.visual_line.text_list) # print(curr_line.visual_line.text_list) # col_check # print(tab_match, ent_match, colon_check) tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count return ( (tab_match and ent_match) or colon_check or (ents_aligned and ent_match and tab_check) ) def check_page_spacing(prev_line, curr_line, spacing_dict): # print("^"*50) # print("checking page stats") # print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text) # print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text) # print() diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y) # find best fs reference prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs} curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs} same_fs = prev_line_fs.intersection(curr_line_fs) fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs min_check = ( spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None ) max_check = ( spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None ) normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3 if min_check or normal_check or max_check: # get all fs in spacing dict # see if the diff top is a min # print("checking space dict") distance_list = [] for val in spacing_dict: if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2: distance_list.append((val, val[1])) # print(distance_list) val = min(distance_list) if len(distance_list) else [] if len(val): join_fs, join_top = val[0] if len(val): join_fs, join_top = val[0] if val[0] == (fs, diff_top): # or close # print("SHOULDJOIN") return True elif ( join_fs == fs and ((diff_top - 1) == join_top) or ((diff_top + 1) == join_top) ): return True return False def compute_overlap( start_x0: float, end_x0: float, start_x1: float, end_x1: float, divide_by_min=True, ) -> float: """ Computes the % of intersection (overlap) of two lines w.r.t. 
the shortest line """ width_x0 = abs(end_x0 - start_x0) width_x1 = abs(end_x1 - start_x1) if start_x0 <= start_x1 <= end_x0: intersect = min(abs(end_x0 - start_x1), width_x1) elif start_x0 <= end_x1 <= end_x0: intersect = min(abs(end_x1 - start_x0), width_x1) elif start_x1 <= start_x0 <= end_x0 <= end_x1: intersect = abs(end_x0 - start_x0) else: intersect = 0.0 if divide_by_min: intersect /= min(width_x0, width_x1) + 1e-5 else: intersect /= max(width_x0, width_x1) + 1e-5 return intersect def compute_overlap_top_bottom( start_x0: float, end_x0: float, start_x1: float, end_x1: float, ) -> float: """ This is different from the above function. Finds percentage overlap of top to bottom. Score of 100% is possible doesn't reference the shortest line """ width_x1 = abs(end_x1 - start_x1) if width_x1 == 0: return 0.0 if start_x0 <= start_x1: # measure from left to right if end_x1 <= end_x0: # if start and end both less, full in subset return 1.0 return (end_x1 - start_x0) / width_x1 else: # measure from bottom start if end_x1 <= start_x0: return 0.0 return (end_x1 - start_x0) / width_x1 def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1): """ This is different from the above function. Finds percentage overlap of top to bottom. Score of 100% is possible doesn't reference the shortest line """ # print(start_x0, end_x0) # print(start_x1, end_x1) if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line # print() # print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0)) return (end_x1 - start_x1) / (end_x0 - start_x0) # other conditions # elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line # return # else: #to the right of bottom line return 1.0 # header check for lines with similar font # header check for lines with similar font def visual_header_check(prev_line, curr_line, same_font): # check top overlap (small) if the font size is bigger # print() # print("visual_header check:") # print("prev", prev_line.text) # print("checking", curr_line.text) # top also has to be higher # print("prev_line.visual_line.start_y, prev_line.visual_line.end_y") # print(prev_line.visual_line.start_y, prev_line.visual_line.end_y) # print(prev_line.visual_line.start_y, curr_line.visual_line.start_y) if prev_line.visual_line.wrapped_page: return False if prev_line.visual_line.start_y < curr_line.visual_line.start_y: prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x # print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x") # print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x) # print("curr_line.visual_line.min_x, curr_line.visual_line.max_x") # print(curr_line.visual_line.min_x, curr_line.visual_line.max_x) # print("prev_line_width / curr_line_width") # print(prev_line_width / curr_line_width) # print("prev_line_width, curr_line_width") # print(prev_line_width, curr_line_width) if curr_line_width == 0: return False # print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x)) if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x): if round(prev_line_width) == round(curr_line_width): # print() # print("NOT A HEADER1") return False offset = 0 # print(prev_line.visual_line.min_x, curr_line.visual_line.min_x) # print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x) if prev_line.visual_line.min_x <= curr_line.visual_line.min_x: offset = 
curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset # print("(prev_line_width - offset) / curr_line_width") # print((prev_line_width - offset) / curr_line_width) overlap_percentage = (prev_line_width - offset) / curr_line_width different_font_style = ( prev_line.visual_line.fw != curr_line.visual_line.fw or prev_line.visual_line[1] != curr_line.visual_line[1] or prev_line.visual_line.fs > curr_line.visual_line.fs ) if ( overlap_percentage < 0.3 or (different_font_style and overlap_percentage < 0.6) or (prev_line.line_type == "header" and different_font_style) # or (prev_line.is_header and different_font_style) ): # print("HEADER INDENT", prev_line.is_header) # print("overlap rule::", (prev_line_width - offset) / curr_line_width) # print(True) return True # print(False) # print() # print("NOT A HEADER") return False def visual_header_from_stats(prev_line, curr_line, page_stats): prev_fs = prev_line.visual_line.fs curr_fs = curr_line.visual_line.fs median_val = round(page_stats["median_fs"]) max_val = round(max(page_stats["fs_list"])) max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True prev_fs_diff = round(prev_fs - median_val) curr_fs_diff = ( round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8 ) # curr_fs is the median varied_set = len(set(page_stats["fs_list"])) >= 4 rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]]) unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"]) prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff # print("prev_fs, curr_fs", prev_fs, curr_fs) # print("unique text") # print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) ) # print("visual_header check", len(set(page_stats["fs_list"]))) # print("varied_set", varied_set, "unique_text", unique_text) # print(rounded_fs_count) # print() # close from max or far enough from median bigger_text = max_val_diff or ( prev_curr_ratio_from_median > 2 ) # TODO text must also be relatively uncommon if varied_set and (unique_text <= 0.08): if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3: # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True # header join if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1): # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True return False # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): def check_tr_alignment(prev_line, curr_line): # print("-=" * 50) # print("check_tr_alignment!") # print(prev_line.text) # print(curr_line.text) # print() prev_ents = len(prev_line.visual_line.text_list) curr_ents = len(curr_line.visual_line.text_list) prev_positions = prev_line.visual_line.start_x_list curr_positions = curr_line.visual_line.start_x_list prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent # print(prev_line_start_ents) # print(curr_line_start_ents) same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1 if len(prev_line_start_ents) == len(curr_line_start_ents): prev_positions = prev_line_start_ents curr_positions = curr_line_start_ents if len(prev_line_start_ents) == len(curr_positions) and len( prev_line_start_ents, ) != len( prev_positions, ): # joined p_tags prev_positions = 
prev_line_start_ents if not same_ents: # print("check_tr_alignment False1") # print(prev_ents, curr_ents) return False # print("CHECKING POSITIONS") # print(prev_positions) # print(curr_positions) for p_x, c_x in zip(prev_positions, curr_positions): p_x = round(p_x) c_x = round(c_x) if abs(p_x - c_x) > 100: # print("False") # print("check_tr_alignment False3") return False # print("check_tr_alignment True") return True def check_layout(prev_line, curr_line, prev_above_curr): prev_line_width = range( int(prev_line.visual_line.min_x), int(prev_line.visual_line.max_x), ) # weird edge case if not prev_line_width: prev_line_width = range( int(prev_line.visual_line.max_x), int(prev_line.visual_line.min_x), ) curr_line_width = range( int(curr_line.visual_line.min_x), int(curr_line.visual_line.max_x), ) prev_line_width = set(prev_line_width) prev_curr_overlap = prev_line_width.intersection(curr_line_width) if prev_curr_overlap and not prev_above_curr: # print(prev_line.text) # print(curr_line.text) # print("misplaced text group") # print() return True return False def order_blocks(blocks): block_group_dict = defaultdict(list) for idx, block in enumerate(blocks): # print(idx, "block-group", block["group_id"], block["block_type"], block['block_text']) group_id = block["group_id"] block_group_dict[group_id].append(block) block_group_list = [] # list that holds tuples (group_id, y_pos) for block_group_id in block_group_dict: block_group_list.append( (block_group_id, block_group_dict[block_group_id][0]["y"]), ) # append starting y position of group block_group_list = sorted( block_group_list, key=lambda x: x[1], ) # sort block groups by y position # get list of ordered block group keys ordered_blocks = [] for block_group_id, y in block_group_list: ordered_blocks += block_group_dict[block_group_id] # for b in original_blocks: # re-index blocks and headers based off of new ordering header_idx = 0 for idx, block in enumerate(ordered_blocks): block["block_idx"] = idx if block["block_type"] == "header": header_idx = idx ordered_blocks[idx]["header_block_idx"] = header_idx return ordered_blocks def visual_clean_lines( lines, page_stats={}, page_info_dict={}, page_idx=0, line_set={}, ): page_blocks = [] header_block_idx = -1 block_idx = 0 # block_idx = page_idx style_dict = {} join_font_spacing = False prev_line = None text_list = [] prev_ents = 0 curr_ents = 0 is_incomplete = False colon_rule = False text_group_start = True text_group_start_idx = 0 prev_line = None next_line = None # for idx, line in enumerate(lines[12:14]): sentence_visual_end = False group_id = 0 for idx, line in enumerate(lines): # print(idx) line_str, style_dict, text_list = ( line["text"], line["style"], line["text_list"], ) line_str = " ".join(line_str.split()) if should_skip(line_str): continue if line_str in line_set: continue if len(line_str.split()) > 8: line_set.add(line_str) curr_line = line_parser.Line( line_str=line_str, style_dict=style_dict, text_list=text_list, page_details=page_stats, ) if prev_line is None: # initialize memory of previous line. 
# this will update with join decisions list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "list_char": list_char, "fs": curr_line.visual_line.start_fs, "text_group_start_idx": text_group_start_idx, "block_list": curr_line.visual_line.text_list, "line": curr_line, "y": curr_line.visual_line.start_y, "group_id": group_id, } prev_line = curr_line block_idx += 1 # if (idx <= 3) or (idx >= len(lines) - 3): # line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip() # if line_without_numbers: # # track block_idx for de-duplication # line_set[line_without_numbers].append((page_idx, block_idx)) page_blocks.append(block) continue # print("--" * 50) # print(prev_line.line_type, "\n", prev_line.text) # print(prev_ents) # print(prev_line.visual_line.fw_list) # print(prev_line.visual_line.font_family) # print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text) # print(prev_line.visual_line.mode_fs) # print(curr_line.line_type, "\n", curr_line.text) # print(curr_ents) # print() # print(curr_line.visual_line.font_family) # print(curr_line.visual_line.mode_fs) # print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text) if ( len(prev_line.text) > 1 and len(curr_line.text) > 1 and prev_line.text[:2] == curr_line.text[:2] and prev_line.text[1] == " " and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit()) and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha()) ): curr_line.line_type = "list_item" curr_line.is_list_item = True curr_line.is_list_or_row = True if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["block_type"] = "list_item" page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() same_start_fs = ( abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5 ) same_end_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5 ) same_end_start_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5 ) prev_above_curr = ( True if prev_line.visual_line.end_y < curr_line.visual_line.start_y else False ) y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y top_overlap = compute_overlap_top_bottom( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) bottom_overlap = compute_bottom_top_overlap( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) prev_overlap_curr = True if bottom_overlap or top_overlap else False use_visual_join = True if prev_above_curr and prev_overlap_curr else False if not use_visual_join and prev_line.incomplete_line: join_font_spacing = True if not (prev_line.is_table_row or curr_line.is_table_row): if page_stats["n_lines"] <= 3: join_font_spacing = True else: join_font_spacing = check_page_spacing( prev_line, curr_line, page_stats["fs_and_diff_next_y"], ) # if the font is different and font-family is different different_font_family = ( 
curr_line.visual_line.font_family != prev_line.visual_line.font_family ) different_common_fs = ( prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs ) different_font = ( different_font_family and different_common_fs and not join_font_spacing ) # start and end characters are same font or the mode of fonts of both lines is the same same_font = ( (prev_line.visual_line.fs == curr_line.visual_line.fs) or (same_start_fs and same_end_fs) or same_end_start_fs or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs ) and not different_font prev_ents = ( len(prev_line.visual_line.text_list) if not prev_line.line_type == "list_item" else 0 ) curr_ents = ( len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0 ) ents_aligned = check_tr_alignment(prev_line, curr_line) is_incomplete_sent = ( prev_line.incomplete_line and not prev_line.ends_with_period or prev_line.ends_with_comma ) # logic using line after curr if idx + 1 < len(lines): # this is inefficent as line_parser is called twice, # once for next_line and once for curr_line. next_line = lines[idx + 1] # print("NEXT LINE\n", next_line['text']) next_line_str, next_style_dict, next_text_list = ( next_line["text"], next_line["style"], next_line["text_list"], ) next_line = line_parser.Line( line_str=next_line_str, style_dict=next_style_dict, text_list=next_text_list, page_details=page_stats, ) # if the last line was not a table, check if the next line is a table to avoid single tr if prev_line.line_type != "table_row" and not ents_aligned: # check if the next line is a table and matches curr_line next_line_tr = next_line.line_type == "table_row" or should_join_table( curr_line, next_line, False, ) if not next_line_tr and curr_line.line_type == "table_row": curr_line.line_type = "para" # if the next line is joinable by visual stats but prev and curr are not # don't join the line (only true by x-span check and y is below for prev cur) # if this is not true ignore the rule prev_not_above_next = ( next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y ) next_line_join = False if next_line and check_layout(prev_line, next_line, prev_not_above_next): next_line_join = check_page_spacing( curr_line, next_line, page_stats["fs_and_diff_next_y"], ) # if the prev line is not visually joinable and the curr_next is # make sure the prev_line doesn't join the curr_line curr_next_visual_join = not join_font_spacing and next_line_join # print() # print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line") # print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line) # print("join_font_spacing:,", join_font_spacing) is_incomplete = ( is_incomplete_sent or (join_font_spacing and not sentence_visual_end) or curr_line.continuing_line ) # print("is_incomplete", is_incomplete) has_overlap_with_min = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=True, ) > 0.7 ) is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0 is_visually_apart = (has_overlap_with_min and not is_below) or ( not has_overlap_with_min and is_below ) above_bold_below_not = ( prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0 ) has_overlap_with_max = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, 
prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=False, ) > 0.3 ) is_not_header_over_para = True if ( above_bold_below_not and not has_overlap_with_max and prev_line.line_type == "header" and not prev_line.incomplete_line ): is_not_header_over_para = False # print("header over para check") # print("""above_bold_below_not # and not has_overlap_with_max # and prev_line.line_type == "header" # """) # print(above_bold_below_not) # print(has_overlap_with_max, j) # print(prev_line.line_type == "header") # print() # print(is_not_header_over_para) ########### # List item if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]): prev_line.line_type = "list_item" curr_line.line_type = "list_item" curr_line.is_list_item = True # change prev_line to list item if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() page_blocks[-1]["block_type"] = "list_item" close_text_y = ( curr_line.visual_line.start_y - curr_line.visual_line.mode_fs - prev_line.visual_line.start_y - prev_line.visual_line.mode_fs ) <= 0 aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x title_text = False if len(lines) < 10: title_text = top_overlap == 1.0 and close_text_y and aligned_text visual_header = visual_header_check(prev_line, curr_line, same_font) list_item_rule = curr_line.has_list_char or ( curr_line.numbered_line and not ( (prev_line.incomplete_line and curr_line.continuing_line) or join_font_spacing ) ) last_2_block_tr = False if len(page_blocks) >= 2: last_block_tr = ( page_blocks[-1]["block_type"] == "table_row" and page_blocks[-2]["block_type"] == "table_row" ) if not last_block_tr and curr_line.line_type == "para": # check to join if prev_line.incomplete_line and curr_line.continuing_line: last_2_block_tr = True no_space_join = prev_line.ends_with_period and curr_line.text[0] != " " visual_header_by_stats = visual_header_from_stats( prev_line, curr_line, page_stats, ) header_join = False common_list = curr_line.has_list_char or prev_line.has_list_char if ( visual_header_by_stats and curr_line.incomplete_line and same_font and not (prev_line.is_table_row or curr_line.is_table_row or common_list) ): header_join = True # print("LINEJOIN CHECK") # print("positive\n", "*" * 10) # print(f"\nsame_font:{same_font}", # f"\nis_incomplete:{is_incomplete}", # f"\nis_not_header_over_para:{is_not_header_over_para}") # print("join_font_spacing", join_font_spacing) # print("header join", header_join) # print() # print("negative\n", "*" * 10) # print(f"\nis_visually_apart:{is_visually_apart}", # f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}", # f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}", # f"\ncurr_line table {curr_line.line_type == 'table_row'}", # f"\ncurr_line list {curr_line.is_list_item}", # f"\nvisual_header {visual_header}", # f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}') if ( same_font and not should_join_table(prev_line, curr_line, ents_aligned) and not (curr_line.line_type == "table_row" or list_item_rule) and not (prev_line.line_type == "table_row" and not last_2_block_tr) and is_incomplete and not curr_next_visual_join # is_visually_apart and not visual_header or not check_parentheses(prev_line.text) and is_not_header_over_para and not no_space_join or title_text or header_join ): # print("JOIN") if not is_visually_apart and 
bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False if page_stats["n_lines"] <= 3: page_blocks[-1]["block_type"] = "header" elif ( not prev_line.line_type == "list_item" ): # and not curr_line.visual_line.is_header: page_blocks[-1]["block_type"] = "para" new_text = formatter.connect( prev_line.text.rstrip(), curr_line.text.lstrip(), ) new_text_list = ( prev_line.visual_line.text_list + curr_line.visual_line.text_list ) # print("Max ex min ex assignment") max_x = max(prev_line.visual_line.max_x, prev_line.visual_line.max_x) min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x) prev_line_type = prev_line.line_type page_blocks[-1]["block_text"] = new_text prev_start_y = prev_line.visual_line.start_y curr_start_y = curr_line.visual_line.start_y prev_end_y = prev_line.visual_line.end_y wrapped_page = prev_line.visual_line.wrapped_page # pass the line parser attributes prev_line = curr_line # add appended text and text_list, preserve the line type prev_line.text = new_text prev_line.visual_line.start_y = prev_start_y prev_line.visual_line.text_list = new_text_list prev_line.line_type = prev_line_type prev_line.visual_line.min_x = min_x prev_line.visual_line.max_x = max_x prev_line.visual_line.wrapped_page = wrapped_page if curr_start_y < prev_end_y: prev_line.visual_line.wrapped_page = True # print(prev_start_y) # print("Join") # print() # print("-" * 50) # print() # new block else: # print("NEW block") # print("*" * 50) if not is_visually_apart and bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False # print("-"*50) colon_rule = ( prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents ) # normal case tab_check_join = { prev_line.visual_line.tab_count_join, prev_line.visual_line.tab_count, } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count} tab_check = sum(tab_check_join) > 0 # print("-+" * 50) # print("TAB POSITIONS") # print(prev_line.text) # print(prev_line.visual_line.start_x_list) # print(prev_line.visual_line.start_x_list_single_ent) # print(prev_line.visual_line.tab_count) # print(prev_line.visual_line.tab_count_join) # # print(curr_line.text) # print(curr_line.visual_line.start_x_list) # print(curr_line.visual_line.start_x_list_single_ent) # print(curr_line.visual_line.tab_count) # print(curr_line.visual_line.tab_count_join) # print("tabcheck", tab_check) # print("ents_aligned", ents_aligned) # print(prev_ents, curr_ents) # print(curr_line.visual_line.text_list) # print("-+" * 50) if visual_header_by_stats and prev_line.line_type != "table_row": page_blocks[-1]["block_type"] = "header" elif ( colon_rule and prev_ents == 1 and prev_line.line_type != "list_item" and not (prev_line.incomplete_line and curr_line.continuing_line) ): # print("Table Conversion") # print() # print("colon check") # print(prev_line.text.split(":")) # print(curr_line.text.split(":")) # print("TR1") new_text_list = prev_line.text.split(":") new_text_list = [new_text_list[0] + ":", new_text_list[1:]] page_blocks[-1]["block_type"] = "table_row" page_blocks[-1]["block_list"]: new_text_list if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" curr_line.is_list_or_row = True # print("Table Conversion!") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR3") elif ( 
tab_check and ents_aligned and prev_line.line_type != "list_item" ) or (colon_rule and not prev_line.incomplete_line): # print("Table Conversion") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR2") page_blocks[-1]["block_type"] = "table_row" if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" else: text_group_start = True text_group_start_idx = -1 list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx if (visual_header or visual_header_by_stats) and not ( prev_line.line_type == "list_item" or prev_line.line_type == "numbered_list_item" ): page_blocks[-1]["block_type"] = "header" # print() # print("*" * 40) # print("NEW BLOCK") # print() # print("*" * 40) # print(curr_line.line_type, curr_line.text) # group attribute if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0: group_id += 1 block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "text_group_start_idx": text_group_start_idx, "list_char": list_char, "group_id": group_id, "fs": curr_line.visual_line.start_fs, "x": curr_line.visual_line.start_x, "y": curr_line.visual_line.start_y, "line": curr_line, "block_list": curr_line.visual_line.text_list, } # This is to account for when the headers get false positive #TODO improve header code prev_text = page_blocks[-1]["block_text"] if page_blocks[-1]["block_type"] == "header" and ( len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16 ): page_blocks[-1]["block_type"] = "para" prev_line = curr_line block_idx += 1 page_blocks.append(block) # not too many blocks there may be title text missed if len(page_blocks) <= 2: for idx, block in enumerate(page_blocks): if "." 
not in block["block_text"] and len(block["block_text"].split()) < 10: page_blocks[idx]["block_type"] = "header" page_blocks = order_blocks(page_blocks) return page_blocks, line_set def clean_line(line): line = line.replace("\n", " ") line = line.replace("\t", " ") line = line.strip() return line def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) def connect(prev, curr): has_space = prev.endswith(" ") result = prev + ("" if has_space else " ") + curr return result def get_numbers(line): # test = re.compile(r"[0-9]+\.?[0-9]?") regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$") return regex.search(line) def check_block_join(prev_block, block): prev_text = prev_block["block_text"] curr_text = block["block_text"] blocks_are_paras = ( prev_block["block_type"] == "para" and block["block_type"] == "para" ) if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras: prev_line = line_parser.Line(prev_block["block_text"]) curr_line = line_parser.Line(block["block_text"]) if prev_line.incomplete_line or curr_line.continuing_line: return True return False def join_blocks(page_blocks, blocks): prev_last_block = page_blocks[-1][-1] # update page blocks and blocks # prev_blocks = page_blocks[-1] # last_prev_block = prev_blocks[-1] # check to join last_prev_block with first blocks[0] # if it's a join, pop the block and join, subtract block indexes prev_last_block["block_text"] = ( prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip() ) prev_last_block["block_list"].append(blocks[0]["block_list"]) # print(prev_block) page_blocks[-1][-1] = prev_last_block for block in blocks[1:]: block["block_idx"] -= 1 return page_blocks, blocks[1:] <fim_middle>for line_str in lines: # print(line_str) line_str = clean_line(line_str) if should_skip(line_str, xml=xml): continue line_without_numbers = re.sub(r"\d+", "", line_str) if line_without_numbers in line_set: continue else: line_set.add(line_without_numbers) curr_line = line_parser.Line(line_str) # this converst strings like 'e x e c u t i v e summary' to 'executive summary' if not xml and curr_line.has_spaced_characters: line_str = fix_spaced_characters(line_str) curr_line = line_parser.Line(line_str) if len(line_buffer) > 0: # find out if previous line was a discontinous line prev_line = line_buffer[-1] logger.debug("========") logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n") logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n") # keep connecting lines as long as they seem incomplete is_incomplete = prev_line.incomplete_line or ( len(line_buffer) > 1 and not prev_line.ends_with_period ) logger.debug( f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}", ) if ( is_incomplete and not (curr_line.is_list_or_row or curr_line.line_type == "list_item") ) or curr_line.continuing_line: logger.debug("connecting..") running_line = formatter.connect(running_line, curr_line.text) line_buffer.append(curr_line) # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers if not line_type == "list_item": line_type = "para" else: # commit the line and start a new line # remove different types of bulletted list (for better formatting) but do not touch numbered line logger.debug("starting new line..") # if line_type == "list_item": # running_line = running_line[1:].lstrip() if line_type == "header": header_block_idx = block_idx block = { 
"block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) block_idx = block_idx + 1 running_line = curr_line.text line_buffer = [curr_line] line_type = curr_line.line_type logger.debug("========") else: running_line = curr_line.text line_type = curr_line.line_type line_buffer = [curr_line]
for line_str in lines:
    # print(line_str)
    line_str = clean_line(line_str)
    if should_skip(line_str, xml=xml):
        continue
    line_without_numbers = re.sub(r"\d+", "", line_str)
    if line_without_numbers in line_set:
        continue
    else:
        line_set.add(line_without_numbers)
    curr_line = line_parser.Line(line_str)
    # this converts strings like 'e x e c u t i v e summary' to 'executive summary'
    if not xml and curr_line.has_spaced_characters:
        line_str = fix_spaced_characters(line_str)
        curr_line = line_parser.Line(line_str)
    if len(line_buffer) > 0:
        # find out if the previous line was a discontinuous line
        prev_line = line_buffer[-1]
        logger.debug("========")
        logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
        logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
        # keep connecting lines as long as they seem incomplete
        is_incomplete = prev_line.incomplete_line or (
            len(line_buffer) > 1 and not prev_line.ends_with_period
        )
        logger.debug(
            f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
        )
        if (
            is_incomplete
            and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
        ) or curr_line.continuing_line:
            logger.debug("connecting..")
            running_line = formatter.connect(running_line, curr_line.text)
            line_buffer.append(curr_line)
            # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
            if not line_type == "list_item":
                line_type = "para"
        else:
            # commit the line and start a new line
            # remove different types of bulleted lists (for better formatting) but do not touch numbered lines
            logger.debug("starting new line..")
            # if line_type == "list_item":
            #     running_line = running_line[1:].lstrip()
            if line_type == "header":
                header_block_idx = block_idx
            block = {
                "block_idx": block_idx,
                "block_text": running_line,
                "block_type": line_type,
                "text_group_start_idx": -1,
                "block_list": [],
                "header_block_idx": header_block_idx,
                "level": 0,
            }
            result.append(block)
            block_idx = block_idx + 1
            running_line = curr_line.text
            line_buffer = [curr_line]
            line_type = curr_line.line_type
        logger.debug("========")
    else:
        running_line = curr_line.text
        line_type = curr_line.line_type
        line_buffer = [curr_line]
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
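The completed block in the row above deduplicates lines through a digit-stripped line_set lookup, which filters running headers and footers that differ only by page number. Below is a minimal sketch of just that trick; the sample lines are invented for illustration.

# Sketch of the de-duplication check in clean_lines above: stripping
# digits makes "Page 1" and "Page 2" headers collapse to the same key.
import re

lines = [
    "Annual Report - Page 1",
    "Revenue grew this quarter.",
    "Annual Report - Page 2",  # same header, new page number
]
line_set = set()
kept = []
for line_str in lines:
    line_without_numbers = re.sub(r"\d+", "", line_str)
    if line_without_numbers in line_set:
        continue  # already seen modulo digits: treat as repeated boilerplate
    line_set.add(line_without_numbers)
    kept.append(line_str)

print(kept)  # ['Annual Report - Page 1', 'Revenue grew this quarter.']

Since clean_lines keys the set on every surviving line, two genuine content lines that differ only in digits would also collide; that is the trade-off it accepts to catch paginated boilerplate.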
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/text_ingestor.py def blocks_to_json(page_blocks): results = [] block_count = 0 for page_idx, blocks in enumerate(page_blocks): result = [] block_start = block_count header_block_idx = -1 header_block_text = "" for block_idx_in_page, block in enumerate(blocks): if block["block_text"]: block_sents = utils.sent_tokenize(block["block_text"]) # header_block_idx = block["header_block_idx"] if block["block_type"] == "header": header_block_idx = block["block_idx"] header_block_text = block["block_text"] result.append( { "block_text": block["block_text"], "block_idx": block["block_idx"], "block_sents": block_sents, "block_type": block["block_type"], "header_block_idx": block_start + header_block_idx, "page_idx": page_idx, "block_idx_in_page": block_start + block_idx_in_page, "header_text": header_block_text, "text_group_start_idx": block["text_group_start_idx"], "block_list": block["block_list"], "level":0, "block_class": block["block_class"] if "block_class" in block else {} }, ) block_count += 1 results.append(result) return results # nlm-ingestor/nlm_ingestor/ingestor/styling_utils.py def tops_2_dict(p_items): tops_2_info = defaultdict(list) idx_2_top = {} for p_idx, p_item in enumerate(p_items): if not p_item.text.strip(): continue style_str = p_item.attrs.get("style", "") if not style_str: continue # do not strip text as trailing white-space is used as a features text = unicodedata.normalize("NFKD", p_item.text) style = get_p_styling_dict(style_str) start_y = style["start_y"] tops_2_info[round(start_y, 0)].append((p_idx, text, style)) idx_2_top[p_idx] = round(start_y, 0) # print(tops_2_info) return tops_2_info, idx_2_top # nlm-ingestor/nlm_ingestor/ingestor/table_parser.py def __init__(self, infos): self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(logging.INFO) self.tables = {} self.two_column_table_idx = set() self.resolved_tables = set() if not infos: return table_infos = [] table_start_idx = None for idx, info in enumerate(infos): if info.get("is_table_start", False) and not info.get("has_merged_cells", False): self.logger.debug(f"Found table start from match_idx:{idx}") table_start_idx = idx table_infos.append(info) elif table_start_idx is not None and info.get("is_table_end", False): table_infos.append(info) self.logger.debug(f"Table ends with match_idx:{idx}") # resolve table try: df = self.resolve_table_from_infos(table_infos) if isinstance(df, pd.DataFrame): self.logger.info( f"Found table at match_idx:{idx} of shape {df.shape}", ) self.tables[table_start_idx] = df if ( df.shape[1] == 1 and df.columns[0] == "_UNKNOWN_COLUMN_1_" and df.index.name == "_UNKNOWN_COLUMN_0_" ): for info_idx in range(len(table_infos)): self.two_column_table_idx.add(idx - info_idx) self.resolved_tables.add(table_infos[0]["table_idx"]) else: self.logger.error( f"Found table at match_idx:{idx} but failed to parse\n{table_infos[:2]}", ) except Exception: self.logger.error( f"Failed to parse table:\n{table_infos[:2]}", exc_info=True, ) # reset table_infos = [] table_start_idx = None elif table_start_idx: table_infos.append(info) """ import logging import re from collections import Counter from collections import defaultdict from . import formatter from . import line_parser from . 
import patterns from nlm_ingestor.ingestor_utils import spell_utils from nlm_ingestor.ingestor_utils.utils import sent_tokenize logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) su = spell_utils.SpellUtil() def stem(line): line = line.replace("'s", "") line = line.replace("โ€™s", "") return line def check_parentheses(text): count = 0 for i in text: if i == "(": count += 1 elif i == ")": count -= 1 return count == 0 def nlm_tokenize(line): # print(line) tokens = [] if not line: line = "" line = line.lower() trans_table = line.maketrans("-/", " ") line = line.translate(trans_table) line = line.translate(str.maketrans("", "", "๏ฟฝ\\(*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\)'\"โ€”")) # line = patterns.num_unit.sub(r"100 \1", line) line = patterns.num_unit.sub(r"", line) line = stem(line) words = line.split() for word in words: if ( not word.isdigit() and not word.endswith("%") and not word.startswith("$") and not word.endswith("$") ): tokens.append(word) if len(tokens) == 0: tokens.append("unknown") return tokens # make sure that there is at least one word which is greater than two characters def find_floating_chars(line): words = line.split(" ") for word in words: if len(word) > 2: return False return True def is_table_row(line): line = line_parser.Line(line) return line.is_table_row def should_skip(line, xml=False): return len(line) <= 2 if not xml else len(line) == 0 def clean_lines(lines, xml=False): result = [] running_line = "" line_buffer = [] line_type = "para" header_block_idx = -1 block_idx = 0 line_set = set() for line_str in lines: # print(line_str) line_str = clean_line(line_str) if should_skip(line_str, xml=xml): continue line_without_numbers = re.sub(r"\d+", "", line_str) if line_without_numbers in line_set: continue else: line_set.add(line_without_numbers) curr_line = line_parser.Line(line_str) # this converst strings like 'e x e c u t i v e summary' to 'executive summary' if not xml and curr_line.has_spaced_characters: line_str = fix_spaced_characters(line_str) curr_line = line_parser.Line(line_str) if len(line_buffer) > 0: # find out if previous line was a discontinous line prev_line = line_buffer[-1] logger.debug("========") logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n") logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n") # keep connecting lines as long as they seem incomplete is_incomplete = prev_line.incomplete_line or ( len(line_buffer) > 1 and not prev_line.ends_with_period ) logger.debug( f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}", ) if ( is_incomplete and not (curr_line.is_list_or_row or curr_line.line_type == "list_item") ) or curr_line.continuing_line: logger.debug("connecting..") running_line = formatter.connect(running_line, curr_line.text) line_buffer.append(curr_line) # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers if not line_type == "list_item": line_type = "para" else: # commit the line and start a new line # remove different types of bulletted list (for better formatting) but do not touch numbered line logger.debug("starting new line..") <fim_suffix> # running_line = running_line[1:].lstrip() if line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) block_idx = block_idx + 1 running_line = 
curr_line.text line_buffer = [curr_line] line_type = curr_line.line_type logger.debug("========") else: running_line = curr_line.text line_type = curr_line.line_type line_buffer = [curr_line] if line_type == "list_item" and running_line[0] in "๏ฟฝ\\*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\'\"โ€”": running_line = running_line[1:].lstrip() block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) return result def line_list_check(prev_line, curr_line, list_char): # if prev_line is list_item and list_char matches curr_line if list_char == curr_line.text[0] and list_char not in ["โ€", "'", '"', "("]: return True # same char is alpha if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha(): if len(prev_line.text) >= 2 and prev_line.text[1].isupper(): # spell check first word first_word = prev_line.text.split(" ")[0] first_word = first_word.replace("'", "") correct_word = su.segment(first_word) if first_word[1:] == correct_word: return True # same char is not alpha but not digit if prev_line.text[0] == curr_line.text[0] and not ( prev_line.text[0].isalpha() or prev_line.text[0].isdigit() or list_char not in ["โ€", "'", '"', "("] ): return True return False def should_join_table(prev_line, curr_line, ents_aligned): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # print() # print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list) # check list of spaced words curr_line_ents = len(prev_line.visual_line.text_list) next_line_ents = len(curr_line.visual_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count) tab_match = ( prev_line.visual_line.tab_count == curr_line.visual_line.tab_count and curr_line.visual_line.tab_count > 0 ) # casing should also be the same same_case = ( prev_line.text[0].islower() == curr_line.text[0].islower() or prev_line.text[0].isupper() == curr_line.text[0].isupper() ) colon_check = ( prev_line.hit_colon and curr_line.hit_colon and prev_line and same_case and not prev_line.incomplete_line ) # if prev_line.hit_colon and curr_line.hit_colon: # print() # print("colon check") # print(prev_line.visual_line.text_list) # print(curr_line.visual_line.text_list) # col_check # print(tab_match, ent_match, colon_check) tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count return ( (tab_match and ent_match) or colon_check or (ents_aligned and ent_match and tab_check) ) def check_page_spacing(prev_line, curr_line, spacing_dict): # print("^"*50) # print("checking page stats") # print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text) # print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text) # print() diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y) # find best fs reference prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs} curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs} same_fs = prev_line_fs.intersection(curr_line_fs) fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs min_check = ( spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None ) max_check = ( spacing_dict[(fs, diff_top + 1)] 
if (fs, diff_top + 1) in spacing_dict else None ) normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3 if min_check or normal_check or max_check: # get all fs in spacing dict # see if the diff top is a min # print("checking space dict") distance_list = [] for val in spacing_dict: if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2: distance_list.append((val, val[1])) # print(distance_list) val = min(distance_list) if len(distance_list) else [] if len(val): join_fs, join_top = val[0] if val[0] == (fs, diff_top): # or close # print("SHOULDJOIN") return True elif join_fs == fs and ( ((diff_top - 1) == join_top) or ((diff_top + 1) == join_top) ): return True return False def compute_overlap( start_x0: float, end_x0: float, start_x1: float, end_x1: float, divide_by_min=True, ) -> float: """ Computes the % of intersection (overlap) of two lines w.r.t. the shortest line """ width_x0 = abs(end_x0 - start_x0) width_x1 = abs(end_x1 - start_x1) if start_x0 <= start_x1 <= end_x0: intersect = min(abs(end_x0 - start_x1), width_x1) elif start_x0 <= end_x1 <= end_x0: intersect = min(abs(end_x1 - start_x0), width_x1) elif start_x1 <= start_x0 <= end_x0 <= end_x1: intersect = abs(end_x0 - start_x0) else: intersect = 0.0 if divide_by_min: intersect /= min(width_x0, width_x1) + 1e-5 else: intersect /= max(width_x0, width_x1) + 1e-5 return intersect def compute_overlap_top_bottom( start_x0: float, end_x0: float, start_x1: float, end_x1: float, ) -> float: """ This is different from the above function: it finds the percentage overlap of the top line with respect to the bottom line. A score of 100% is possible; it does not normalize by the shortest line. """ width_x1 = abs(end_x1 - start_x1) if width_x1 == 0: return 0.0 if start_x0 <= start_x1: # measure from left to right if end_x1 <= end_x0: # if start and end both less, full in subset return 1.0 return (end_x1 - start_x0) / width_x1 else: # measure from bottom start if end_x1 <= start_x0: return 0.0 return (end_x1 - start_x0) / width_x1 def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1): """ This is different from the above function: it finds the percentage overlap of the bottom line with respect to the top line.
A score of 100% is possible; it does not reference the shortest line. """ # print(start_x0, end_x0) # print(start_x1, end_x1) if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line # print() # print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0)) return (end_x1 - start_x1) / (end_x0 - start_x0) # other conditions # elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line # return # else: #to the right of bottom line return 1.0 # header check for lines with similar font def visual_header_check(prev_line, curr_line, same_font): # check top overlap (small) if the font size is bigger # print() # print("visual_header check:") # print("prev", prev_line.text) # print("checking", curr_line.text) # top also has to be higher # print("prev_line.visual_line.start_y, prev_line.visual_line.end_y") # print(prev_line.visual_line.start_y, prev_line.visual_line.end_y) # print(prev_line.visual_line.start_y, curr_line.visual_line.start_y) if prev_line.visual_line.wrapped_page: return False if prev_line.visual_line.start_y < curr_line.visual_line.start_y: prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x # print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x") # print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x) # print("curr_line.visual_line.min_x, curr_line.visual_line.max_x") # print(curr_line.visual_line.min_x, curr_line.visual_line.max_x) # print("prev_line_width / curr_line_width") # print(prev_line_width / curr_line_width) # print("prev_line_width, curr_line_width") # print(prev_line_width, curr_line_width) if curr_line_width == 0: return False # print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x)) if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x): if round(prev_line_width) == round(curr_line_width): # print() # print("NOT A HEADER1") return False offset = 0 # print(prev_line.visual_line.min_x, curr_line.visual_line.min_x) # print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x) if prev_line.visual_line.min_x <= curr_line.visual_line.min_x: offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset # print("(prev_line_width - offset) / curr_line_width") # print((prev_line_width - offset) / curr_line_width) overlap_percentage = (prev_line_width - offset) / curr_line_width different_font_style = ( prev_line.visual_line.fw != curr_line.visual_line.fw or prev_line.visual_line[1] != curr_line.visual_line[1] or prev_line.visual_line.fs > curr_line.visual_line.fs ) if ( overlap_percentage < 0.3 or (different_font_style and overlap_percentage < 0.6) or (prev_line.line_type == "header" and different_font_style) # or (prev_line.is_header and different_font_style) ): # print("HEADER INDENT", prev_line.is_header) # print("overlap rule::", (prev_line_width - offset) / curr_line_width) # print(True) return True # print(False) # print() # print("NOT A HEADER") return False def visual_header_from_stats(prev_line, curr_line, page_stats): prev_fs = prev_line.visual_line.fs curr_fs = curr_line.visual_line.fs median_val = round(page_stats["median_fs"]) max_val = round(max(page_stats["fs_list"])) max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True prev_fs_diff = round(prev_fs - median_val) curr_fs_diff = ( round(curr_fs -
median_val) else 0.8 ) # curr_fs is the median varied_set = len(set(page_stats["fs_list"])) >= 4 rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]]) unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"]) prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff # print("prev_fs, curr_fs", prev_fs, curr_fs) # print("unique text") # print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) ) # print("visual_header check", len(set(page_stats["fs_list"]))) # print("varied_set", varied_set, "unique_text", unique_text) # print(rounded_fs_count) # print() # close from max or far enough from median bigger_text = max_val_diff or ( prev_curr_ratio_from_median > 2 ) # TODO text must also be relatively uncommon if varied_set and (unique_text <= 0.08): if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3: # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True # header join if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1): # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True return False # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): def check_tr_alignment(prev_line, curr_line): # print("-=" * 50) # print("check_tr_alignment!") # print(prev_line.text) # print(curr_line.text) # print() prev_ents = len(prev_line.visual_line.text_list) curr_ents = len(curr_line.visual_line.text_list) prev_positions = prev_line.visual_line.start_x_list curr_positions = curr_line.visual_line.start_x_list prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent # print(prev_line_start_ents) # print(curr_line_start_ents) same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1 if len(prev_line_start_ents) == len(curr_line_start_ents): prev_positions = prev_line_start_ents curr_positions = curr_line_start_ents if len(prev_line_start_ents) == len(curr_positions) and len( prev_line_start_ents, ) != len( prev_positions, ): # joined p_tags prev_positions = prev_line_start_ents if not same_ents: # print("check_tr_alignment False1") # print(prev_ents, curr_ents) return False # print("CHECKING POSITIONS") # print(prev_positions) # print(curr_positions) for p_x, c_x in zip(prev_positions, curr_positions): p_x = round(p_x) c_x = round(c_x) if abs(p_x - c_x) > 100: # print("False") # print("check_tr_alignment False3") return False # print("check_tr_alignment True") return True def check_layout(prev_line, curr_line, prev_above_curr): prev_line_width = range( int(prev_line.visual_line.min_x), int(prev_line.visual_line.max_x), ) # weird edge case if not prev_line_width: prev_line_width = range( int(prev_line.visual_line.max_x), int(prev_line.visual_line.min_x), ) curr_line_width = range( int(curr_line.visual_line.min_x), int(curr_line.visual_line.max_x), ) prev_line_width = set(prev_line_width) prev_curr_overlap = prev_line_width.intersection(curr_line_width) if prev_curr_overlap and not prev_above_curr: # print(prev_line.text) # print(curr_line.text) # print("misplaced text group") # print() return True return False def order_blocks(blocks): block_group_dict = defaultdict(list) for idx, block in enumerate(blocks): # print(idx, "block-group", block["group_id"], block["block_type"], block['block_text'])
group_id = block["group_id"] block_group_dict[group_id].append(block) block_group_list = [] # list that holds tuples (group_id, y_pos) for block_group_id in block_group_dict: block_group_list.append( (block_group_id, block_group_dict[block_group_id][0]["y"]), ) # append starting y position of group block_group_list = sorted( block_group_list, key=lambda x: x[1], ) # sort block groups by y position # get list of ordered block group keys ordered_blocks = [] for block_group_id, y in block_group_list: ordered_blocks += block_group_dict[block_group_id] # for b in original_blocks: # re-index blocks and headers based off of new ordering header_idx = 0 for idx, block in enumerate(ordered_blocks): block["block_idx"] = idx if block["block_type"] == "header": header_idx = idx ordered_blocks[idx]["header_block_idx"] = header_idx return ordered_blocks def visual_clean_lines( lines, page_stats={}, page_info_dict={}, page_idx=0, line_set={}, ): page_blocks = [] header_block_idx = -1 block_idx = 0 # block_idx = page_idx style_dict = {} join_font_spacing = False prev_line = None text_list = [] prev_ents = 0 curr_ents = 0 is_incomplete = False colon_rule = False text_group_start = True text_group_start_idx = 0 prev_line = None next_line = None # for idx, line in enumerate(lines[12:14]): sentence_visual_end = False group_id = 0 for idx, line in enumerate(lines): # print(idx) line_str, style_dict, text_list = ( line["text"], line["style"], line["text_list"], ) line_str = " ".join(line_str.split()) if should_skip(line_str): continue if line_str in line_set: continue if len(line_str.split()) > 8: line_set.add(line_str) curr_line = line_parser.Line( line_str=line_str, style_dict=style_dict, text_list=text_list, page_details=page_stats, ) if prev_line is None: # initialize memory of previous line. 
# this will update with join decisions list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "list_char": list_char, "fs": curr_line.visual_line.start_fs, "text_group_start_idx": text_group_start_idx, "block_list": curr_line.visual_line.text_list, "line": curr_line, "y": curr_line.visual_line.start_y, "group_id": group_id, } prev_line = curr_line block_idx += 1 # if (idx <= 3) or (idx >= len(lines) - 3): # line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip() # if line_without_numbers: # # track block_idx for de-duplication # line_set[line_without_numbers].append((page_idx, block_idx)) page_blocks.append(block) continue # print("--" * 50) # print(prev_line.line_type, "\n", prev_line.text) # print(prev_ents) # print(prev_line.visual_line.fw_list) # print(prev_line.visual_line.font_family) # print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text) # print(prev_line.visual_line.mode_fs) # print(curr_line.line_type, "\n", curr_line.text) # print(curr_ents) # print() # print(curr_line.visual_line.font_family) # print(curr_line.visual_line.mode_fs) # print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text) if ( len(prev_line.text) > 1 and len(curr_line.text) > 1 and prev_line.text[:2] == curr_line.text[:2] and prev_line.text[1] == " " and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit()) and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha()) ): curr_line.line_type = "list_item" curr_line.is_list_item = True curr_line.is_list_or_row = True if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["block_type"] = "list_item" page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() same_start_fs = ( abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5 ) same_end_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5 ) same_end_start_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5 ) prev_above_curr = ( True if prev_line.visual_line.end_y < curr_line.visual_line.start_y else False ) y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y top_overlap = compute_overlap_top_bottom( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) bottom_overlap = compute_bottom_top_overlap( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) prev_overlap_curr = True if bottom_overlap or top_overlap else False use_visual_join = True if prev_above_curr and prev_overlap_curr else False if not use_visual_join and prev_line.incomplete_line: join_font_spacing = True if not (prev_line.is_table_row or curr_line.is_table_row): if page_stats["n_lines"] <= 3: join_font_spacing = True else: join_font_spacing = check_page_spacing( prev_line, curr_line, page_stats["fs_and_diff_next_y"], ) # if the font is different and font-family is different different_font_family = ( 
curr_line.visual_line.font_family != prev_line.visual_line.font_family ) different_common_fs = ( prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs ) different_font = ( different_font_family and different_common_fs and not join_font_spacing ) # start and end characters are same font or the mode of fonts of both lines is the same same_font = ( (prev_line.visual_line.fs == curr_line.visual_line.fs) or (same_start_fs and same_end_fs) or same_end_start_fs or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs ) and not different_font prev_ents = ( len(prev_line.visual_line.text_list) if not prev_line.line_type == "list_item" else 0 ) curr_ents = ( len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0 ) ents_aligned = check_tr_alignment(prev_line, curr_line) is_incomplete_sent = ( prev_line.incomplete_line and not prev_line.ends_with_period or prev_line.ends_with_comma ) # logic using line after curr if idx + 1 < len(lines): # this is inefficent as line_parser is called twice, # once for next_line and once for curr_line. next_line = lines[idx + 1] # print("NEXT LINE\n", next_line['text']) next_line_str, next_style_dict, next_text_list = ( next_line["text"], next_line["style"], next_line["text_list"], ) next_line = line_parser.Line( line_str=next_line_str, style_dict=next_style_dict, text_list=next_text_list, page_details=page_stats, ) # if the last line was not a table, check if the next line is a table to avoid single tr if prev_line.line_type != "table_row" and not ents_aligned: # check if the next line is a table and matches curr_line next_line_tr = next_line.line_type == "table_row" or should_join_table( curr_line, next_line, False, ) if not next_line_tr and curr_line.line_type == "table_row": curr_line.line_type = "para" # if the next line is joinable by visual stats but prev and curr are not # don't join the line (only true by x-span check and y is below for prev cur) # if this is not true ignore the rule prev_not_above_next = ( next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y ) next_line_join = False if next_line and check_layout(prev_line, next_line, prev_not_above_next): next_line_join = check_page_spacing( curr_line, next_line, page_stats["fs_and_diff_next_y"], ) # if the prev line is not visually joinable and the curr_next is # make sure the prev_line doesn't join the curr_line curr_next_visual_join = not join_font_spacing and next_line_join # print() # print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line") # print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line) # print("join_font_spacing:,", join_font_spacing) is_incomplete = ( is_incomplete_sent or (join_font_spacing and not sentence_visual_end) or curr_line.continuing_line ) # print("is_incomplete", is_incomplete) has_overlap_with_min = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=True, ) > 0.7 ) is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0 is_visually_apart = (has_overlap_with_min and not is_below) or ( not has_overlap_with_min and is_below ) above_bold_below_not = ( prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0 ) has_overlap_with_max = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, 
prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=False, ) > 0.3 ) is_not_header_over_para = True if ( above_bold_below_not and not has_overlap_with_max and prev_line.line_type == "header" and not prev_line.incomplete_line ): is_not_header_over_para = False # print("header over para check") # print("""above_bold_below_not # and not has_overlap_with_max # and prev_line.line_type == "header" # """) # print(above_bold_below_not) # print(has_overlap_with_max, j) # print(prev_line.line_type == "header") # print() # print(is_not_header_over_para) ########### # List item if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]): prev_line.line_type = "list_item" curr_line.line_type = "list_item" curr_line.is_list_item = True # change prev_line to list item if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() page_blocks[-1]["block_type"] = "list_item" close_text_y = ( curr_line.visual_line.start_y - curr_line.visual_line.mode_fs - prev_line.visual_line.start_y - prev_line.visual_line.mode_fs ) <= 0 aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x title_text = False if len(lines) < 10: title_text = top_overlap == 1.0 and close_text_y and aligned_text visual_header = visual_header_check(prev_line, curr_line, same_font) list_item_rule = curr_line.has_list_char or ( curr_line.numbered_line and not ( (prev_line.incomplete_line and curr_line.continuing_line) or join_font_spacing ) ) last_2_block_tr = False if len(page_blocks) >= 2: last_block_tr = ( page_blocks[-1]["block_type"] == "table_row" and page_blocks[-2]["block_type"] == "table_row" ) if not last_block_tr and curr_line.line_type == "para": # check to join if prev_line.incomplete_line and curr_line.continuing_line: last_2_block_tr = True no_space_join = prev_line.ends_with_period and curr_line.text[0] != " " visual_header_by_stats = visual_header_from_stats( prev_line, curr_line, page_stats, ) header_join = False common_list = curr_line.has_list_char or prev_line.has_list_char if ( visual_header_by_stats and curr_line.incomplete_line and same_font and not (prev_line.is_table_row or curr_line.is_table_row or common_list) ): header_join = True # print("LINEJOIN CHECK") # print("positive\n", "*" * 10) # print(f"\nsame_font:{same_font}", # f"\nis_incomplete:{is_incomplete}", # f"\nis_not_header_over_para:{is_not_header_over_para}") # print("join_font_spacing", join_font_spacing) # print("header join", header_join) # print() # print("negative\n", "*" * 10) # print(f"\nis_visually_apart:{is_visually_apart}", # f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}", # f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}", # f"\ncurr_line table {curr_line.line_type == 'table_row'}", # f"\ncurr_line list {curr_line.is_list_item}", # f"\nvisual_header {visual_header}", # f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}') if ( same_font and not should_join_table(prev_line, curr_line, ents_aligned) and not (curr_line.line_type == "table_row" or list_item_rule) and not (prev_line.line_type == "table_row" and not last_2_block_tr) and is_incomplete and not curr_next_visual_join # is_visually_apart and not visual_header or not check_parentheses(prev_line.text) and is_not_header_over_para and not no_space_join or title_text or header_join ): # print("JOIN") if not is_visually_apart and 
bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False if page_stats["n_lines"] <= 3: page_blocks[-1]["block_type"] = "header" elif ( not prev_line.line_type == "list_item" ): # and not curr_line.visual_line.is_header: page_blocks[-1]["block_type"] = "para" new_text = formatter.connect( prev_line.text.rstrip(), curr_line.text.lstrip(), ) new_text_list = ( prev_line.visual_line.text_list + curr_line.visual_line.text_list ) # extend the x-extent of the joined line across both lines max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x) min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x) prev_line_type = prev_line.line_type page_blocks[-1]["block_text"] = new_text prev_start_y = prev_line.visual_line.start_y curr_start_y = curr_line.visual_line.start_y prev_end_y = prev_line.visual_line.end_y wrapped_page = prev_line.visual_line.wrapped_page # pass the line parser attributes prev_line = curr_line # add appended text and text_list, preserve the line type prev_line.text = new_text prev_line.visual_line.start_y = prev_start_y prev_line.visual_line.text_list = new_text_list prev_line.line_type = prev_line_type prev_line.visual_line.min_x = min_x prev_line.visual_line.max_x = max_x prev_line.visual_line.wrapped_page = wrapped_page if curr_start_y < prev_end_y: prev_line.visual_line.wrapped_page = True # print(prev_start_y) # print("Join") # print() # print("-" * 50) # print() # new block else: # print("NEW block") # print("*" * 50) if not is_visually_apart and bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False # print("-"*50) colon_rule = ( prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents ) # normal case tab_check_join = { prev_line.visual_line.tab_count_join, prev_line.visual_line.tab_count, } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count} tab_check = sum(tab_check_join) > 0 # print("-+" * 50) # print("TAB POSITIONS") # print(prev_line.text) # print(prev_line.visual_line.start_x_list) # print(prev_line.visual_line.start_x_list_single_ent) # print(prev_line.visual_line.tab_count) # print(prev_line.visual_line.tab_count_join) # # print(curr_line.text) # print(curr_line.visual_line.start_x_list) # print(curr_line.visual_line.start_x_list_single_ent) # print(curr_line.visual_line.tab_count) # print(curr_line.visual_line.tab_count_join) # print("tabcheck", tab_check) # print("ents_aligned", ents_aligned) # print(prev_ents, curr_ents) # print(curr_line.visual_line.text_list) # print("-+" * 50) if visual_header_by_stats and prev_line.line_type != "table_row": page_blocks[-1]["block_type"] = "header" elif ( colon_rule and prev_ents == 1 and prev_line.line_type != "list_item" and not (prev_line.incomplete_line and curr_line.continuing_line) ): # print("Table Conversion") # print() # print("colon check") # print(prev_line.text.split(":")) # print(curr_line.text.split(":")) # print("TR1") new_text_list = prev_line.text.split(":") new_text_list = [new_text_list[0] + ":", new_text_list[1:]] page_blocks[-1]["block_type"] = "table_row" page_blocks[-1]["block_list"] = new_text_list if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" curr_line.is_list_or_row = True # print("Table Conversion!") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR3") elif (
tab_check and ents_aligned and prev_line.line_type != "list_item" ) or (colon_rule and not prev_line.incomplete_line): # print("Table Conversion") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR2") page_blocks[-1]["block_type"] = "table_row" if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" else: text_group_start = True text_group_start_idx = -1 list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx if (visual_header or visual_header_by_stats) and not ( prev_line.line_type == "list_item" or prev_line.line_type == "numbered_list_item" ): page_blocks[-1]["block_type"] = "header" # print() # print("*" * 40) # print("NEW BLOCK") # print() # print("*" * 40) # print(curr_line.line_type, curr_line.text) # group attribute if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0: group_id += 1 block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "text_group_start_idx": text_group_start_idx, "list_char": list_char, "group_id": group_id, "fs": curr_line.visual_line.start_fs, "x": curr_line.visual_line.start_x, "y": curr_line.visual_line.start_y, "line": curr_line, "block_list": curr_line.visual_line.text_list, } # This is to account for when the headers get false positive #TODO improve header code prev_text = page_blocks[-1]["block_text"] if page_blocks[-1]["block_type"] == "header" and ( len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16 ): page_blocks[-1]["block_type"] = "para" prev_line = curr_line block_idx += 1 page_blocks.append(block) # not too many blocks there may be title text missed if len(page_blocks) <= 2: for idx, block in enumerate(page_blocks): if "." 
not in block["block_text"] and len(block["block_text"].split()) < 10: page_blocks[idx]["block_type"] = "header" page_blocks = order_blocks(page_blocks) return page_blocks, line_set def clean_line(line): line = line.replace("\n", " ") line = line.replace("\t", " ") line = line.strip() return line def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) def connect(prev, curr): has_space = prev.endswith(" ") result = prev + ("" if has_space else " ") + curr return result def get_numbers(line): # test = re.compile(r"[0-9]+\.?[0-9]?") regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$") return regex.search(line) def check_block_join(prev_block, block): prev_text = prev_block["block_text"] curr_text = block["block_text"] blocks_are_paras = ( prev_block["block_type"] == "para" and block["block_type"] == "para" ) if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras: prev_line = line_parser.Line(prev_block["block_text"]) curr_line = line_parser.Line(block["block_text"]) if prev_line.incomplete_line or curr_line.continuing_line: return True return False def join_blocks(page_blocks, blocks): prev_last_block = page_blocks[-1][-1] # update page blocks and blocks # prev_blocks = page_blocks[-1] # last_prev_block = prev_blocks[-1] # check to join last_prev_block with first blocks[0] # if it's a join, pop the block and join, subtract block indexes prev_last_block["block_text"] = ( prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip() ) prev_last_block["block_list"].append(blocks[0]["block_list"]) # print(prev_block) page_blocks[-1][-1] = prev_last_block for block in blocks[1:]: block["block_idx"] -= 1 return page_blocks, blocks[1:] <fim_middle># if line_type == "list_item":
# if line_type == "list_item":
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
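Aside on the record above: its compute_overlap helper decides whether two visual lines are vertically stacked by measuring how much of their x-extents intersect. Below is a minimal, self-contained sketch of the same check, written with the standard interval-intersection formula rather than the branch-by-branch version in the source; the 1e-5 epsilon guards against zero-width spans, and the 0.7 threshold mirrors the has_overlap_with_min test.

def overlap_ratio(start_x0, end_x0, start_x1, end_x1, divide_by_min=True):
    # width of the shared x-interval, clamped to zero for disjoint spans
    intersect = max(0.0, min(end_x0, end_x1) - max(start_x0, start_x1))
    width_x0 = abs(end_x0 - start_x0)
    width_x1 = abs(end_x1 - start_x1)
    denom = min(width_x0, width_x1) if divide_by_min else max(width_x0, width_x1)
    return intersect / (denom + 1e-5)

# lines sharing more than 70% of the narrower span count as stacked text
assert overlap_ratio(0, 100, 20, 100) > 0.7
assert overlap_ratio(0, 40, 60, 100) == 0.0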
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/line_parser.py def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def traverse(parent, level, blocks): for child in parent: # handle cases when there's only a <country /> tag if not child.text: continue if len(list(child)) > 0: # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader blocks.append(header_block) traverse(child, level + 1, blocks) else: # print("\t"*(level + 1), child.text) if not title and child.tag.lower().find("title") != -1: self.title = child.text if child.tag != "textblock": # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) # header_text = " ".join(child.tag.split("_")).title() header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader 
blocks.append(header_block) else: level -= 1 lines = child.text.split("\n") # print("\t" * (level + 1), "======") # for line in lines: # print("\t" * (level + 1), line) # print("\t" * (level + 1), "======") col_blocks = processors.clean_lines(lines, xml=True) header_text = blocks[-1]["block_text"] has_header = False for block in col_blocks: # print("\t" * (level + 1), block["block_text"]) inline_header = has_header and block["block_type"] == "para" block["header_text"] = para_header if inline_header else header_text indent_offset = 2 if inline_header else 1 block["level"] = level + indent_offset block["block_idx"] = len(blocks) block["page_idx"] = 0 block["block_sents"] = sent_tokenize(block["block_text"]) block["block_class"] = "nlm-text-body" block["level_chain"] = ( [title, header_text] if title else [header_text] ) if len(col_blocks) == 1: block["block_type"] = "para" blocks.append(block) if block["block_type"] == "header": has_header = True para_header = block["block_text"] # nlm-ingestor/nlm_ingestor/ingestor_utils/parsing_utils.py def find_potential_gaps(gap_count): """ This function checks if a table row can be formed from the current table row spacing scheme. This is for edge cases when tika doesn't properly chunk the cells of a line """ possible_gaps = 0 min_gap = min(gap_count) gap_threshold = [] for gap_size in gap_count: if gap_size > (min_gap * 3): gap_threshold.append(gap_size) possible_gaps += gap_count[gap_size] if len(gap_threshold): return possible_gaps, min(gap_threshold) # suggested splits return [], 0 """ import json import re import numpy as np from nltk import load from nltk import PunktSentenceTokenizer nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) if isinstance(obj, np.floating): return float(obj) if isinstance(obj, np.ndarray): return obj.tolist() return super(NpEncoder, self).default(obj) nlm_abbs = { "u.s", "u.s.a", "n.w", "p.o", "po", "st", "ave", "blvd", "ctr", "cir", "ct", "dr", "mtn", "apt", "hwy", "esq", "fig", "no", "sec", "n.a", "s.a.b", "non-u.s", "cap", 'u.s.c', "ste", } nlm_special_abbs = { "inc", } abbs = nltk_abbs | nlm_abbs nltk_tokenzier = PunktSentenceTokenizer() rules = [] for abb in abbs: # match start of the sentence pattern = fr"^{abb}.\s" replaced = f"{abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match token in sentence pattern = fr"\s{abb}.\s" replaced = f" {abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) for abb in nlm_special_abbs: pattern = fr"{abb}\." 
replaced = f"{abb}_" rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match content inside brackets # (?<=\() ==> starts with "(" # ([^)]+) ==> repeat not ")" # (?=\))") ==> ends with ")" bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))") space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.') quotation_pattern = re.compile(r'[โ€โ€œ"โ€˜โ€™\']') def sent_tokenize(org_texts): if not org_texts: return org_texts sents = [] # in case org_texts has \n, break it into multiple paragraph # edge case for html and markdown for org_text in org_texts.split("\n"): org_text = space_rule.sub(r'\1', org_text) modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925 orig_offset = abs(len(org_text) - len(modified_text)) # do not break bracket for span_group in bracket_rule.finditer(modified_text): start_byte, end_byte = span_group.span() span = modified_text[start_byte:end_byte] # skip this logic when span is too big? disabled for now <fim_suffix> # continue modified_text = modified_text.replace( f"({span})", f"_{span.replace('.','_')}_", ) for rule, replaced in rules: modified_text = rule.sub(replaced, modified_text) # Normalize all the quotation. modified_text = quotation_pattern.sub("\"", modified_text) modified_sents = nltk_tokenzier.tokenize(modified_text) offset = orig_offset sent_idx = 0 while offset < len(modified_text) and sent_idx < len(modified_sents): if modified_text[offset] == " ": offset += 1 continue # cut org_text based on lengths of modified_sent modified_sent = modified_sents[sent_idx] sents.append(org_text[offset: offset + len(modified_sent)]) offset += len(modified_sent) sent_idx += 1 if len(sents) >= 2 and re.match(r"^.\.$", sents[0]): sents[1] = sents[0] + " " + sents[1] sents = sents[1:] return sents def divide_list_into_chunks(lst, n): # looping till length l for i in range(0, len(lst), n): yield lst[i : i + n] def normalize(X): norms = np.einsum("ij,ij->i", X, X) np.sqrt(norms, norms) X /= norms[:, np.newaxis] return X def detect_block_center_aligned(block, page_width): center_location = block["box_style"][1] + block["box_style"][3] / 2 center_aligned = abs(center_location - page_width / 2) < page_width * 0.01 width_check = block["box_style"][3] * 2 < page_width return center_aligned and width_check def detect_block_center_of_page(block, page_height): bottom = block["box_style"][0] + block["box_style"][4] center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3) return center_of_page def check_char_is_word_boundary(c): if c.isalnum(): return False if c in ['-', '_']: return False return True def blocks_to_sents(blocks, flatten_merged_table=False, debug=False): block_texts = [] block_info = [] header_block_idx = -1 header_match_idx = -1 header_match_idx_offset = -1 header_block_text = "" is_rendering_table = False is_rendering_merged_cells = False table_idx = 0 levels = [] prev_header = None block_idx = 0 for block_idx, block in enumerate(blocks): block_type = block["block_type"] if block_type == "header": if debug: print("---", block["level"], block["block_text"]) header_block_text = block["block_text"] header_block_idx = block["block_idx"] header_match_idx = header_match_idx_offset + 1 if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0: while len(levels) > 0 and levels[-1]["level"] >= block["level"]: if debug: print("<<", levels[-1]["level"], levels[-1]["block_text"]) levels.pop(-1) if debug: print(">>", 
block["block_text"]) levels.append(block) prev_header = block if debug: print("-", [str(level['level']) + "-" + level['block_text'] for level in levels]) block["header_text"] = header_block_text block["header_block_idx"] = header_block_idx block["header_match_idx"] = header_match_idx block["block_idx"] = block_idx level_chain = [] for level in levels: level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]}) # remove a level for header if block_type == "header": level_chain = level_chain[:-1] level_chain.reverse() block["level_chain"] = level_chain # if block_type == "header" or block_type == "table_row": if ( block_type == "header" and not is_rendering_table and 'is_table_start' not in block ): block_texts.append(block["block_text"]) # append text from next block to header block # TODO: something happened here, it messed up the match_text # if block_type == "header" and block_idx + 1 < len(blocks): # block[ # "block_text" # ] += blocks[block_idx+1]['block_text'] block_info.append(block) header_match_idx_offset += 1 elif ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" ) and not is_rendering_table: block_sents = block["block_sents"] header_match_idx_offset += len(block_sents) for sent in block_sents: block_texts.append(sent) block_info.append(block) elif 'is_table_start' in block: is_rendering_table = True if 'has_merged_cells' in block: is_rendering_merged_cells = True elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row": block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if is_rendering_table: if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table: eff_header_block = block["effective_header"] eff_para_block = block["effective_para"] eff_header_block["header_text"] = block["header_text"] eff_header_block["header_block_idx"] = block["block_idx"] eff_header_block["header_match_idx"] = header_match_idx_offset + 1 eff_header_block["level"] = block["level"] + 1 eff_header_block["level_chain"] = block["level_chain"] eff_para_block["header_block_idx"] = block["block_idx"] eff_para_block["header_match_idx"] = header_match_idx_offset + 1 eff_para_block["level"] = block["level"] + 2 eff_para_block["level_chain"] = [ { "block_idx": eff_header_block["block_idx"], "block_text": eff_header_block["block_text"], }, ] + eff_header_block["level_chain"] header_match_idx_offset += 1 block_info.append(block["effective_header"]) block_texts.append(block["effective_header"]["block_text"]) for sent in block["effective_para"]["block_sents"]: block_texts.append(sent) block_info.append(block["effective_para"]) header_match_idx_offset += len(block["effective_para"]["block_sents"]) else: block["table_idx"] = table_idx block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if 'is_table_end' in block: is_rendering_table = False table_idx += 1 return block_texts, block_info def get_block_texts(blocks): block_texts = [] block_info = [] for block in blocks: block_type = block["block_type"] if ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" or block_type == "header" ): block_texts.append(block["block_text"]) block_info.append(block) return block_texts, block_info<fim_middle># if len(span.split()) >= 10:
# if len(span.split()) >= 10:
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
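Aside on the record above: its sent_tokenize protects abbreviations by rewriting their trailing dot to an underscore before Punkt tokenization, then slices the original string by offset so the output keeps the original spelling. Here is a toy sketch of that trick, with two hand-written regexes standing in for the module's compiled rules list; the replacement is length-preserving, which is what makes the offset arithmetic valid.

import re
from nltk import PunktSentenceTokenizer

text = "See note no. 4 for details. Totals are in sec. 2 below."
# mask the dots of "no." and "sec." (both appear in the nlm_abbs set above)
masked = re.sub(r"\b(no|sec)\.\s", r"\1_ ", text, flags=re.IGNORECASE)
tokenizer = PunktSentenceTokenizer()
sents, offset = [], 0
for sent in tokenizer.tokenize(masked):
    start = masked.index(sent, offset)
    # slice the original text so "no." comes back with its dot intact
    sents.append(text[start:start + len(sent)])
    offset = start + len(sent)
print(sents)  # ['See note no. 4 for details.', 'Totals are in sec. 2 below.']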
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def get_row1(row): orignal_row = row words = row.split(" ") cells = [] try: row = processors_utils.super_replace(row, ["(", ")", ",", "$", "%"], "") tags = nltk.pos_tag(list(filter(None, row.split(" ")))) except Exception as e: logging.error(e) return [orignal_row] # "" strn = "" for i in range(len(tags)): # previous check tag = tags[i][1] word = words[i].lstrip().rstrip() proc_word = processors_utils.super_replace(word, ["(", ")", ",", "$", "%"], "") if len(word) & len(proc_word.replace(" ", "")): # print(proc_word) start_tag = nltk.pos_tag(proc_word[0])[0][1] end_tag = nltk.pos_tag(proc_word[-1])[0][1] else: start_tag = "CD" end_tag = "CD" if ((tag == "CD") | (tag == ":")) and ( (tag == ":") | ((start_tag == "CD") and (end_tag == "CD")) ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) strn = "" elif ( ((start_tag == "CD") and (end_tag == "CD")) & (word != "$") & (word == "%") ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) else: strn += word.lstrip().rstrip() + " " if type(cells) == str: cells = [cells] return cells # nlm-ingestor/nlm_ingestor/file_parser/tika_parser.py def find_tika_header(fp): try: with open(fp) as file: file_data = file.read() soup = BeautifulSoup(file_data, "html.parser") # print(str(soup.find_all('head')[0])) head = soup.find_all("head") return "org.apache.tika.parser" in str(head[0]) except Exception as e: logging.error(e) return False # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def format_tables(blocks_df): # columns block_text block_sents block_type # identify all tables in df table_indexes = blocks_df[blocks_df.block_type == "table_row"].index # if none are found if len(table_indexes) == 0: return blocks_df # group tables tables = group_tables(table_indexes) invalid = [] idx = [] for i in range(len(tables)): if len(tables[i]) < 2: invalid.append(i) else: idx.append(i) if len(invalid): blocks_df.loc[ np.concatenate(np.array(tables)[np.array(invalid)], axis=0), "block_type", ] = "para" table_rows = blocks_df[blocks_df.block_type == "table_row"] table_list = [] # print(table_rows) for table_idx in idx: table_idx = tables[table_idx] # print(table_rows.loc[table_idx].values,"\n") table = [] for row_idx, row in table_rows.loc[table_idx].iterrows(): table += [list(filter(None, get_row(row["block_text"].rstrip())))] # check if table is uniform table_cell_counts = [] if len(table) and (len(table[0])): table_cell_counts = [len(row) for row in table] try: cell_count = mode(table_cell_counts) except Exception as e: logging.error(e) cell_count = min(table_cell_counts) # non uniform row if (sum(table_cell_counts) % len(table[0])) and (cell_count): new_table = [] for row in table: # multiple rows in row if (len(row) > cell_count) and (len(row) % cell_count == 0): rows = int(len(row) / cell_count) for new_row in range(rows): new_row += 1 new_table_row = row[ new_row * cell_count - cell_count : new_row * cell_count ] new_table.append(new_table_row) else: new_table.append(row) table_list.append(new_table) else: table_list.append(table) else: table_list.append(table) replace = [] # check for valid tables if len(idx): for i in np.array(tables)[np.array(idx)]: replace.append(i) for i in range(len(replace)): blocks_df = blocks_df.drop(replace[i]) blocks_df.loc[replace[i][0]] = { "block_type": "table", "block_sents": 
table_list[i], "block_text": table_list[i], } return blocks_df.sort_index().reset_index(drop=True) else: return blocks_df """ import datetime import logging import math import re import string from nltk.corpus import stopwords from .patterns import abbreviations from .patterns import states from .patterns import states_abbreviations from .styling_utils import mode_of_list try: stop_words = set(stopwords.words("english")) except Exception as e: logging.error(e) import nltk nltk.download("stopwords") from nltk.corpus import stopwords stop_words = set(stopwords.words("english")) stop_words.add("per") continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~" list_chars = [ "•", "➢", "*", "ƒ", "", "", "", "", "»", "☐", "·", "�", "▪", "▪", "○", "", "–", ] list_types = { "•": "circle", "➢": "wide_symbol_arrow", "*": "star", "ƒ": "f", "": "clock", "": "small_square", "": "narrow_symbol_arrow", "": "large_square", "»": "double_arrow", "☐": "hollow_square", "·": "circle", "�": "special_char", "▪": "very_small_square", "▪": "very_small_square", "○": "hollow_circle", "": "hollow_square", "–": "dash", "‒": "another-dash", "̶": "underscore", } unicode_list_types = { "\\uf0b7": "•", "\\uf0fc": "", } footnote_types = { "©" } ambiguous_list_chars = ["+", "-"] units = ["acres", "miles", "-"] # - could represent a null value in a row punctuations = string.punctuation + "“" start_quotations = ["'", '"', "“"] end_quotations = ["'", '"', "”"] """ Quote Pattern details: \\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly. ["“\'] ==> Quote patterns (?!\\D\\s) ==> Negative Lookahead for single character following the quote. Helps in removing words like Macy's, don't ... (?!\\d+) ==> Negative Lookahead for one or more digits following the pattern. Helps in removing words like '19, '2019 (.*?)[,;.]?[”"\'] ==> Match all other data. """ # Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile( r'(?:(?<=\W)|(?<=^))["โ€œโ€˜โ€™\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[โ€"โ€˜โ€™\']+', ) # (r'["โ€œ\'](.*?)[,;.]?[โ€"\']') single_char_pattern = re.compile(r'[a-zA-Z]') multi_char_pattern = re.compile(r'[a-zA-Z]+') roman_number_pattern = re.compile(r'[ixvIXV]+$') ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"โ€œโ€˜โ€™โ€\'\s]*$") conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"] class Word: def __init__(self, token): self.text = token self.is_percent = False self.is_number = False self.is_year = False <fim_suffix> self.is_dollar = False self.is_million = False self.is_billion = False self.is_thousand = False self.is_date_entry = False self.is_negative = False self.length = len(self.text) self.is_stop_word = self.text.lower() in stop_words self.is_number_range = False self.parts = [] text_without_punct = self.text while ( len(text_without_punct) > 1 and (text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations) ): text_without_punct = text_without_punct[0:-1] # remove leading unbalancced punctuations while ( len(text_without_punct) > 1 and (text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations) ): text_without_punct = text_without_punct[1:] self.text_without_punct = text_without_punct self.is_noun = self.text_without_punct[0].isupper() n = self.check_numeric() self.check_date() try: if n: n = round(float(n)) if n > 0: digits = int(math.log10(n)) + 1 elif n == 0: digits = 1 else: digits = int(math.log10(-n)) + 2 self.num_digits = digits if digits == 4 and self.text.replace(",", "") == self.text: self.is_year = True self.is_number = False else: self.num_digits = 0 except Exception as e: logging.error(e) self.num_digits = 0 def check_date(self): if "/" in self.text or "-" in self.text: text = self.text.replace("/", "-") date_patterns = [ "%b-%d", "%B-%d", "%B-%d-%y", "%B-%d-%Y", "%b-%d-%Y", "%b-%d-%y", "%m-%d", "%m-%d-%y", "%m-%d-%Y", ] for pat in date_patterns: try: datetime.datetime.strptime(text, pat) self.is_date_entry = True return except ValueError: pass else: self.is_date_entry = False def check_numeric(self): word = self.text.lower() if not word.isalpha(): if word.isprintable(): if not word.isnumeric(): if word.startswith("(") and word.endswith(")"): word = word[1:-1] if word.startswith("-"): self.is_negative = True word = word[1:] if word.startswith("$"): self.is_dollar = True word = word[1:] elif word.endswith("$"): self.is_dollar = True word = word[0:-1] elif word.endswith("%"): self.is_percent = True word = word[0:-1] elif word.endswith("m"): self.is_million = True elif word.endswith("bn"): self.is_billion = True if word.startswith("(") and word.endswith(")"): word = word[1:-1] word = word.replace(",", "") if word.isnumeric() or word.replace(".", "", 1).isnumeric(): self.is_number = True parts = word.split("-") if ( len(parts) == 2 and parts[0].isnumeric() and parts[1].isnumeric() ): self.is_number_range = True self.parts = parts else: self.is_number = True if self.is_number: numeric_part = word return numeric_part class Line: def __init__( self, line_str, text_list=[], style_dict={}, page_details={}, noun_chunk_ending_tokens=[], ): self.text = line_str.strip() self.visual_line = VisualLine(text_list, style_dict, page_details) self.words = [] self.is_independent = False self.is_header = False self.is_header_without_comma = False self.noun_chunks = [] self.quoted_words = quote_pattern.findall(self.text) 
self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens} self.parse_line() def check_header(self): # Section X, Article Y, Note 1 etc. first_word_header = self.first_word.lower() in ["section", "article", "note"] # If there are a certain percentage of title words (first letter capitalize) title_ratio = ( self.title_word_count / self.eff_word_count if self.eff_word_count > 0 else 1.0 ) # print(self.title_word_count, self.eff_word_count, title_ratio) # Section 1 is a header but Section 1: Hello 3 is not has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10 has_header_structure = ( (first_word_header or has_enough_titles) and self.number_count == 1 ) or self.numbered_line or self.text.isupper() # has_header_structure = has_header_structure and self.eff_word_count < last_word_number = ( self.last_word.lower() in units or self.last_word_number and not has_header_structure ) last_word_date = self.last_word_date and not has_header_structure # Find lines ending with sentence delimiter. But exclude text like "L.P." ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None sentence_structure = self.ends_with_period and not ( has_header_structure and title_ratio > 0.9 ) and ends_with_delim last_letter_is_punctuation = ( self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and ends_with_delim ) self.is_header_without_comma = ( not sentence_structure and not self.has_list_char and not self.first_char in footnote_types and has_enough_titles and not last_word_number and ( self.number_count == 0 or (has_header_structure and self.number_count <= 1) ) and not self.has_continuing_chars and not last_word_date and self.first_word_title and not self.last_word_is_stop_word and not self.is_zipcode_or_po and not last_letter_is_punctuation and not "://" in self.text # url pattern ) self.is_header = self.is_header_without_comma and \ ((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True) def check_ends_with_period(self): # punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.'] last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."] self.ends_with_period = self.last_char in ["."] and not last_word_is_title def check_table_row(self): if not self.is_header: value_count = ( self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ") ) word_symbols = self.word_count - self.dollar_sign_count if word_symbols == 0: word_symbols = 1 word_ratio = ( value_count + self.title_word_count + self.date_entry_count ) / word_symbols self.is_table_row = ( ( (value_count > 0 or self.date_entry_count > 0) and word_ratio > 0.7 and not self.ends_with_period and not self.is_zipcode_or_po ) and not self.last_word_is_stop_word or ("...." in self.text) ) else: self.is_table_row = False def check_list_item(self): text = self.text.strip() self.has_list_char = text[0] in list_types.keys() # if not self.has_list_char and text[0] in ambiguous_list_chars: # self.has_list_char = text[1:].strip()[0].isalpha() self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$" if self.is_list_item: self.list_type = list_types[text[0]] # matches 1.1 1.2.1 1 etc. 
def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # check if line is part of address def check_zipcode_or_pobox(self): # check if line matches format P.O. 
box xxxxx pobox = ( self.word_count == 3 and self.last_word_number and self.first_word.lower() in ["po", "p.o", "p.o."] ) # check if line is last part of address, matching format "city, state zipcode" zipcode = ( self.word_count < 7 # ensure line is standalone address, not part of larger sentence and ( self.contains_state # line contains comma followed by state name or abbreviation # line ends in zipcode, with format xxxxx or xxxxx-xxxx and ( (self.last_word_number or self.last_word[-4:].isdigit()) and ( (len(self.last_word) == 10 and self.last_word[-5] == "-") or len(self.last_word) == 5 ) ) and not self.ends_with_period ) ) self.is_zipcode_or_po = pobox or zipcode def set_line_type(self): line_type = "para" if self.is_table_row: line_type = "table_row" elif self.is_header: line_type = "header" elif self.is_list_item or self.numbered_line: line_type = "list_item" else: line_type = "para" self.line_type = line_type def parse_line(self): self.words = [] self.title_word_count = 0 self.alpha_count = 0 self.list_type = "" self.integer_numbered_line = False self.roman_numbered_line = False self.dot_numbered_line = False self.numbered_line = False self.stop_word_count = 0 self.dollar_count = 0 self.pct_count = 0 self.number_count = 0 self.last_word_number = False self.first_word_title = False self.letter_numbered_line = False self.ends_with_hyphen = False self.last_word_date = False self.is_reference_author_name = False self.date_entry_count = 0 self.last_word_is_stop_word = False # self.last_word in self.stopwords self.hit_colon = False self.is_zipcode_or_po = False self.contains_state = False self.addresses = [] # todo - this is a stopgap solution, need to make it more efficient tokens = self.text.split() self.length = len(self.text) self.word_count = len(tokens) self.dollar_sign_count = tokens.count("$") last_idx = self.word_count - 1 first_alpha_found = False prev_token_comma = False self.eff_length = 0 single_letter_word_count = 0 noun_chunk_buf = [] if self.length == 0: return for idx, token in enumerate(tokens): if token in unicode_list_types.keys(): token = unicode_list_types[token] if token.__contains__(":"): self.hit_colon = True # remove punctuation unless (word) or unless it is the first token or if it has colon last_char = token[-1] # remove punctuation unless (word) or unless it is the first token if ( (token[-1] in string.punctuation or token[-1] in end_quotations) and not (token[0] in string.punctuation or token[0] in start_quotations) and (not idx == 0 or token[-1] == ":") ): token = token[0:-1] if len(token) == 0: continue # if prev token contained comma, check if current token is state name if prev_token_comma and ( token.lower() in states or token.lower() in states_abbreviations ): self.contains_state = True prev_token_comma = False if prev_token_comma: prev_token_comma = False if last_char == ",": prev_token_comma = True if idx == 0 and not token.lower() == "i" and not token.lower() == "a": self.check_numbered_line(token) if token.istitle() or token.isupper(): # and not self.hit_colon: self.title_word_count = self.title_word_count + 1 if token.isalpha(): # if not self.hit_colon: self.alpha_count = self.alpha_count + 1 if not first_alpha_found: first_alpha_found = True if idx == 0: self.first_word_title = token[0].isupper() word = Word(token) if word.is_number: self.number_count = self.number_count + 1 if idx == last_idx: self.last_word_number = True if word.is_date_entry: self.date_entry_count += 1 if idx == last_idx: self.last_word_date = True if word.is_dollar: 
self.dollar_count = self.dollar_count + 1 if idx == last_idx: self.last_word_number = True if word.is_percent: self.pct_count = self.pct_count + 1 if idx == last_idx: self.last_word_number = True self.eff_length += word.length if word.length == 1: single_letter_word_count += 1 if word.is_stop_word: if not self.hit_colon: self.stop_word_count = self.stop_word_count + 1 if idx == last_idx and len(token) != 1 and not token.isupper(): self.last_word_is_stop_word = True if word.is_noun or word.text == "&": noun = word.text_without_punct prev_word = self.words[-1] if len(self.words) > 0 else None if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf: noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway if noun.endswith("'s"): noun = noun[0:-2] noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] elif ( "".join([x.lower() for x in noun if x not in {".", ","}]) in self.noun_chunk_ending_tokens ): noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] else: noun_chunk_buf.append(noun) elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]: noun_chunk_buf.append(word.text_without_punct) elif len(noun_chunk_buf): self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] self.words.append(word) if len(noun_chunk_buf) > 0: self.noun_chunks.append(" ".join(noun_chunk_buf)) self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks)))) self.first_word = tokens[0] self.last_word = tokens[-1] self.last_char = self.text[-1] self.ends_with_period = self.last_char == "." self.ends_with_comma = self.last_char == "," self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "." 
self.eff_word_count = self.alpha_count - self.stop_word_count self.check_ends_with_period() self.first_char = self.text[0] self.has_continuing_chars = not self.numbered_line and ( self.first_char.islower() or self.first_char in continuing_chars ) self.last_continuing_char = self.last_char in continuing_chars self.check_zipcode_or_pobox() self.check_list_item() self.check_header() self.check_table_row() self.separate_line = ( self.is_header or self.is_table_row or self.is_list_item or self.is_zipcode_or_po ) self.is_list_or_row = self.is_table_row or self.is_list_item self.is_header_or_row = ( self.is_header or self.is_table_row or self.is_zipcode_or_po ) self.ends_with_abbreviation = self.ends_with_period and ( (self.last_word.find(".") != len(self.last_word) - 1) or self.last_word.lower() in abbreviations or len(self.last_word) <= 3 ) self.incomplete_line = not self.is_header_or_row and ( not self.ends_with_period or self.ends_with_abbreviation or self.end_with_period_single_char ) self.continuing_line = self.has_continuing_chars and not self.separate_line self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8 self.set_line_type() if self.is_header or self.is_header_without_comma: if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2: self.is_reference_author_name = True self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list # print(self.separate_line) # self.continuing_line = not self.separate_line and def to_json(self): json_lp = dict(self.__dict__) del json_lp["visual_line"] words = [] for word in self.words: words.append(word.__dict__) json_lp["words"] = words return json_lp class VisualLine: def __init__(self, text_list=[], style_dict={}, page_stats={}): self.text_list = text_list self.start_x = None self.start_y = None self.end_x = None self.end_y = None self.fs = None self.fw = None self.start_fs = None self.end_fs = None self.diff_prev_y = None self.diff_next_y = None self.is_comparably_sized = False self.is_comparably_bolded = False self.is_prev_space_smallest = False self.is_next_space_smallest = False self.wrapped_page = False self.text = " ".join(self.text_list) if style_dict: self.start_x = style_dict["start_x"][0] self.start_y = style_dict["start_y"][0] self.end_x = style_dict["end_x"][-1] self.end_y = style_dict["end_y"][-1] self.fs = style_dict["line_fs"][0] self.fw = style_dict["line_fw"][0] self.diff_prev_y = style_dict["diff_prev_y"][0] self.diff_next_y = style_dict["diff_next_y"][0] self.font_family = ( style_dict["font_family"][0] if len(style_dict["font_family"]) else None ) self.font_style = ( style_dict["font_style"][0] if len(style_dict["font_style"]) else None ) self.min_x = ( self.start_x ) # these variables are adjustable during line joins for line width self.max_x = self.end_x self.start_x_list = style_dict["start_x"] # joined ents self.end_x_list = style_dict["end_x"] # joined ents self.start_x_list_single_ent = style_dict["start_x_list"][0] self.end_x_list_single_ent = style_dict["end_x_list"][0] self.mode_fs = mode_of_list(style_dict["line_fs"]) self.tab_count = 0 # calculates tabs for when tika misses word split if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent): self.start_end_list = list( zip(self.start_x_list_single_ent, self.end_x_list_single_ent), ) for word_x, next_word_x in zip( self.start_end_list[:-1], self.start_end_list[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = 
next_word_start_x - word_end_x if word_distance > 20: self.tab_count += 1 else: self.start_end_list = [] self.tab_count_join = 0 # tab count after join in ptolines # calculates tabs for when tika misses word split if len(self.start_x_list) == len(self.end_x_list): self.start_end_list_join = list( zip(self.start_x_list, self.end_x_list), ) for word_x, next_word_x in zip( self.start_end_list_join[:-1], self.start_end_list_join[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = next_word_start_x - word_end_x if word_distance > 20: self.tab_count_join += 1 else: self.start_end_list_join = [] if len(self.text.split()) == 2 and self.tab_count == 1: self.text_list = self.text.split() # Count tabs in text list, Eventually make it a function of font size self.start_fs = round(style_dict["start_fs"][0], 1) self.end_fs = round(style_dict["end_fs"][-1], 1) self.compute_visual_features(page_stats) def compute_visual_features(self, page_stats): # compute font size relative to most common font font_sizes_mode = page_stats["mode_fs"] if self.fs > (4 / 3) * font_sizes_mode: self.is_comparably_sized = True else: self.is_comparably_sized = False # compute font weight relative to 600.0 which has generally # been observed to correspond to bolding of some sort font_weights_mode = page_stats["mode_fw"] if font_weights_mode >= 600.0: self.is_comparably_bolded = False elif self.fw > 600.0: self.is_comparably_bolded = True # compare line height for similar type (same font) lines if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2: for k, v in page_stats["fs_and_diff_prev_y"].items(): if k == self.fs and 0 <= v < self.diff_prev_y: break else: self.is_prev_space_smallest = True if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2: for k, v in page_stats["fs_and_diff_next_y"].items(): if k == self.fs and 0 <= v < self.diff_next_y: break else: self.is_next_space_smallest = True def should_join_table(self, next_line): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # check list of spaced words curr_line_ents = len(self.text_list) next_line_ents = len(next_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # compare alignment of elements in both lists if ent_match: return return False def should_join_para(self): return False def should_join_header(self): return False def __str__(self): output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest}," output_str += f"\nfont_style = {self.font_style}" return output_str <fim_middle># year does not count as a number
# year does not count as a number
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
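The completion above hinges on Word's year heuristic: a numeric token becomes is_year only when it has exactly four digits and was written without a thousands separator. A minimal standalone sketch of that rule (looks_like_year is a hypothetical helper, not part of the repo):

import math

def looks_like_year(token: str) -> bool:
    # mirror of the digit-count + comma check in Word.__init__ above (assumption: token is a plain string)
    digits_only = token.replace(",", "")
    if not digits_only.isnumeric():
        return False
    n = round(float(digits_only))
    num_digits = int(math.log10(n)) + 1 if n > 0 else 1
    return num_digits == 4 and token == digits_only

print(looks_like_year("1996"))   # True  -> treated as a year, not a number
print(looks_like_year("1,996"))  # False -> the comma marks a quantity
print(looks_like_year("200"))    # False -> too few digits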
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/line_parser.py def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def traverse(parent, level, blocks): for child in parent: # handle cases when there's only a <country /> tag if not child.text: continue if len(list(child)) > 0: # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader blocks.append(header_block) traverse(child, level + 1, blocks) else: # print("\t"*(level + 1), child.text) if not title and child.tag.lower().find("title") != -1: self.title = child.text if child.tag != "textblock": # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) # header_text = " ".join(child.tag.split("_")).title() header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader 
blocks.append(header_block) else: level -= 1 lines = child.text.split("\n") # print("\t" * (level + 1), "======") # for line in lines: # print("\t" * (level + 1), line) # print("\t" * (level + 1), "======") col_blocks = processors.clean_lines(lines, xml=True) header_text = blocks[-1]["block_text"] has_header = False for block in col_blocks: # print("\t" * (level + 1), block["block_text"]) inline_header = has_header and block["block_type"] == "para" block["header_text"] = para_header if inline_header else header_text indent_offset = 2 if inline_header else 1 block["level"] = level + indent_offset block["block_idx"] = len(blocks) block["page_idx"] = 0 block["block_sents"] = sent_tokenize(block["block_text"]) block["block_class"] = "nlm-text-body" block["level_chain"] = ( [title, header_text] if title else [header_text] ) if len(col_blocks) == 1: block["block_type"] = "para" blocks.append(block) if block["block_type"] == "header": has_header = True para_header = block["block_text"] # nlm-ingestor/nlm_ingestor/ingestor_utils/parsing_utils.py def find_potential_gaps(gap_count): """ This function checks if a table row can be formed from the current table row spacing scheme. This is for edge cases when tika doesn't properly chunk the cells of a line """ possible_gaps = 0 min_gap = min(gap_count) gap_threshold = [] for gap_size in gap_count: if gap_size > (min_gap * 3): gap_threshold.append(gap_size) possible_gaps += gap_count[gap_size] if len(gap_threshold): return possible_gaps, min(gap_threshold) # suggested splits return [], 0 """ import json import re import numpy as np from nltk import load from nltk import PunktSentenceTokenizer nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) if isinstance(obj, np.floating): return float(obj) if isinstance(obj, np.ndarray): return obj.tolist() return super(NpEncoder, self).default(obj) nlm_abbs = { "u.s", "u.s.a", "n.w", "p.o", "po", "st", "ave", "blvd", "ctr", "cir", "ct", "dr", "mtn", "apt", "hwy", "esq", "fig", "no", "sec", "n.a", "s.a.b", "non-u.s", "cap", 'u.s.c', "ste", } nlm_special_abbs = { "inc", } abbs = nltk_abbs | nlm_abbs nltk_tokenzier = PunktSentenceTokenizer() rules = [] for abb in abbs: # match start of the sentence pattern = fr"^{abb}.\s" replaced = f"{abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match token in sentence pattern = fr"\s{abb}.\s" replaced = f" {abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) for abb in nlm_special_abbs: pattern = fr"{abb}\." 
replaced = f"{abb}_" rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match content inside brackets # (?<=\() ==> starts with "(" # ([^)]+) ==> repeat not ")" # (?=\))") ==> ends with ")" bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))") space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.') quotation_pattern = re.compile(r'[โ€โ€œ"โ€˜โ€™\']') def sent_tokenize(org_texts): if not org_texts: return org_texts sents = [] # in case org_texts has \n, break it into multiple paragraph <fim_suffix> for org_text in org_texts.split("\n"): org_text = space_rule.sub(r'\1', org_text) modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925 orig_offset = abs(len(org_text) - len(modified_text)) # do not break bracket for span_group in bracket_rule.finditer(modified_text): start_byte, end_byte = span_group.span() span = modified_text[start_byte:end_byte] # skip this logic when span is too big? disabled for now # if len(span.split()) >= 10: # continue modified_text = modified_text.replace( f"({span})", f"_{span.replace('.','_')}_", ) for rule, replaced in rules: modified_text = rule.sub(replaced, modified_text) # Normalize all the quotation. modified_text = quotation_pattern.sub("\"", modified_text) modified_sents = nltk_tokenzier.tokenize(modified_text) offset = orig_offset sent_idx = 0 while offset < len(modified_text) and sent_idx < len(modified_sents): if modified_text[offset] == " ": offset += 1 continue # cut org_text based on lengths of modified_sent modified_sent = modified_sents[sent_idx] sents.append(org_text[offset: offset + len(modified_sent)]) offset += len(modified_sent) sent_idx += 1 if len(sents) >= 2 and re.match(r"^.\.$", sents[0]): sents[1] = sents[0] + " " + sents[1] sents = sents[1:] return sents def divide_list_into_chunks(lst, n): # looping till length l for i in range(0, len(lst), n): yield lst[i : i + n] def normalize(X): norms = np.einsum("ij,ij->i", X, X) np.sqrt(norms, norms) X /= norms[:, np.newaxis] return X def detect_block_center_aligned(block, page_width): center_location = block["box_style"][1] + block["box_style"][3] / 2 center_aligned = abs(center_location - page_width / 2) < page_width * 0.01 width_check = block["box_style"][3] * 2 < page_width return center_aligned and width_check def detect_block_center_of_page(block, page_height): bottom = block["box_style"][0] + block["box_style"][4] center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3) return center_of_page def check_char_is_word_boundary(c): if c.isalnum(): return False if c in ['-', '_']: return False return True def blocks_to_sents(blocks, flatten_merged_table=False, debug=False): block_texts = [] block_info = [] header_block_idx = -1 header_match_idx = -1 header_match_idx_offset = -1 header_block_text = "" is_rendering_table = False is_rendering_merged_cells = False table_idx = 0 levels = [] prev_header = None block_idx = 0 for block_idx, block in enumerate(blocks): block_type = block["block_type"] if block_type == "header": if debug: print("---", block["level"], block["block_text"]) header_block_text = block["block_text"] header_block_idx = block["block_idx"] header_match_idx = header_match_idx_offset + 1 if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0: while len(levels) > 0 and levels[-1]["level"] >= block["level"]: if debug: print("<<", levels[-1]["level"], levels[-1]["block_text"]) levels.pop(-1) if debug: print(">>", 
block["block_text"]) levels.append(block) prev_header = block if debug: print("-", [str(level['level']) + "-" + level['block_text'] for level in levels]) block["header_text"] = header_block_text block["header_block_idx"] = header_block_idx block["header_match_idx"] = header_match_idx block["block_idx"] = block_idx level_chain = [] for level in levels: level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]}) # remove a level for header if block_type == "header": level_chain = level_chain[:-1] level_chain.reverse() block["level_chain"] = level_chain # if block_type == "header" or block_type == "table_row": if ( block_type == "header" and not is_rendering_table and 'is_table_start' not in block ): block_texts.append(block["block_text"]) # append text from next block to header block # TODO: something happened here, it messed up the match_text # if block_type == "header" and block_idx + 1 < len(blocks): # block[ # "block_text" # ] += blocks[block_idx+1]['block_text'] block_info.append(block) header_match_idx_offset += 1 elif ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" ) and not is_rendering_table: block_sents = block["block_sents"] header_match_idx_offset += len(block_sents) for sent in block_sents: block_texts.append(sent) block_info.append(block) elif 'is_table_start' in block: is_rendering_table = True if 'has_merged_cells' in block: is_rendering_merged_cells = True elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row": block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if is_rendering_table: if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table: eff_header_block = block["effective_header"] eff_para_block = block["effective_para"] eff_header_block["header_text"] = block["header_text"] eff_header_block["header_block_idx"] = block["block_idx"] eff_header_block["header_match_idx"] = header_match_idx_offset + 1 eff_header_block["level"] = block["level"] + 1 eff_header_block["level_chain"] = block["level_chain"] eff_para_block["header_block_idx"] = block["block_idx"] eff_para_block["header_match_idx"] = header_match_idx_offset + 1 eff_para_block["level"] = block["level"] + 2 eff_para_block["level_chain"] = [ { "block_idx": eff_header_block["block_idx"], "block_text": eff_header_block["block_text"], }, ] + eff_header_block["level_chain"] header_match_idx_offset += 1 block_info.append(block["effective_header"]) block_texts.append(block["effective_header"]["block_text"]) for sent in block["effective_para"]["block_sents"]: block_texts.append(sent) block_info.append(block["effective_para"]) header_match_idx_offset += len(block["effective_para"]["block_sents"]) else: block["table_idx"] = table_idx block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if 'is_table_end' in block: is_rendering_table = False table_idx += 1 return block_texts, block_info def get_block_texts(blocks): block_texts = [] block_info = [] for block in blocks: block_type = block["block_type"] if ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" or block_type == "header" ): block_texts.append(block["block_text"]) block_info.append(block) return block_texts, block_info<fim_middle># edge case for html and markdown
# edge case for html and markdown
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
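The completed comment sits just before the paragraph split in sent_tokenize, whose core trick is the abbreviation shield built in the rules loop: trailing periods of known abbreviations are rewritten to "_" so NLTK never splits on them, and because each substitution preserves character count, offsets into the original text stay valid. A minimal sketch of one such rule in isolation (the input string is hypothetical):

import re

abb = "no"
rule = re.compile(fr"\s{abb}.\s", re.IGNORECASE)  # same loose "." as the loop above
shielded = rule.sub(f" {abb}_ ", "See No. 5 for details.")
print(shielded)  # "See no_ 5 for details." -- same length, so offsets still line up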
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/line_parser.py def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def traverse(parent, level, blocks): for child in parent: # handle cases when there's only a <country /> tag if not child.text: continue if len(list(child)) > 0: # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader blocks.append(header_block) traverse(child, level + 1, blocks) else: # print("\t"*(level + 1), child.text) if not title and child.tag.lower().find("title") != -1: self.title = child.text if child.tag != "textblock": # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) # header_text = " ".join(child.tag.split("_")).title() header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader 
blocks.append(header_block) else: level -= 1 lines = child.text.split("\n") # print("\t" * (level + 1), "======") # for line in lines: # print("\t" * (level + 1), line) # print("\t" * (level + 1), "======") col_blocks = processors.clean_lines(lines, xml=True) header_text = blocks[-1]["block_text"] has_header = False for block in col_blocks: # print("\t" * (level + 1), block["block_text"]) inline_header = has_header and block["block_type"] == "para" block["header_text"] = para_header if inline_header else header_text indent_offset = 2 if inline_header else 1 block["level"] = level + indent_offset block["block_idx"] = len(blocks) block["page_idx"] = 0 block["block_sents"] = sent_tokenize(block["block_text"]) block["block_class"] = "nlm-text-body" block["level_chain"] = ( [title, header_text] if title else [header_text] ) if len(col_blocks) == 1: block["block_type"] = "para" blocks.append(block) if block["block_type"] == "header": has_header = True para_header = block["block_text"] # nlm-ingestor/nlm_ingestor/ingestor_utils/parsing_utils.py def find_potential_gaps(gap_count): """ This function checks if a table row can be formed from the current table row spacing scheme. This is for edge cases when tika doesn't properly chunk the cells of a line """ possible_gaps = 0 min_gap = min(gap_count) gap_threshold = [] for gap_size in gap_count: if gap_size > (min_gap * 3): gap_threshold.append(gap_size) possible_gaps += gap_count[gap_size] if len(gap_threshold): return possible_gaps, min(gap_threshold) # suggested splits return [], 0 """ import json import re import numpy as np from nltk import load from nltk import PunktSentenceTokenizer nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) if isinstance(obj, np.floating): return float(obj) if isinstance(obj, np.ndarray): return obj.tolist() return super(NpEncoder, self).default(obj) nlm_abbs = { "u.s", "u.s.a", "n.w", "p.o", "po", "st", "ave", "blvd", "ctr", "cir", "ct", "dr", "mtn", "apt", "hwy", "esq", "fig", "no", "sec", "n.a", "s.a.b", "non-u.s", "cap", 'u.s.c', "ste", } nlm_special_abbs = { "inc", } abbs = nltk_abbs | nlm_abbs nltk_tokenzier = PunktSentenceTokenizer() rules = [] for abb in abbs: # match start of the sentence pattern = fr"^{abb}.\s" replaced = f"{abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match token in sentence pattern = fr"\s{abb}.\s" replaced = f" {abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) for abb in nlm_special_abbs: pattern = fr"{abb}\." 
replaced = f"{abb}_" rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match content inside brackets # (?<=\() ==> starts with "(" # ([^)]+) ==> repeat not ")" # (?=\))") ==> ends with ")" bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))") space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.') quotation_pattern = re.compile(r'[โ€โ€œ"โ€˜โ€™\']') def sent_tokenize(org_texts): if not org_texts: return org_texts sents = [] # in case org_texts has \n, break it into multiple paragraph # edge case for html and markdown for org_text in org_texts.split("\n"): org_text = space_rule.sub(r'\1', org_text) modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925 orig_offset = abs(len(org_text) - len(modified_text)) # do not break bracket for span_group in bracket_rule.finditer(modified_text): start_byte, end_byte = span_group.span() span = modified_text[start_byte:end_byte] # skip this logic when span is too big? disabled for now # if len(span.split()) >= 10: # continue modified_text = modified_text.replace( f"({span})", f"_{span.replace('.','_')}_", ) for rule, replaced in rules: modified_text = rule.sub(replaced, modified_text) # Normalize all the quotation. modified_text = quotation_pattern.sub("\"", modified_text) modified_sents = nltk_tokenzier.tokenize(modified_text) offset = orig_offset sent_idx = 0 while offset < len(modified_text) and sent_idx < len(modified_sents): if modified_text[offset] == " ": offset += 1 continue <fim_suffix> modified_sent = modified_sents[sent_idx] sents.append(org_text[offset: offset + len(modified_sent)]) offset += len(modified_sent) sent_idx += 1 if len(sents) >= 2 and re.match(r"^.\.$", sents[0]): sents[1] = sents[0] + " " + sents[1] sents = sents[1:] return sents def divide_list_into_chunks(lst, n): # looping till length l for i in range(0, len(lst), n): yield lst[i : i + n] def normalize(X): norms = np.einsum("ij,ij->i", X, X) np.sqrt(norms, norms) X /= norms[:, np.newaxis] return X def detect_block_center_aligned(block, page_width): center_location = block["box_style"][1] + block["box_style"][3] / 2 center_aligned = abs(center_location - page_width / 2) < page_width * 0.01 width_check = block["box_style"][3] * 2 < page_width return center_aligned and width_check def detect_block_center_of_page(block, page_height): bottom = block["box_style"][0] + block["box_style"][4] center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3) return center_of_page def check_char_is_word_boundary(c): if c.isalnum(): return False if c in ['-', '_']: return False return True def blocks_to_sents(blocks, flatten_merged_table=False, debug=False): block_texts = [] block_info = [] header_block_idx = -1 header_match_idx = -1 header_match_idx_offset = -1 header_block_text = "" is_rendering_table = False is_rendering_merged_cells = False table_idx = 0 levels = [] prev_header = None block_idx = 0 for block_idx, block in enumerate(blocks): block_type = block["block_type"] if block_type == "header": if debug: print("---", block["level"], block["block_text"]) header_block_text = block["block_text"] header_block_idx = block["block_idx"] header_match_idx = header_match_idx_offset + 1 if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0: while len(levels) > 0 and levels[-1]["level"] >= block["level"]: if debug: print("<<", levels[-1]["level"], levels[-1]["block_text"]) levels.pop(-1) if debug: print(">>", block["block_text"]) 
levels.append(block) prev_header = block if debug: print("-", [str(level['level']) + "-" + level['block_text'] for level in levels]) block["header_text"] = header_block_text block["header_block_idx"] = header_block_idx block["header_match_idx"] = header_match_idx block["block_idx"] = block_idx level_chain = [] for level in levels: level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]}) # remove a level for header if block_type == "header": level_chain = level_chain[:-1] level_chain.reverse() block["level_chain"] = level_chain # if block_type == "header" or block_type == "table_row": if ( block_type == "header" and not is_rendering_table and 'is_table_start' not in block ): block_texts.append(block["block_text"]) # append text from next block to header block # TODO: something happened here, it messed up the match_text # if block_type == "header" and block_idx + 1 < len(blocks): # block[ # "block_text" # ] += blocks[block_idx+1]['block_text'] block_info.append(block) header_match_idx_offset += 1 elif ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" ) and not is_rendering_table: block_sents = block["block_sents"] header_match_idx_offset += len(block_sents) for sent in block_sents: block_texts.append(sent) block_info.append(block) elif 'is_table_start' in block: is_rendering_table = True if 'has_merged_cells' in block: is_rendering_merged_cells = True elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row": block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if is_rendering_table: if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table: eff_header_block = block["effective_header"] eff_para_block = block["effective_para"] eff_header_block["header_text"] = block["header_text"] eff_header_block["header_block_idx"] = block["block_idx"] eff_header_block["header_match_idx"] = header_match_idx_offset + 1 eff_header_block["level"] = block["level"] + 1 eff_header_block["level_chain"] = block["level_chain"] eff_para_block["header_block_idx"] = block["block_idx"] eff_para_block["header_match_idx"] = header_match_idx_offset + 1 eff_para_block["level"] = block["level"] + 2 eff_para_block["level_chain"] = [ { "block_idx": eff_header_block["block_idx"], "block_text": eff_header_block["block_text"], }, ] + eff_header_block["level_chain"] header_match_idx_offset += 1 block_info.append(block["effective_header"]) block_texts.append(block["effective_header"]["block_text"]) for sent in block["effective_para"]["block_sents"]: block_texts.append(sent) block_info.append(block["effective_para"]) header_match_idx_offset += len(block["effective_para"]["block_sents"]) else: block["table_idx"] = table_idx block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if 'is_table_end' in block: is_rendering_table = False table_idx += 1 return block_texts, block_info def get_block_texts(blocks): block_texts = [] block_info = [] for block in blocks: block_type = block["block_type"] if ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" or block_type == "header" ): block_texts.append(block["block_text"]) block_info.append(block) return block_texts, block_info<fim_middle># cut org_text based on lengths of modified_sent
# cut org_text based on lengths of modified_sent
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
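The completed comment marks the re-alignment step: because every substitution above is length-preserving, the tokenizer's sentence lengths can be walked over the untouched original string to recover the raw sentences. A minimal sketch of that offset walk (hypothetical inputs, mirroring the while loop in sent_tokenize):

org_text = "See no. 5. It helps."
modified_sents = ["See no_ 5.", "It helps."]
offset, sents = 0, []
for ms in modified_sents:
    while offset < len(org_text) and org_text[offset] == " ":
        offset += 1  # skip inter-sentence whitespace, as in the original loop
    sents.append(org_text[offset: offset + len(ms)])
    offset += len(ms)
print(sents)  # ['See no. 5.', 'It helps.']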
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/text_ingestor.py def blocks_to_json(page_blocks): results = [] block_count = 0 for page_idx, blocks in enumerate(page_blocks): result = [] block_start = block_count header_block_idx = -1 header_block_text = "" for block_idx_in_page, block in enumerate(blocks): if block["block_text"]: block_sents = utils.sent_tokenize(block["block_text"]) # header_block_idx = block["header_block_idx"] if block["block_type"] == "header": header_block_idx = block["block_idx"] header_block_text = block["block_text"] result.append( { "block_text": block["block_text"], "block_idx": block["block_idx"], "block_sents": block_sents, "block_type": block["block_type"], "header_block_idx": block_start + header_block_idx, "page_idx": page_idx, "block_idx_in_page": block_start + block_idx_in_page, "header_text": header_block_text, "text_group_start_idx": block["text_group_start_idx"], "block_list": block["block_list"], "level":0, "block_class": block["block_class"] if "block_class" in block else {} }, ) block_count += 1 results.append(result) return results # nlm-ingestor/nlm_ingestor/ingestor/styling_utils.py def tops_2_dict(p_items): tops_2_info = defaultdict(list) idx_2_top = {} for p_idx, p_item in enumerate(p_items): if not p_item.text.strip(): continue style_str = p_item.attrs.get("style", "") if not style_str: continue # do not strip text as trailing white-space is used as a features text = unicodedata.normalize("NFKD", p_item.text) style = get_p_styling_dict(style_str) start_y = style["start_y"] tops_2_info[round(start_y, 0)].append((p_idx, text, style)) idx_2_top[p_idx] = round(start_y, 0) # print(tops_2_info) return tops_2_info, idx_2_top # nlm-ingestor/nlm_ingestor/ingestor/table_parser.py def __init__(self, infos): self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(logging.INFO) self.tables = {} self.two_column_table_idx = set() self.resolved_tables = set() if not infos: return table_infos = [] table_start_idx = None for idx, info in enumerate(infos): if info.get("is_table_start", False) and not info.get("has_merged_cells", False): self.logger.debug(f"Found table start from match_idx:{idx}") table_start_idx = idx table_infos.append(info) elif table_start_idx is not None and info.get("is_table_end", False): table_infos.append(info) self.logger.debug(f"Table ends with match_idx:{idx}") # resolve table try: df = self.resolve_table_from_infos(table_infos) if isinstance(df, pd.DataFrame): self.logger.info( f"Found table at match_idx:{idx} of shape {df.shape}", ) self.tables[table_start_idx] = df if ( df.shape[1] == 1 and df.columns[0] == "_UNKNOWN_COLUMN_1_" and df.index.name == "_UNKNOWN_COLUMN_0_" ): for info_idx in range(len(table_infos)): self.two_column_table_idx.add(idx - info_idx) self.resolved_tables.add(table_infos[0]["table_idx"]) else: self.logger.error( f"Found table at match_idx:{idx} but failed to parse\n{table_infos[:2]}", ) except Exception: self.logger.error( f"Failed to parse table:\n{table_infos[:2]}", exc_info=True, ) # reset table_infos = [] table_start_idx = None elif table_start_idx: table_infos.append(info) """ import logging import re from collections import Counter from collections import defaultdict from . import formatter from . import line_parser from . 
import patterns
from nlm_ingestor.ingestor_utils import spell_utils
from nlm_ingestor.ingestor_utils.utils import sent_tokenize

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
su = spell_utils.SpellUtil()

def stem(line):
    line = line.replace("'s", "")
    line = line.replace("’s", "")
    return line

def check_parentheses(text):
    count = 0
    for i in text:
        if i == "(":
            count += 1
        elif i == ")":
            count -= 1
    return count == 0

def nlm_tokenize(line):
    # print(line)
    tokens = []
    if not line:
        line = ""
    line = line.lower()
    trans_table = line.maketrans("-/", "  ")
    line = line.translate(trans_table)
    line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—"))
    # line = patterns.num_unit.sub(r"100 \1", line)
    line = patterns.num_unit.sub(r"", line)
    line = stem(line)
    words = line.split()
    for word in words:
        if (
            not word.isdigit()
            and not word.endswith("%")
            and not word.startswith("$")
            and not word.endswith("$")
        ):
            tokens.append(word)
    if len(tokens) == 0:
        tokens.append("unknown")
    return tokens

# make sure that there is at least one word which is greater than two characters
def find_floating_chars(line):
    words = line.split(" ")
    for word in words:
        if len(word) > 2:
            return False
    return True

def is_table_row(line):
    line = line_parser.Line(line)
    return line.is_table_row

def should_skip(line, xml=False):
    return len(line) <= 2 if not xml else len(line) == 0

def clean_lines(lines, xml=False):
    result = []
    running_line = ""
    line_buffer = []
    line_type = "para"
    header_block_idx = -1
    block_idx = 0
    line_set = set()
    for line_str in lines:
        # print(line_str)
        line_str = clean_line(line_str)
        if should_skip(line_str, xml=xml):
            continue
        line_without_numbers = re.sub(r"\d+", "", line_str)
        if line_without_numbers in line_set:
            continue
        else:
            line_set.add(line_without_numbers)
        curr_line = line_parser.Line(line_str)
        # this converts strings like 'e x e c u t i v e summary' to 'executive summary'
        if not xml and curr_line.has_spaced_characters:
            line_str = fix_spaced_characters(line_str)
            curr_line = line_parser.Line(line_str)
        if len(line_buffer) > 0:
            # find out if previous line was a discontinuous line
            prev_line = line_buffer[-1]
            logger.debug("========")
            logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
            logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
            # keep connecting lines as long as they seem incomplete
            is_incomplete = prev_line.incomplete_line or (
                len(line_buffer) > 1 and not prev_line.ends_with_period
            )
            logger.debug(
                f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
            )
            if (
                is_incomplete
                and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
            ) or curr_line.continuing_line:
                logger.debug("connecting..")
                running_line = formatter.connect(running_line, curr_line.text)
                line_buffer.append(curr_line)
                # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
                if not line_type == "list_item":
                    line_type = "para"
            else:
                <fim_suffix>
                # remove different types of bulleted list (for better formatting) but do not touch numbered line
                logger.debug("starting new line..")
                # if line_type == "list_item":
                #     running_line = running_line[1:].lstrip()
                if line_type == "header":
                    header_block_idx = block_idx
                block = {
                    "block_idx": block_idx,
                    "block_text": running_line,
                    "block_type": line_type,
                    "text_group_start_idx": -1,
                    "block_list": [],
                    "header_block_idx": header_block_idx,
                    "level": 0,
                }
                result.append(block)
                block_idx = block_idx + 1
                running_line = curr_line.text
                line_buffer = [curr_line]
                line_type = curr_line.line_type
                logger.debug("========")
        else:
            running_line = curr_line.text
            line_type = curr_line.line_type
            line_buffer = [curr_line]
    if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—":
        running_line = running_line[1:].lstrip()
    block = {
        "block_idx": block_idx,
        "block_text": running_line,
        "block_type": line_type,
        "text_group_start_idx": -1,
        "block_list": [],
        "header_block_idx": header_block_idx,
        "level": 0,
    }
    result.append(block)
    return result

def line_list_check(prev_line, curr_line, list_char):
    # if prev_line is list_item and list_char matches curr_line
    if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]:
        return True
    # same char is alpha
    if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha():
        if len(prev_line.text) >= 2 and prev_line.text[1].isupper():
            # spell check first word
            first_word = prev_line.text.split(" ")[0]
            first_word = first_word.replace("'", "")
            correct_word = su.segment(first_word)
            if first_word[1:] == correct_word:
                return True
    # same char is not alpha but not digit
    if prev_line.text[0] == curr_line.text[0] and not (
        prev_line.text[0].isalpha()
        or prev_line.text[0].isdigit()
        or list_char not in ["”", "'", '"', "("]
    ):
        return True
    return False

def should_join_table(prev_line, curr_line, ents_aligned):
    """
    Check if next line should be joined as a tr. This makes no assumption if the current line is a table
    """
    # print()
    # print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list)
    # check list of spaced words
    curr_line_ents = len(prev_line.visual_line.text_list)
    next_line_ents = len(curr_line.visual_line.text_list)
    ent_match = (
        curr_line_ents == next_line_ents and curr_line_ents >= 2
    )  # tr should have at least two elements
    # print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count)
    tab_match = (
        prev_line.visual_line.tab_count == curr_line.visual_line.tab_count
        and curr_line.visual_line.tab_count > 0
    )
    # casing should also be the same
    same_case = (
        prev_line.text[0].islower() == curr_line.text[0].islower()
        or prev_line.text[0].isupper() == curr_line.text[0].isupper()
    )
    colon_check = (
        prev_line.hit_colon
        and curr_line.hit_colon
        and prev_line
        and same_case
        and not prev_line.incomplete_line
    )
    # if prev_line.hit_colon and curr_line.hit_colon:
    #     print()
    #     print("colon check")
    #     print(prev_line.visual_line.text_list)
    #     print(curr_line.visual_line.text_list)
    # col_check
    # print(tab_match, ent_match, colon_check)
    tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count
    return (
        (tab_match and ent_match)
        or colon_check
        or (ents_aligned and ent_match and tab_check)
    )

def check_page_spacing(prev_line, curr_line, spacing_dict):
    # print("^"*50)
    # print("checking page stats")
    # print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text)
    # print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text)
    # print()
    diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y)
    # find best fs reference
    prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs}
    curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs}
    same_fs = prev_line_fs.intersection(curr_line_fs)
    fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs
    min_check = (
        spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None
    )
    max_check = (
        spacing_dict[(fs, diff_top + 1)]
if (fs, diff_top + 1) in spacing_dict else None ) normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3 if min_check or normal_check or max_check: # get all fs in spacing dict # see if the diff top is a min # print("checking space dict") distance_list = [] for val in spacing_dict: if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2: distance_list.append((val, val[1])) # print(distance_list) val = min(distance_list) if len(distance_list) else [] if len(val): join_fs, join_top = val[0] if len(val): join_fs, join_top = val[0] if val[0] == (fs, diff_top): # or close # print("SHOULDJOIN") return True elif ( join_fs == fs and ((diff_top - 1) == join_top) or ((diff_top + 1) == join_top) ): return True return False def compute_overlap( start_x0: float, end_x0: float, start_x1: float, end_x1: float, divide_by_min=True, ) -> float: """ Computes the % of intersection (overlap) of two lines w.r.t. the shortest line """ width_x0 = abs(end_x0 - start_x0) width_x1 = abs(end_x1 - start_x1) if start_x0 <= start_x1 <= end_x0: intersect = min(abs(end_x0 - start_x1), width_x1) elif start_x0 <= end_x1 <= end_x0: intersect = min(abs(end_x1 - start_x0), width_x1) elif start_x1 <= start_x0 <= end_x0 <= end_x1: intersect = abs(end_x0 - start_x0) else: intersect = 0.0 if divide_by_min: intersect /= min(width_x0, width_x1) + 1e-5 else: intersect /= max(width_x0, width_x1) + 1e-5 return intersect def compute_overlap_top_bottom( start_x0: float, end_x0: float, start_x1: float, end_x1: float, ) -> float: """ This is different from the above function. Finds percentage overlap of top to bottom. Score of 100% is possible doesn't reference the shortest line """ width_x1 = abs(end_x1 - start_x1) if width_x1 == 0: return 0.0 if start_x0 <= start_x1: # measure from left to right if end_x1 <= end_x0: # if start and end both less, full in subset return 1.0 return (end_x1 - start_x0) / width_x1 else: # measure from bottom start if end_x1 <= start_x0: return 0.0 return (end_x1 - start_x0) / width_x1 def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1): """ This is different from the above function. Finds percentage overlap of top to bottom. 
Score of 100% is possible doesn't reference the shortest line """ # print(start_x0, end_x0) # print(start_x1, end_x1) if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line # print() # print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0)) return (end_x1 - start_x1) / (end_x0 - start_x0) # other conditions # elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line # return # else: #to the right of bottom line return 1.0 # header check for lines with similar font # header check for lines with similar font def visual_header_check(prev_line, curr_line, same_font): # check top overlap (small) if the font size is bigger # print() # print("visual_header check:") # print("prev", prev_line.text) # print("checking", curr_line.text) # top also has to be higher # print("prev_line.visual_line.start_y, prev_line.visual_line.end_y") # print(prev_line.visual_line.start_y, prev_line.visual_line.end_y) # print(prev_line.visual_line.start_y, curr_line.visual_line.start_y) if prev_line.visual_line.wrapped_page: return False if prev_line.visual_line.start_y < curr_line.visual_line.start_y: prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x # print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x") # print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x) # print("curr_line.visual_line.min_x, curr_line.visual_line.max_x") # print(curr_line.visual_line.min_x, curr_line.visual_line.max_x) # print("prev_line_width / curr_line_width") # print(prev_line_width / curr_line_width) # print("prev_line_width, curr_line_width") # print(prev_line_width, curr_line_width) if curr_line_width == 0: return False # print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x)) if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x): if round(prev_line_width) == round(curr_line_width): # print() # print("NOT A HEADER1") return False offset = 0 # print(prev_line.visual_line.min_x, curr_line.visual_line.min_x) # print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x) if prev_line.visual_line.min_x <= curr_line.visual_line.min_x: offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset # print("(prev_line_width - offset) / curr_line_width") # print((prev_line_width - offset) / curr_line_width) overlap_percentage = (prev_line_width - offset) / curr_line_width different_font_style = ( prev_line.visual_line.fw != curr_line.visual_line.fw or prev_line.visual_line[1] != curr_line.visual_line[1] or prev_line.visual_line.fs > curr_line.visual_line.fs ) if ( overlap_percentage < 0.3 or (different_font_style and overlap_percentage < 0.6) or (prev_line.line_type == "header" and different_font_style) # or (prev_line.is_header and different_font_style) ): # print("HEADER INDENT", prev_line.is_header) # print("overlap rule::", (prev_line_width - offset) / curr_line_width) # print(True) return True # print(False) # print() # print("NOT A HEADER") return False def visual_header_from_stats(prev_line, curr_line, page_stats): prev_fs = prev_line.visual_line.fs curr_fs = curr_line.visual_line.fs median_val = round(page_stats["median_fs"]) max_val = round(max(page_stats["fs_list"])) max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True prev_fs_diff = round(prev_fs - median_val) curr_fs_diff = ( round(curr_fs - median_val) if round(curr_fs - 
median_val) else 0.8 ) # curr_fs is the median varied_set = len(set(page_stats["fs_list"])) >= 4 rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]]) unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"]) prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff # print("prev_fs, curr_fs", prev_fs, curr_fs) # print("unique text") # print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) ) # print("visual_header check", len(set(page_stats["fs_list"]))) # print("varied_set", varied_set, "unique_text", unique_text) # print(rounded_fs_count) # print() # close from max or far enough from median bigger_text = max_val_diff or ( prev_curr_ratio_from_median > 2 ) # TODO text must also be relatively uncommon if varied_set and (unique_text <= 0.08): if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3: # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True # header join if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1): # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True return False # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): def check_tr_alignment(prev_line, curr_line): # print("-=" * 50) # print("check_tr_alignment!") # print(prev_line.text) # print(curr_line.text) # print() prev_ents = len(prev_line.visual_line.text_list) curr_ents = len(curr_line.visual_line.text_list) prev_positions = prev_line.visual_line.start_x_list curr_positions = curr_line.visual_line.start_x_list prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent # print(prev_line_start_ents) # print(curr_line_start_ents) same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1 if len(prev_line_start_ents) == len(curr_line_start_ents): prev_positions = prev_line_start_ents curr_positions = curr_line_start_ents if len(prev_line_start_ents) == len(curr_positions) and len( prev_line_start_ents, ) != len( prev_positions, ): # joined p_tags prev_positions = prev_line_start_ents if not same_ents: # print("check_tr_alignment False1") # print(prev_ents, curr_ents) return False # print("CHECKING POSITIONS") # print(prev_positions) # print(curr_positions) for p_x, c_x in zip(prev_positions, curr_positions): p_x = round(p_x) c_x = round(c_x) if abs(p_x - c_x) > 100: # print("False") # print("check_tr_alignment False3") return False # print("check_tr_alignment True") return True def check_layout(prev_line, curr_line, prev_above_curr): prev_line_width = range( int(prev_line.visual_line.min_x), int(prev_line.visual_line.max_x), ) # weird edge case if not prev_line_width: prev_line_width = range( int(prev_line.visual_line.max_x), int(prev_line.visual_line.min_x), ) curr_line_width = range( int(curr_line.visual_line.min_x), int(curr_line.visual_line.max_x), ) prev_line_width = set(prev_line_width) prev_curr_overlap = prev_line_width.intersection(curr_line_width) if prev_curr_overlap and not prev_above_curr: # print(prev_line.text) # print(curr_line.text) # print("misplaced text group") # print() return True return False def order_blocks(blocks): block_group_dict = defaultdict(list) for idx, block in enumerate(blocks): # print(idx, "block-group", block["group_id"], block["block_type"], block['block_text']) 
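        # bucket the blocks by group_id so each visual group can be re-ordered as a unit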
group_id = block["group_id"] block_group_dict[group_id].append(block) block_group_list = [] # list that holds tuples (group_id, y_pos) for block_group_id in block_group_dict: block_group_list.append( (block_group_id, block_group_dict[block_group_id][0]["y"]), ) # append starting y position of group block_group_list = sorted( block_group_list, key=lambda x: x[1], ) # sort block groups by y position # get list of ordered block group keys ordered_blocks = [] for block_group_id, y in block_group_list: ordered_blocks += block_group_dict[block_group_id] # for b in original_blocks: # re-index blocks and headers based off of new ordering header_idx = 0 for idx, block in enumerate(ordered_blocks): block["block_idx"] = idx if block["block_type"] == "header": header_idx = idx ordered_blocks[idx]["header_block_idx"] = header_idx return ordered_blocks def visual_clean_lines( lines, page_stats={}, page_info_dict={}, page_idx=0, line_set={}, ): page_blocks = [] header_block_idx = -1 block_idx = 0 # block_idx = page_idx style_dict = {} join_font_spacing = False prev_line = None text_list = [] prev_ents = 0 curr_ents = 0 is_incomplete = False colon_rule = False text_group_start = True text_group_start_idx = 0 prev_line = None next_line = None # for idx, line in enumerate(lines[12:14]): sentence_visual_end = False group_id = 0 for idx, line in enumerate(lines): # print(idx) line_str, style_dict, text_list = ( line["text"], line["style"], line["text_list"], ) line_str = " ".join(line_str.split()) if should_skip(line_str): continue if line_str in line_set: continue if len(line_str.split()) > 8: line_set.add(line_str) curr_line = line_parser.Line( line_str=line_str, style_dict=style_dict, text_list=text_list, page_details=page_stats, ) if prev_line is None: # initialize memory of previous line. 
# this will update with join decisions list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "list_char": list_char, "fs": curr_line.visual_line.start_fs, "text_group_start_idx": text_group_start_idx, "block_list": curr_line.visual_line.text_list, "line": curr_line, "y": curr_line.visual_line.start_y, "group_id": group_id, } prev_line = curr_line block_idx += 1 # if (idx <= 3) or (idx >= len(lines) - 3): # line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip() # if line_without_numbers: # # track block_idx for de-duplication # line_set[line_without_numbers].append((page_idx, block_idx)) page_blocks.append(block) continue # print("--" * 50) # print(prev_line.line_type, "\n", prev_line.text) # print(prev_ents) # print(prev_line.visual_line.fw_list) # print(prev_line.visual_line.font_family) # print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text) # print(prev_line.visual_line.mode_fs) # print(curr_line.line_type, "\n", curr_line.text) # print(curr_ents) # print() # print(curr_line.visual_line.font_family) # print(curr_line.visual_line.mode_fs) # print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text) if ( len(prev_line.text) > 1 and len(curr_line.text) > 1 and prev_line.text[:2] == curr_line.text[:2] and prev_line.text[1] == " " and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit()) and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha()) ): curr_line.line_type = "list_item" curr_line.is_list_item = True curr_line.is_list_or_row = True if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["block_type"] = "list_item" page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() same_start_fs = ( abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5 ) same_end_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5 ) same_end_start_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5 ) prev_above_curr = ( True if prev_line.visual_line.end_y < curr_line.visual_line.start_y else False ) y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y top_overlap = compute_overlap_top_bottom( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) bottom_overlap = compute_bottom_top_overlap( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) prev_overlap_curr = True if bottom_overlap or top_overlap else False use_visual_join = True if prev_above_curr and prev_overlap_curr else False if not use_visual_join and prev_line.incomplete_line: join_font_spacing = True if not (prev_line.is_table_row or curr_line.is_table_row): if page_stats["n_lines"] <= 3: join_font_spacing = True else: join_font_spacing = check_page_spacing( prev_line, curr_line, page_stats["fs_and_diff_next_y"], ) # if the font is different and font-family is different different_font_family = ( 
curr_line.visual_line.font_family != prev_line.visual_line.font_family ) different_common_fs = ( prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs ) different_font = ( different_font_family and different_common_fs and not join_font_spacing ) # start and end characters are same font or the mode of fonts of both lines is the same same_font = ( (prev_line.visual_line.fs == curr_line.visual_line.fs) or (same_start_fs and same_end_fs) or same_end_start_fs or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs ) and not different_font prev_ents = ( len(prev_line.visual_line.text_list) if not prev_line.line_type == "list_item" else 0 ) curr_ents = ( len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0 ) ents_aligned = check_tr_alignment(prev_line, curr_line) is_incomplete_sent = ( prev_line.incomplete_line and not prev_line.ends_with_period or prev_line.ends_with_comma ) # logic using line after curr if idx + 1 < len(lines): # this is inefficent as line_parser is called twice, # once for next_line and once for curr_line. next_line = lines[idx + 1] # print("NEXT LINE\n", next_line['text']) next_line_str, next_style_dict, next_text_list = ( next_line["text"], next_line["style"], next_line["text_list"], ) next_line = line_parser.Line( line_str=next_line_str, style_dict=next_style_dict, text_list=next_text_list, page_details=page_stats, ) # if the last line was not a table, check if the next line is a table to avoid single tr if prev_line.line_type != "table_row" and not ents_aligned: # check if the next line is a table and matches curr_line next_line_tr = next_line.line_type == "table_row" or should_join_table( curr_line, next_line, False, ) if not next_line_tr and curr_line.line_type == "table_row": curr_line.line_type = "para" # if the next line is joinable by visual stats but prev and curr are not # don't join the line (only true by x-span check and y is below for prev cur) # if this is not true ignore the rule prev_not_above_next = ( next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y ) next_line_join = False if next_line and check_layout(prev_line, next_line, prev_not_above_next): next_line_join = check_page_spacing( curr_line, next_line, page_stats["fs_and_diff_next_y"], ) # if the prev line is not visually joinable and the curr_next is # make sure the prev_line doesn't join the curr_line curr_next_visual_join = not join_font_spacing and next_line_join # print() # print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line") # print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line) # print("join_font_spacing:,", join_font_spacing) is_incomplete = ( is_incomplete_sent or (join_font_spacing and not sentence_visual_end) or curr_line.continuing_line ) # print("is_incomplete", is_incomplete) has_overlap_with_min = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=True, ) > 0.7 ) is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0 is_visually_apart = (has_overlap_with_min and not is_below) or ( not has_overlap_with_min and is_below ) above_bold_below_not = ( prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0 ) has_overlap_with_max = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, 
prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=False, ) > 0.3 ) is_not_header_over_para = True if ( above_bold_below_not and not has_overlap_with_max and prev_line.line_type == "header" and not prev_line.incomplete_line ): is_not_header_over_para = False # print("header over para check") # print("""above_bold_below_not # and not has_overlap_with_max # and prev_line.line_type == "header" # """) # print(above_bold_below_not) # print(has_overlap_with_max, j) # print(prev_line.line_type == "header") # print() # print(is_not_header_over_para) ########### # List item if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]): prev_line.line_type = "list_item" curr_line.line_type = "list_item" curr_line.is_list_item = True # change prev_line to list item if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() page_blocks[-1]["block_type"] = "list_item" close_text_y = ( curr_line.visual_line.start_y - curr_line.visual_line.mode_fs - prev_line.visual_line.start_y - prev_line.visual_line.mode_fs ) <= 0 aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x title_text = False if len(lines) < 10: title_text = top_overlap == 1.0 and close_text_y and aligned_text visual_header = visual_header_check(prev_line, curr_line, same_font) list_item_rule = curr_line.has_list_char or ( curr_line.numbered_line and not ( (prev_line.incomplete_line and curr_line.continuing_line) or join_font_spacing ) ) last_2_block_tr = False if len(page_blocks) >= 2: last_block_tr = ( page_blocks[-1]["block_type"] == "table_row" and page_blocks[-2]["block_type"] == "table_row" ) if not last_block_tr and curr_line.line_type == "para": # check to join if prev_line.incomplete_line and curr_line.continuing_line: last_2_block_tr = True no_space_join = prev_line.ends_with_period and curr_line.text[0] != " " visual_header_by_stats = visual_header_from_stats( prev_line, curr_line, page_stats, ) header_join = False common_list = curr_line.has_list_char or prev_line.has_list_char if ( visual_header_by_stats and curr_line.incomplete_line and same_font and not (prev_line.is_table_row or curr_line.is_table_row or common_list) ): header_join = True # print("LINEJOIN CHECK") # print("positive\n", "*" * 10) # print(f"\nsame_font:{same_font}", # f"\nis_incomplete:{is_incomplete}", # f"\nis_not_header_over_para:{is_not_header_over_para}") # print("join_font_spacing", join_font_spacing) # print("header join", header_join) # print() # print("negative\n", "*" * 10) # print(f"\nis_visually_apart:{is_visually_apart}", # f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}", # f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}", # f"\ncurr_line table {curr_line.line_type == 'table_row'}", # f"\ncurr_line list {curr_line.is_list_item}", # f"\nvisual_header {visual_header}", # f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}') if ( same_font and not should_join_table(prev_line, curr_line, ents_aligned) and not (curr_line.line_type == "table_row" or list_item_rule) and not (prev_line.line_type == "table_row" and not last_2_block_tr) and is_incomplete and not curr_next_visual_join # is_visually_apart and not visual_header or not check_parentheses(prev_line.text) and is_not_header_over_para and not no_space_join or title_text or header_join ): # print("JOIN") if not is_visually_apart and 
bottom_overlap < 0.5:
                # this would signify end of paragraph
                sentence_visual_end = True
            else:
                sentence_visual_end = False
            if page_stats["n_lines"] <= 3:
                page_blocks[-1]["block_type"] = "header"
            elif (
                not prev_line.line_type == "list_item"
            ):  # and not curr_line.visual_line.is_header:
                page_blocks[-1]["block_type"] = "para"
            new_text = formatter.connect(
                prev_line.text.rstrip(),
                curr_line.text.lstrip(),
            )
            new_text_list = (
                prev_line.visual_line.text_list + curr_line.visual_line.text_list
            )
            # print("Max ex min ex assignment")
            # take the extent of both lines so the joined line spans them fully
            max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
            min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
            prev_line_type = prev_line.line_type
            page_blocks[-1]["block_text"] = new_text
            prev_start_y = prev_line.visual_line.start_y
            curr_start_y = curr_line.visual_line.start_y
            prev_end_y = prev_line.visual_line.end_y
            wrapped_page = prev_line.visual_line.wrapped_page
            # pass the line parser attributes
            prev_line = curr_line
            # add appended text and text_list, preserve the line type
            prev_line.text = new_text
            prev_line.visual_line.start_y = prev_start_y
            prev_line.visual_line.text_list = new_text_list
            prev_line.line_type = prev_line_type
            prev_line.visual_line.min_x = min_x
            prev_line.visual_line.max_x = max_x
            prev_line.visual_line.wrapped_page = wrapped_page
            if curr_start_y < prev_end_y:
                prev_line.visual_line.wrapped_page = True
            # print(prev_start_y)
            # print("Join")
            # print()
            # print("-" * 50)
            # print()
        # new block
        else:
            # print("NEW block")
            # print("*" * 50)
            if not is_visually_apart and bottom_overlap < 0.5:
                # this would signify end of paragraph
                sentence_visual_end = True
            else:
                sentence_visual_end = False
            # print("-"*50)
            colon_rule = (
                prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
            )
            # normal case
            tab_check_join = {
                prev_line.visual_line.tab_count_join,
                prev_line.visual_line.tab_count,
            } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}
            tab_check = sum(tab_check_join) > 0
            # print("-+" * 50)
            # print("TAB POSITIONS")
            # print(prev_line.text)
            # print(prev_line.visual_line.start_x_list)
            # print(prev_line.visual_line.start_x_list_single_ent)
            # print(prev_line.visual_line.tab_count)
            # print(prev_line.visual_line.tab_count_join)
            #
            # print(curr_line.text)
            # print(curr_line.visual_line.start_x_list)
            # print(curr_line.visual_line.start_x_list_single_ent)
            # print(curr_line.visual_line.tab_count)
            # print(curr_line.visual_line.tab_count_join)
            # print("tabcheck", tab_check)
            # print("ents_aligned", ents_aligned)
            # print(prev_ents, curr_ents)
            # print(curr_line.visual_line.text_list)
            # print("-+" * 50)
            if visual_header_by_stats and prev_line.line_type != "table_row":
                page_blocks[-1]["block_type"] = "header"
            elif (
                colon_rule
                and prev_ents == 1
                and prev_line.line_type != "list_item"
                and not (prev_line.incomplete_line and curr_line.continuing_line)
            ):
                # print("Table Conversion")
                # print()
                # print("colon check")
                # print(prev_line.text.split(":"))
                # print(curr_line.text.split(":"))
                # print("TR1")
                new_text_list = prev_line.text.split(":")
                new_text_list = [new_text_list[0] + ":", new_text_list[1:]]
                page_blocks[-1]["block_type"] = "table_row"
                page_blocks[-1]["block_list"] = new_text_list
                if text_group_start:
                    text_group_start = False
                    text_group_start_idx = page_blocks[-1]["block_idx"]
                page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
                curr_line.line_type = "table_row"
                curr_line.is_list_or_row = True
                # print("Table Conversion!")
                # print(prev_ents, curr_ents)
                # print(page_blocks[-1]["block_text"])
                # print("TR3")
            elif (
tab_check and ents_aligned and prev_line.line_type != "list_item" ) or (colon_rule and not prev_line.incomplete_line): # print("Table Conversion") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR2") page_blocks[-1]["block_type"] = "table_row" if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" else: text_group_start = True text_group_start_idx = -1 list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx if (visual_header or visual_header_by_stats) and not ( prev_line.line_type == "list_item" or prev_line.line_type == "numbered_list_item" ): page_blocks[-1]["block_type"] = "header" # print() # print("*" * 40) # print("NEW BLOCK") # print() # print("*" * 40) # print(curr_line.line_type, curr_line.text) # group attribute if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0: group_id += 1 block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "text_group_start_idx": text_group_start_idx, "list_char": list_char, "group_id": group_id, "fs": curr_line.visual_line.start_fs, "x": curr_line.visual_line.start_x, "y": curr_line.visual_line.start_y, "line": curr_line, "block_list": curr_line.visual_line.text_list, } # This is to account for when the headers get false positive #TODO improve header code prev_text = page_blocks[-1]["block_text"] if page_blocks[-1]["block_type"] == "header" and ( len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16 ): page_blocks[-1]["block_type"] = "para" prev_line = curr_line block_idx += 1 page_blocks.append(block) # not too many blocks there may be title text missed if len(page_blocks) <= 2: for idx, block in enumerate(page_blocks): if "." 
not in block["block_text"] and len(block["block_text"].split()) < 10:
                page_blocks[idx]["block_type"] = "header"
    page_blocks = order_blocks(page_blocks)
    return page_blocks, line_set


def clean_line(line):
    line = line.replace("\n", " ")
    line = line.replace("\t", " ")
    line = line.strip()
    return line


def fix_spaced_characters(line_text):
    line_text = re.sub(r"\s+", "", line_text)
    return su.segment(line_text)


def connect(prev, curr):
    has_space = prev.endswith(" ")
    result = prev + ("" if has_space else " ") + curr
    return result


def get_numbers(line):
    # test = re.compile(r"[0-9]+\.?[0-9]?")
    regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$")
    return regex.search(line)


def check_block_join(prev_block, block):
    prev_text = prev_block["block_text"]
    curr_text = block["block_text"]
    blocks_are_paras = (
        prev_block["block_type"] == "para" and block["block_type"] == "para"
    )
    if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras:
        prev_line = line_parser.Line(prev_block["block_text"])
        curr_line = line_parser.Line(block["block_text"])
        if prev_line.incomplete_line or curr_line.continuing_line:
            return True
    return False


def join_blocks(page_blocks, blocks):
    prev_last_block = page_blocks[-1][-1]
    # join the last block of the previous page with the first block of this page;
    # blocks[0] is consumed, so the remaining block indexes shift down by one
    # prev_blocks = page_blocks[-1]
    # last_prev_block = prev_blocks[-1]
    prev_last_block["block_text"] = (
        prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip()
    )
    # note: this appends blocks[0]["block_list"] as a nested list
    prev_last_block["block_list"].append(blocks[0]["block_list"])
    # print(prev_block)
    page_blocks[-1][-1] = prev_last_block
    for block in blocks[1:]:
        block["block_idx"] -= 1
    return page_blocks, blocks[1:]
<fim_middle># commit the line and start a new line
# commit the line and start a new line
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
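# A minimal usage sketch for the overlap helpers in the example above; the
# coordinates are illustrative, and it assumes compute_overlap is importable
# from nlm_ingestor.ingestor.processors as defined there.
from nlm_ingestor.ingestor.processors import compute_overlap

# line A spans x=[0, 10]; line B spans x=[5, 8] and sits fully inside A
print(compute_overlap(0, 10, 5, 8, divide_by_min=True))   # ~1.0: overlap vs. the shorter line
print(compute_overlap(0, 10, 5, 8, divide_by_min=False))  # ~0.3: overlap vs. the longer line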
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/line_parser.py def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def traverse(parent, level, blocks): for child in parent: # handle cases when there's only a <country /> tag if not child.text: continue if len(list(child)) > 0: # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader blocks.append(header_block) traverse(child, level + 1, blocks) else: # print("\t"*(level + 1), child.text) if not title and child.tag.lower().find("title") != -1: self.title = child.text if child.tag != "textblock": # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) # header_text = " ".join(child.tag.split("_")).title() header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader 
blocks.append(header_block) else: level -= 1 lines = child.text.split("\n") # print("\t" * (level + 1), "======") # for line in lines: # print("\t" * (level + 1), line) # print("\t" * (level + 1), "======") col_blocks = processors.clean_lines(lines, xml=True) header_text = blocks[-1]["block_text"] has_header = False for block in col_blocks: # print("\t" * (level + 1), block["block_text"]) inline_header = has_header and block["block_type"] == "para" block["header_text"] = para_header if inline_header else header_text indent_offset = 2 if inline_header else 1 block["level"] = level + indent_offset block["block_idx"] = len(blocks) block["page_idx"] = 0 block["block_sents"] = sent_tokenize(block["block_text"]) block["block_class"] = "nlm-text-body" block["level_chain"] = ( [title, header_text] if title else [header_text] ) if len(col_blocks) == 1: block["block_type"] = "para" blocks.append(block) if block["block_type"] == "header": has_header = True para_header = block["block_text"] # nlm-ingestor/nlm_ingestor/ingestor_utils/parsing_utils.py def find_potential_gaps(gap_count): """ This function checks if a table row can be formed from the current table row spacing scheme. This is for edge cases when tika doesn't properly chunk the cells of a line """ possible_gaps = 0 min_gap = min(gap_count) gap_threshold = [] for gap_size in gap_count: if gap_size > (min_gap * 3): gap_threshold.append(gap_size) possible_gaps += gap_count[gap_size] if len(gap_threshold): return possible_gaps, min(gap_threshold) # suggested splits return [], 0 """ import json import re import numpy as np from nltk import load from nltk import PunktSentenceTokenizer nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) if isinstance(obj, np.floating): return float(obj) if isinstance(obj, np.ndarray): return obj.tolist() return super(NpEncoder, self).default(obj) nlm_abbs = { "u.s", "u.s.a", "n.w", "p.o", "po", "st", "ave", "blvd", "ctr", "cir", "ct", "dr", "mtn", "apt", "hwy", "esq", "fig", "no", "sec", "n.a", "s.a.b", "non-u.s", "cap", 'u.s.c', "ste", } nlm_special_abbs = { "inc", } abbs = nltk_abbs | nlm_abbs nltk_tokenzier = PunktSentenceTokenizer() rules = [] for abb in abbs: # match start of the sentence pattern = fr"^{abb}.\s" replaced = f"{abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match token in sentence pattern = fr"\s{abb}.\s" replaced = f" {abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) for abb in nlm_special_abbs: pattern = fr"{abb}\." 
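    # unlike the generic rules above (whose unescaped "." matches any character),
    # the dot here is escaped, so only a literal "inc."-style token is rewritten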
replaced = f"{abb}_" rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match content inside brackets # (?<=\() ==> starts with "(" # ([^)]+) ==> repeat not ")" # (?=\))") ==> ends with ")" bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))") space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.') quotation_pattern = re.compile(r'[โ€โ€œ"โ€˜โ€™\']') def sent_tokenize(org_texts): if not org_texts: return org_texts sents = [] # in case org_texts has \n, break it into multiple paragraph # edge case for html and markdown for org_text in org_texts.split("\n"): org_text = space_rule.sub(r'\1', org_text) modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925 orig_offset = abs(len(org_text) - len(modified_text)) # do not break bracket for span_group in bracket_rule.finditer(modified_text): start_byte, end_byte = span_group.span() span = modified_text[start_byte:end_byte] <fim_suffix> # if len(span.split()) >= 10: # continue modified_text = modified_text.replace( f"({span})", f"_{span.replace('.','_')}_", ) for rule, replaced in rules: modified_text = rule.sub(replaced, modified_text) # Normalize all the quotation. modified_text = quotation_pattern.sub("\"", modified_text) modified_sents = nltk_tokenzier.tokenize(modified_text) offset = orig_offset sent_idx = 0 while offset < len(modified_text) and sent_idx < len(modified_sents): if modified_text[offset] == " ": offset += 1 continue # cut org_text based on lengths of modified_sent modified_sent = modified_sents[sent_idx] sents.append(org_text[offset: offset + len(modified_sent)]) offset += len(modified_sent) sent_idx += 1 if len(sents) >= 2 and re.match(r"^.\.$", sents[0]): sents[1] = sents[0] + " " + sents[1] sents = sents[1:] return sents def divide_list_into_chunks(lst, n): # looping till length l for i in range(0, len(lst), n): yield lst[i : i + n] def normalize(X): norms = np.einsum("ij,ij->i", X, X) np.sqrt(norms, norms) X /= norms[:, np.newaxis] return X def detect_block_center_aligned(block, page_width): center_location = block["box_style"][1] + block["box_style"][3] / 2 center_aligned = abs(center_location - page_width / 2) < page_width * 0.01 width_check = block["box_style"][3] * 2 < page_width return center_aligned and width_check def detect_block_center_of_page(block, page_height): bottom = block["box_style"][0] + block["box_style"][4] center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3) return center_of_page def check_char_is_word_boundary(c): if c.isalnum(): return False if c in ['-', '_']: return False return True def blocks_to_sents(blocks, flatten_merged_table=False, debug=False): block_texts = [] block_info = [] header_block_idx = -1 header_match_idx = -1 header_match_idx_offset = -1 header_block_text = "" is_rendering_table = False is_rendering_merged_cells = False table_idx = 0 levels = [] prev_header = None block_idx = 0 for block_idx, block in enumerate(blocks): block_type = block["block_type"] if block_type == "header": if debug: print("---", block["level"], block["block_text"]) header_block_text = block["block_text"] header_block_idx = block["block_idx"] header_match_idx = header_match_idx_offset + 1 if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0: while len(levels) > 0 and levels[-1]["level"] >= block["level"]: if debug: print("<<", levels[-1]["level"], levels[-1]["block_text"]) levels.pop(-1) if debug: print(">>", block["block_text"]) 
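            # push this header onto the level stack; it remains in each following
            # block's level_chain until a header of the same or higher level pops it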
levels.append(block) prev_header = block if debug: print("-", [str(level['level']) + "-" + level['block_text'] for level in levels]) block["header_text"] = header_block_text block["header_block_idx"] = header_block_idx block["header_match_idx"] = header_match_idx block["block_idx"] = block_idx level_chain = [] for level in levels: level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]}) # remove a level for header if block_type == "header": level_chain = level_chain[:-1] level_chain.reverse() block["level_chain"] = level_chain # if block_type == "header" or block_type == "table_row": if ( block_type == "header" and not is_rendering_table and 'is_table_start' not in block ): block_texts.append(block["block_text"]) # append text from next block to header block # TODO: something happened here, it messed up the match_text # if block_type == "header" and block_idx + 1 < len(blocks): # block[ # "block_text" # ] += blocks[block_idx+1]['block_text'] block_info.append(block) header_match_idx_offset += 1 elif ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" ) and not is_rendering_table: block_sents = block["block_sents"] header_match_idx_offset += len(block_sents) for sent in block_sents: block_texts.append(sent) block_info.append(block) elif 'is_table_start' in block: is_rendering_table = True if 'has_merged_cells' in block: is_rendering_merged_cells = True elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row": block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if is_rendering_table: if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table: eff_header_block = block["effective_header"] eff_para_block = block["effective_para"] eff_header_block["header_text"] = block["header_text"] eff_header_block["header_block_idx"] = block["block_idx"] eff_header_block["header_match_idx"] = header_match_idx_offset + 1 eff_header_block["level"] = block["level"] + 1 eff_header_block["level_chain"] = block["level_chain"] eff_para_block["header_block_idx"] = block["block_idx"] eff_para_block["header_match_idx"] = header_match_idx_offset + 1 eff_para_block["level"] = block["level"] + 2 eff_para_block["level_chain"] = [ { "block_idx": eff_header_block["block_idx"], "block_text": eff_header_block["block_text"], }, ] + eff_header_block["level_chain"] header_match_idx_offset += 1 block_info.append(block["effective_header"]) block_texts.append(block["effective_header"]["block_text"]) for sent in block["effective_para"]["block_sents"]: block_texts.append(sent) block_info.append(block["effective_para"]) header_match_idx_offset += len(block["effective_para"]["block_sents"]) else: block["table_idx"] = table_idx block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if 'is_table_end' in block: is_rendering_table = False table_idx += 1 return block_texts, block_info def get_block_texts(blocks): block_texts = [] block_info = [] for block in blocks: block_type = block["block_type"] if ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" or block_type == "header" ): block_texts.append(block["block_text"]) block_info.append(block) return block_texts, block_info<fim_middle># skip this logic when span is too big? disabled for now
# skip this logic when span is too big? disabled for now
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
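# A minimal sketch of the sentence splitter in the example above; the input is
# illustrative, and it assumes this module is importable as
# nlm_ingestor.ingestor_utils.utils with the NLTK punkt model installed.
from nlm_ingestor.ingestor_utils.utils import sent_tokenize

# "fig" is in the abbreviation set, so the rewrite rules keep punkt from
# splitting after "fig."; this call should therefore yield two sentences, not three.
print(sent_tokenize("See fig. 2 of the report. Costs fell 3% last year."))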
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/text_ingestor.py def blocks_to_json(page_blocks): results = [] block_count = 0 for page_idx, blocks in enumerate(page_blocks): result = [] block_start = block_count header_block_idx = -1 header_block_text = "" for block_idx_in_page, block in enumerate(blocks): if block["block_text"]: block_sents = utils.sent_tokenize(block["block_text"]) # header_block_idx = block["header_block_idx"] if block["block_type"] == "header": header_block_idx = block["block_idx"] header_block_text = block["block_text"] result.append( { "block_text": block["block_text"], "block_idx": block["block_idx"], "block_sents": block_sents, "block_type": block["block_type"], "header_block_idx": block_start + header_block_idx, "page_idx": page_idx, "block_idx_in_page": block_start + block_idx_in_page, "header_text": header_block_text, "text_group_start_idx": block["text_group_start_idx"], "block_list": block["block_list"], "level":0, "block_class": block["block_class"] if "block_class" in block else {} }, ) block_count += 1 results.append(result) return results # nlm-ingestor/nlm_ingestor/ingestor/styling_utils.py def tops_2_dict(p_items): tops_2_info = defaultdict(list) idx_2_top = {} for p_idx, p_item in enumerate(p_items): if not p_item.text.strip(): continue style_str = p_item.attrs.get("style", "") if not style_str: continue # do not strip text as trailing white-space is used as a features text = unicodedata.normalize("NFKD", p_item.text) style = get_p_styling_dict(style_str) start_y = style["start_y"] tops_2_info[round(start_y, 0)].append((p_idx, text, style)) idx_2_top[p_idx] = round(start_y, 0) # print(tops_2_info) return tops_2_info, idx_2_top # nlm-ingestor/nlm_ingestor/ingestor/table_parser.py def __init__(self, infos): self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(logging.INFO) self.tables = {} self.two_column_table_idx = set() self.resolved_tables = set() if not infos: return table_infos = [] table_start_idx = None for idx, info in enumerate(infos): if info.get("is_table_start", False) and not info.get("has_merged_cells", False): self.logger.debug(f"Found table start from match_idx:{idx}") table_start_idx = idx table_infos.append(info) elif table_start_idx is not None and info.get("is_table_end", False): table_infos.append(info) self.logger.debug(f"Table ends with match_idx:{idx}") # resolve table try: df = self.resolve_table_from_infos(table_infos) if isinstance(df, pd.DataFrame): self.logger.info( f"Found table at match_idx:{idx} of shape {df.shape}", ) self.tables[table_start_idx] = df if ( df.shape[1] == 1 and df.columns[0] == "_UNKNOWN_COLUMN_1_" and df.index.name == "_UNKNOWN_COLUMN_0_" ): for info_idx in range(len(table_infos)): self.two_column_table_idx.add(idx - info_idx) self.resolved_tables.add(table_infos[0]["table_idx"]) else: self.logger.error( f"Found table at match_idx:{idx} but failed to parse\n{table_infos[:2]}", ) except Exception: self.logger.error( f"Failed to parse table:\n{table_infos[:2]}", exc_info=True, ) # reset table_infos = [] table_start_idx = None elif table_start_idx: table_infos.append(info) """ import logging import re from collections import Counter from collections import defaultdict from . import formatter from . import line_parser from . 
import patterns from nlm_ingestor.ingestor_utils import spell_utils from nlm_ingestor.ingestor_utils.utils import sent_tokenize logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) su = spell_utils.SpellUtil() def stem(line): line = line.replace("'s", "") line = line.replace("โ€™s", "") return line def check_parentheses(text): count = 0 for i in text: if i == "(": count += 1 elif i == ")": count -= 1 return count == 0 def nlm_tokenize(line): # print(line) tokens = [] if not line: line = "" line = line.lower() trans_table = line.maketrans("-/", " ") line = line.translate(trans_table) line = line.translate(str.maketrans("", "", "๏ฟฝ\\(*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\)'\"โ€”")) # line = patterns.num_unit.sub(r"100 \1", line) line = patterns.num_unit.sub(r"", line) line = stem(line) words = line.split() for word in words: if ( not word.isdigit() and not word.endswith("%") and not word.startswith("$") and not word.endswith("$") ): tokens.append(word) if len(tokens) == 0: tokens.append("unknown") return tokens # make sure that there is at least one word which is greater than two characters def find_floating_chars(line): words = line.split(" ") for word in words: if len(word) > 2: return False return True def is_table_row(line): line = line_parser.Line(line) return line.is_table_row def should_skip(line, xml=False): return len(line) <= 2 if not xml else len(line) == 0 def clean_lines(lines, xml=False): result = [] running_line = "" line_buffer = [] line_type = "para" header_block_idx = -1 block_idx = 0 line_set = set() for line_str in lines: # print(line_str) line_str = clean_line(line_str) if should_skip(line_str, xml=xml): continue line_without_numbers = re.sub(r"\d+", "", line_str) if line_without_numbers in line_set: continue else: line_set.add(line_without_numbers) curr_line = line_parser.Line(line_str) # this converst strings like 'e x e c u t i v e summary' to 'executive summary' if not xml and curr_line.has_spaced_characters: line_str = fix_spaced_characters(line_str) curr_line = line_parser.Line(line_str) if len(line_buffer) > 0: # find out if previous line was a discontinous line prev_line = line_buffer[-1] logger.debug("========") logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n") logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n") # keep connecting lines as long as they seem incomplete is_incomplete = prev_line.incomplete_line or ( len(line_buffer) > 1 and not prev_line.ends_with_period ) logger.debug( f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}", ) if ( is_incomplete and not (curr_line.is_list_or_row or curr_line.line_type == "list_item") ) or curr_line.continuing_line: logger.debug("connecting..") running_line = formatter.connect(running_line, curr_line.text) line_buffer.append(curr_line) <fim_suffix> if not line_type == "list_item": line_type = "para" else: # commit the line and start a new line # remove different types of bulletted list (for better formatting) but do not touch numbered line logger.debug("starting new line..") # if line_type == "list_item": # running_line = running_line[1:].lstrip() if line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) block_idx = block_idx + 1 running_line = curr_line.text line_buffer = [curr_line] line_type = curr_line.line_type 
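                # buffer reset complete: curr_line now seeds the next running block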
logger.debug("========") else: running_line = curr_line.text line_type = curr_line.line_type line_buffer = [curr_line] if line_type == "list_item" and running_line[0] in "๏ฟฝ\\*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\'\"โ€”": running_line = running_line[1:].lstrip() block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) return result def line_list_check(prev_line, curr_line, list_char): # if prev_line is list_item and list_char matches curr_line if list_char == curr_line.text[0] and list_char not in ["โ€", "'", '"', "("]: return True # same char is alpha if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha(): if len(prev_line.text) >= 2 and prev_line.text[1].isupper(): # spell check first word first_word = prev_line.text.split(" ")[0] first_word = first_word.replace("'", "") correct_word = su.segment(first_word) if first_word[1:] == correct_word: return True # same char is not alpha but not digit if prev_line.text[0] == curr_line.text[0] and not ( prev_line.text[0].isalpha() or prev_line.text[0].isdigit() or list_char not in ["โ€", "'", '"', "("] ): return True return False def should_join_table(prev_line, curr_line, ents_aligned): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # print() # print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list) # check list of spaced words curr_line_ents = len(prev_line.visual_line.text_list) next_line_ents = len(curr_line.visual_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count) tab_match = ( prev_line.visual_line.tab_count == curr_line.visual_line.tab_count and curr_line.visual_line.tab_count > 0 ) # casing should also be the same same_case = ( prev_line.text[0].islower() == curr_line.text[0].islower() or prev_line.text[0].isupper() == curr_line.text[0].isupper() ) colon_check = ( prev_line.hit_colon and curr_line.hit_colon and prev_line and same_case and not prev_line.incomplete_line ) # if prev_line.hit_colon and curr_line.hit_colon: # print() # print("colon check") # print(prev_line.visual_line.text_list) # print(curr_line.visual_line.text_list) # col_check # print(tab_match, ent_match, colon_check) tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count return ( (tab_match and ent_match) or colon_check or (ents_aligned and ent_match and tab_check) ) def check_page_spacing(prev_line, curr_line, spacing_dict): # print("^"*50) # print("checking page stats") # print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text) # print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text) # print() diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y) # find best fs reference prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs} curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs} same_fs = prev_line_fs.intersection(curr_line_fs) fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs min_check = ( spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None ) max_check = ( spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None ) normal_check = (fs, 
diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3 if min_check or normal_check or max_check: # get all fs in spacing dict # see if the diff top is a min # print("checking space dict") distance_list = [] for val in spacing_dict: if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2: distance_list.append((val, val[1])) # print(distance_list) val = min(distance_list) if len(distance_list) else [] if len(val): join_fs, join_top = val[0] if len(val): join_fs, join_top = val[0] if val[0] == (fs, diff_top): # or close # print("SHOULDJOIN") return True elif ( join_fs == fs and ((diff_top - 1) == join_top) or ((diff_top + 1) == join_top) ): return True return False def compute_overlap( start_x0: float, end_x0: float, start_x1: float, end_x1: float, divide_by_min=True, ) -> float: """ Computes the % of intersection (overlap) of two lines w.r.t. the shortest line """ width_x0 = abs(end_x0 - start_x0) width_x1 = abs(end_x1 - start_x1) if start_x0 <= start_x1 <= end_x0: intersect = min(abs(end_x0 - start_x1), width_x1) elif start_x0 <= end_x1 <= end_x0: intersect = min(abs(end_x1 - start_x0), width_x1) elif start_x1 <= start_x0 <= end_x0 <= end_x1: intersect = abs(end_x0 - start_x0) else: intersect = 0.0 if divide_by_min: intersect /= min(width_x0, width_x1) + 1e-5 else: intersect /= max(width_x0, width_x1) + 1e-5 return intersect def compute_overlap_top_bottom( start_x0: float, end_x0: float, start_x1: float, end_x1: float, ) -> float: """ This is different from the above function. Finds percentage overlap of top to bottom. Score of 100% is possible doesn't reference the shortest line """ width_x1 = abs(end_x1 - start_x1) if width_x1 == 0: return 0.0 if start_x0 <= start_x1: # measure from left to right if end_x1 <= end_x0: # if start and end both less, full in subset return 1.0 return (end_x1 - start_x0) / width_x1 else: # measure from bottom start if end_x1 <= start_x0: return 0.0 return (end_x1 - start_x0) / width_x1 def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1): """ This is different from the above function. Finds percentage overlap of top to bottom. 
Score of 100% is possible doesn't reference the shortest line """ # print(start_x0, end_x0) # print(start_x1, end_x1) if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line # print() # print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0)) return (end_x1 - start_x1) / (end_x0 - start_x0) # other conditions # elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line # return # else: #to the right of bottom line return 1.0 # header check for lines with similar font # header check for lines with similar font def visual_header_check(prev_line, curr_line, same_font): # check top overlap (small) if the font size is bigger # print() # print("visual_header check:") # print("prev", prev_line.text) # print("checking", curr_line.text) # top also has to be higher # print("prev_line.visual_line.start_y, prev_line.visual_line.end_y") # print(prev_line.visual_line.start_y, prev_line.visual_line.end_y) # print(prev_line.visual_line.start_y, curr_line.visual_line.start_y) if prev_line.visual_line.wrapped_page: return False if prev_line.visual_line.start_y < curr_line.visual_line.start_y: prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x # print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x") # print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x) # print("curr_line.visual_line.min_x, curr_line.visual_line.max_x") # print(curr_line.visual_line.min_x, curr_line.visual_line.max_x) # print("prev_line_width / curr_line_width") # print(prev_line_width / curr_line_width) # print("prev_line_width, curr_line_width") # print(prev_line_width, curr_line_width) if curr_line_width == 0: return False # print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x)) if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x): if round(prev_line_width) == round(curr_line_width): # print() # print("NOT A HEADER1") return False offset = 0 # print(prev_line.visual_line.min_x, curr_line.visual_line.min_x) # print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x) if prev_line.visual_line.min_x <= curr_line.visual_line.min_x: offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset # print("(prev_line_width - offset) / curr_line_width") # print((prev_line_width - offset) / curr_line_width) overlap_percentage = (prev_line_width - offset) / curr_line_width different_font_style = ( prev_line.visual_line.fw != curr_line.visual_line.fw or prev_line.visual_line[1] != curr_line.visual_line[1] or prev_line.visual_line.fs > curr_line.visual_line.fs ) if ( overlap_percentage < 0.3 or (different_font_style and overlap_percentage < 0.6) or (prev_line.line_type == "header" and different_font_style) # or (prev_line.is_header and different_font_style) ): # print("HEADER INDENT", prev_line.is_header) # print("overlap rule::", (prev_line_width - offset) / curr_line_width) # print(True) return True # print(False) # print() # print("NOT A HEADER") return False def visual_header_from_stats(prev_line, curr_line, page_stats): prev_fs = prev_line.visual_line.fs curr_fs = curr_line.visual_line.fs median_val = round(page_stats["median_fs"]) max_val = round(max(page_stats["fs_list"])) max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True prev_fs_diff = round(prev_fs - median_val) curr_fs_diff = ( round(curr_fs - median_val) if round(curr_fs - 
median_val) else 0.8 ) # curr_fs is the median varied_set = len(set(page_stats["fs_list"])) >= 4 rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]]) unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"]) prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff # print("prev_fs, curr_fs", prev_fs, curr_fs) # print("unique text") # print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) ) # print("visual_header check", len(set(page_stats["fs_list"]))) # print("varied_set", varied_set, "unique_text", unique_text) # print(rounded_fs_count) # print() # close from max or far enough from median bigger_text = max_val_diff or ( prev_curr_ratio_from_median > 2 ) # TODO text must also be relatively uncommon if varied_set and (unique_text <= 0.08): if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3: # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True # header join if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1): # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True return False # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): def check_tr_alignment(prev_line, curr_line): # print("-=" * 50) # print("check_tr_alignment!") # print(prev_line.text) # print(curr_line.text) # print() prev_ents = len(prev_line.visual_line.text_list) curr_ents = len(curr_line.visual_line.text_list) prev_positions = prev_line.visual_line.start_x_list curr_positions = curr_line.visual_line.start_x_list prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent # print(prev_line_start_ents) # print(curr_line_start_ents) same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1 if len(prev_line_start_ents) == len(curr_line_start_ents): prev_positions = prev_line_start_ents curr_positions = curr_line_start_ents if len(prev_line_start_ents) == len(curr_positions) and len( prev_line_start_ents, ) != len( prev_positions, ): # joined p_tags prev_positions = prev_line_start_ents if not same_ents: # print("check_tr_alignment False1") # print(prev_ents, curr_ents) return False # print("CHECKING POSITIONS") # print(prev_positions) # print(curr_positions) for p_x, c_x in zip(prev_positions, curr_positions): p_x = round(p_x) c_x = round(c_x) if abs(p_x - c_x) > 100: # print("False") # print("check_tr_alignment False3") return False # print("check_tr_alignment True") return True def check_layout(prev_line, curr_line, prev_above_curr): prev_line_width = range( int(prev_line.visual_line.min_x), int(prev_line.visual_line.max_x), ) # weird edge case if not prev_line_width: prev_line_width = range( int(prev_line.visual_line.max_x), int(prev_line.visual_line.min_x), ) curr_line_width = range( int(curr_line.visual_line.min_x), int(curr_line.visual_line.max_x), ) prev_line_width = set(prev_line_width) prev_curr_overlap = prev_line_width.intersection(curr_line_width) if prev_curr_overlap and not prev_above_curr: # print(prev_line.text) # print(curr_line.text) # print("misplaced text group") # print() return True return False def order_blocks(blocks): block_group_dict = defaultdict(list) for idx, block in enumerate(blocks): # print(idx, "block-group", block["group_id"], block["block_type"], block['block_text']) 
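        # group the blocks by group_id so each visual group moves as one unit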
group_id = block["group_id"] block_group_dict[group_id].append(block) block_group_list = [] # list that holds tuples (group_id, y_pos) for block_group_id in block_group_dict: block_group_list.append( (block_group_id, block_group_dict[block_group_id][0]["y"]), ) # append starting y position of group block_group_list = sorted( block_group_list, key=lambda x: x[1], ) # sort block groups by y position # get list of ordered block group keys ordered_blocks = [] for block_group_id, y in block_group_list: ordered_blocks += block_group_dict[block_group_id] # for b in original_blocks: # re-index blocks and headers based off of new ordering header_idx = 0 for idx, block in enumerate(ordered_blocks): block["block_idx"] = idx if block["block_type"] == "header": header_idx = idx ordered_blocks[idx]["header_block_idx"] = header_idx return ordered_blocks def visual_clean_lines( lines, page_stats={}, page_info_dict={}, page_idx=0, line_set={}, ): page_blocks = [] header_block_idx = -1 block_idx = 0 # block_idx = page_idx style_dict = {} join_font_spacing = False prev_line = None text_list = [] prev_ents = 0 curr_ents = 0 is_incomplete = False colon_rule = False text_group_start = True text_group_start_idx = 0 prev_line = None next_line = None # for idx, line in enumerate(lines[12:14]): sentence_visual_end = False group_id = 0 for idx, line in enumerate(lines): # print(idx) line_str, style_dict, text_list = ( line["text"], line["style"], line["text_list"], ) line_str = " ".join(line_str.split()) if should_skip(line_str): continue if line_str in line_set: continue if len(line_str.split()) > 8: line_set.add(line_str) curr_line = line_parser.Line( line_str=line_str, style_dict=style_dict, text_list=text_list, page_details=page_stats, ) if prev_line is None: # initialize memory of previous line. 
# this will update with join decisions list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "list_char": list_char, "fs": curr_line.visual_line.start_fs, "text_group_start_idx": text_group_start_idx, "block_list": curr_line.visual_line.text_list, "line": curr_line, "y": curr_line.visual_line.start_y, "group_id": group_id, } prev_line = curr_line block_idx += 1 # if (idx <= 3) or (idx >= len(lines) - 3): # line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip() # if line_without_numbers: # # track block_idx for de-duplication # line_set[line_without_numbers].append((page_idx, block_idx)) page_blocks.append(block) continue # print("--" * 50) # print(prev_line.line_type, "\n", prev_line.text) # print(prev_ents) # print(prev_line.visual_line.fw_list) # print(prev_line.visual_line.font_family) # print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text) # print(prev_line.visual_line.mode_fs) # print(curr_line.line_type, "\n", curr_line.text) # print(curr_ents) # print() # print(curr_line.visual_line.font_family) # print(curr_line.visual_line.mode_fs) # print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text) if ( len(prev_line.text) > 1 and len(curr_line.text) > 1 and prev_line.text[:2] == curr_line.text[:2] and prev_line.text[1] == " " and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit()) and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha()) ): curr_line.line_type = "list_item" curr_line.is_list_item = True curr_line.is_list_or_row = True if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["block_type"] = "list_item" page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() same_start_fs = ( abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5 ) same_end_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5 ) same_end_start_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5 ) prev_above_curr = ( True if prev_line.visual_line.end_y < curr_line.visual_line.start_y else False ) y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y top_overlap = compute_overlap_top_bottom( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) bottom_overlap = compute_bottom_top_overlap( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) prev_overlap_curr = True if bottom_overlap or top_overlap else False use_visual_join = True if prev_above_curr and prev_overlap_curr else False if not use_visual_join and prev_line.incomplete_line: join_font_spacing = True if not (prev_line.is_table_row or curr_line.is_table_row): if page_stats["n_lines"] <= 3: join_font_spacing = True else: join_font_spacing = check_page_spacing( prev_line, curr_line, page_stats["fs_and_diff_next_y"], ) # if the font is different and font-family is different different_font_family = ( 
curr_line.visual_line.font_family != prev_line.visual_line.font_family ) different_common_fs = ( prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs ) different_font = ( different_font_family and different_common_fs and not join_font_spacing ) # start and end characters are same font or the mode of fonts of both lines is the same same_font = ( (prev_line.visual_line.fs == curr_line.visual_line.fs) or (same_start_fs and same_end_fs) or same_end_start_fs or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs ) and not different_font prev_ents = ( len(prev_line.visual_line.text_list) if not prev_line.line_type == "list_item" else 0 ) curr_ents = ( len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0 ) ents_aligned = check_tr_alignment(prev_line, curr_line) is_incomplete_sent = ( prev_line.incomplete_line and not prev_line.ends_with_period or prev_line.ends_with_comma ) # logic using line after curr if idx + 1 < len(lines): # this is inefficent as line_parser is called twice, # once for next_line and once for curr_line. next_line = lines[idx + 1] # print("NEXT LINE\n", next_line['text']) next_line_str, next_style_dict, next_text_list = ( next_line["text"], next_line["style"], next_line["text_list"], ) next_line = line_parser.Line( line_str=next_line_str, style_dict=next_style_dict, text_list=next_text_list, page_details=page_stats, ) # if the last line was not a table, check if the next line is a table to avoid single tr if prev_line.line_type != "table_row" and not ents_aligned: # check if the next line is a table and matches curr_line next_line_tr = next_line.line_type == "table_row" or should_join_table( curr_line, next_line, False, ) if not next_line_tr and curr_line.line_type == "table_row": curr_line.line_type = "para" # if the next line is joinable by visual stats but prev and curr are not # don't join the line (only true by x-span check and y is below for prev cur) # if this is not true ignore the rule prev_not_above_next = ( next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y ) next_line_join = False if next_line and check_layout(prev_line, next_line, prev_not_above_next): next_line_join = check_page_spacing( curr_line, next_line, page_stats["fs_and_diff_next_y"], ) # if the prev line is not visually joinable and the curr_next is # make sure the prev_line doesn't join the curr_line curr_next_visual_join = not join_font_spacing and next_line_join # print() # print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line") # print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line) # print("join_font_spacing:,", join_font_spacing) is_incomplete = ( is_incomplete_sent or (join_font_spacing and not sentence_visual_end) or curr_line.continuing_line ) # print("is_incomplete", is_incomplete) has_overlap_with_min = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=True, ) > 0.7 ) is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0 is_visually_apart = (has_overlap_with_min and not is_below) or ( not has_overlap_with_min and is_below ) above_bold_below_not = ( prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0 ) has_overlap_with_max = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, 
prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=False, ) > 0.3 ) is_not_header_over_para = True if ( above_bold_below_not and not has_overlap_with_max and prev_line.line_type == "header" and not prev_line.incomplete_line ): is_not_header_over_para = False # print("header over para check") # print("""above_bold_below_not # and not has_overlap_with_max # and prev_line.line_type == "header" # """) # print(above_bold_below_not) # print(has_overlap_with_max, j) # print(prev_line.line_type == "header") # print() # print(is_not_header_over_para) ########### # List item if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]): prev_line.line_type = "list_item" curr_line.line_type = "list_item" curr_line.is_list_item = True # change prev_line to list item if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() page_blocks[-1]["block_type"] = "list_item" close_text_y = ( curr_line.visual_line.start_y - curr_line.visual_line.mode_fs - prev_line.visual_line.start_y - prev_line.visual_line.mode_fs ) <= 0 aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x title_text = False if len(lines) < 10: title_text = top_overlap == 1.0 and close_text_y and aligned_text visual_header = visual_header_check(prev_line, curr_line, same_font) list_item_rule = curr_line.has_list_char or ( curr_line.numbered_line and not ( (prev_line.incomplete_line and curr_line.continuing_line) or join_font_spacing ) ) last_2_block_tr = False if len(page_blocks) >= 2: last_block_tr = ( page_blocks[-1]["block_type"] == "table_row" and page_blocks[-2]["block_type"] == "table_row" ) if not last_block_tr and curr_line.line_type == "para": # check to join if prev_line.incomplete_line and curr_line.continuing_line: last_2_block_tr = True no_space_join = prev_line.ends_with_period and curr_line.text[0] != " " visual_header_by_stats = visual_header_from_stats( prev_line, curr_line, page_stats, ) header_join = False common_list = curr_line.has_list_char or prev_line.has_list_char if ( visual_header_by_stats and curr_line.incomplete_line and same_font and not (prev_line.is_table_row or curr_line.is_table_row or common_list) ): header_join = True # print("LINEJOIN CHECK") # print("positive\n", "*" * 10) # print(f"\nsame_font:{same_font}", # f"\nis_incomplete:{is_incomplete}", # f"\nis_not_header_over_para:{is_not_header_over_para}") # print("join_font_spacing", join_font_spacing) # print("header join", header_join) # print() # print("negative\n", "*" * 10) # print(f"\nis_visually_apart:{is_visually_apart}", # f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}", # f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}", # f"\ncurr_line table {curr_line.line_type == 'table_row'}", # f"\ncurr_line list {curr_line.is_list_item}", # f"\nvisual_header {visual_header}", # f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}') if ( same_font and not should_join_table(prev_line, curr_line, ents_aligned) and not (curr_line.line_type == "table_row" or list_item_rule) and not (prev_line.line_type == "table_row" and not last_2_block_tr) and is_incomplete and not curr_next_visual_join # is_visually_apart and not visual_header or not check_parentheses(prev_line.text) and is_not_header_over_para and not no_space_join or title_text or header_join ): # print("JOIN") if not is_visually_apart and 
bottom_overlap < 0.5:
                # this would signify end of paragraph
                sentence_visual_end = True
            else:
                sentence_visual_end = False

            if page_stats["n_lines"] <= 3:
                page_blocks[-1]["block_type"] = "header"
            elif (
                not prev_line.line_type == "list_item"
            ):  # and not curr_line.visual_line.is_header:
                page_blocks[-1]["block_type"] = "para"

            new_text = formatter.connect(
                prev_line.text.rstrip(),
                curr_line.text.lstrip(),
            )
            new_text_list = (
                prev_line.visual_line.text_list + curr_line.visual_line.text_list
            )
            # print("Max ex min ex assignment")
            # take the max across both lines (the original compared prev against itself)
            max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
            min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
            prev_line_type = prev_line.line_type

            page_blocks[-1]["block_text"] = new_text
            prev_start_y = prev_line.visual_line.start_y
            curr_start_y = curr_line.visual_line.start_y
            prev_end_y = prev_line.visual_line.end_y

            wrapped_page = prev_line.visual_line.wrapped_page

            # pass the line parser attributes
            prev_line = curr_line

            # add appended text and text_list, preserve the line type
            prev_line.text = new_text
            prev_line.visual_line.start_y = prev_start_y
            prev_line.visual_line.text_list = new_text_list
            prev_line.line_type = prev_line_type
            prev_line.visual_line.min_x = min_x
            prev_line.visual_line.max_x = max_x
            prev_line.visual_line.wrapped_page = wrapped_page

            if curr_start_y < prev_end_y:
                prev_line.visual_line.wrapped_page = True
            # print(prev_start_y)
            # print("Join")
            # print()
            # print("-" * 50)
            # print()
        # new block
        else:
            # print("NEW block")
            # print("*" * 50)

            if not is_visually_apart and bottom_overlap < 0.5:
                # this would signify end of paragraph
                sentence_visual_end = True
            else:
                sentence_visual_end = False
            # print("-"*50)
            colon_rule = (
                prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
            )
            # normal case
            tab_check_join = {
                prev_line.visual_line.tab_count_join,
                prev_line.visual_line.tab_count,
            } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}

            tab_check = sum(tab_check_join) > 0

            # print("-+" * 50)
            # print("TAB POSITIONS")
            # print(prev_line.text)
            # print(prev_line.visual_line.start_x_list)
            # print(prev_line.visual_line.start_x_list_single_ent)
            # print(prev_line.visual_line.tab_count)
            # print(prev_line.visual_line.tab_count_join)
            #
            # print(curr_line.text)
            # print(curr_line.visual_line.start_x_list)
            # print(curr_line.visual_line.start_x_list_single_ent)
            # print(curr_line.visual_line.tab_count)
            # print(curr_line.visual_line.tab_count_join)
            # print("tabcheck", tab_check)
            # print("ents_aligned", ents_aligned)
            # print(prev_ents, curr_ents)
            # print(curr_line.visual_line.text_list)
            # print("-+" * 50)

            if visual_header_by_stats and prev_line.line_type != "table_row":
                page_blocks[-1]["block_type"] = "header"
            elif (
                colon_rule
                and prev_ents == 1
                and prev_line.line_type != "list_item"
                and not (prev_line.incomplete_line and curr_line.continuing_line)
            ):
                # print("Table Conversion")
                # print()
                # print("colon check")
                # print(prev_line.text.split(":"))
                # print(curr_line.text.split(":"))
                # print("TR1")
                new_text_list = prev_line.text.split(":")
                new_text_list = [new_text_list[0] + ":", new_text_list[1:]]
                page_blocks[-1]["block_type"] = "table_row"
                # assign the split list (the original used a bare annotation, which is a no-op)
                page_blocks[-1]["block_list"] = new_text_list
                if text_group_start:
                    text_group_start = False
                    text_group_start_idx = page_blocks[-1]["block_idx"]
                    page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
                curr_line.line_type = "table_row"
                curr_line.is_list_or_row = True
                # print("Table Conversion!")
                # print(prev_ents, curr_ents)
                # print(page_blocks[-1]["block_text"])
                # print("TR3")
            elif (
tab_check and ents_aligned and prev_line.line_type != "list_item" ) or (colon_rule and not prev_line.incomplete_line): # print("Table Conversion") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR2") page_blocks[-1]["block_type"] = "table_row" if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" else: text_group_start = True text_group_start_idx = -1 list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx if (visual_header or visual_header_by_stats) and not ( prev_line.line_type == "list_item" or prev_line.line_type == "numbered_list_item" ): page_blocks[-1]["block_type"] = "header" # print() # print("*" * 40) # print("NEW BLOCK") # print() # print("*" * 40) # print(curr_line.line_type, curr_line.text) # group attribute if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0: group_id += 1 block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "text_group_start_idx": text_group_start_idx, "list_char": list_char, "group_id": group_id, "fs": curr_line.visual_line.start_fs, "x": curr_line.visual_line.start_x, "y": curr_line.visual_line.start_y, "line": curr_line, "block_list": curr_line.visual_line.text_list, } # This is to account for when the headers get false positive #TODO improve header code prev_text = page_blocks[-1]["block_text"] if page_blocks[-1]["block_type"] == "header" and ( len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16 ): page_blocks[-1]["block_type"] = "para" prev_line = curr_line block_idx += 1 page_blocks.append(block) # not too many blocks there may be title text missed if len(page_blocks) <= 2: for idx, block in enumerate(page_blocks): if "." 
not in block["block_text"] and len(block["block_text"].split()) < 10: page_blocks[idx]["block_type"] = "header" page_blocks = order_blocks(page_blocks) return page_blocks, line_set def clean_line(line): line = line.replace("\n", " ") line = line.replace("\t", " ") line = line.strip() return line def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) def connect(prev, curr): has_space = prev.endswith(" ") result = prev + ("" if has_space else " ") + curr return result def get_numbers(line): # test = re.compile(r"[0-9]+\.?[0-9]?") regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$") return regex.search(line) def check_block_join(prev_block, block): prev_text = prev_block["block_text"] curr_text = block["block_text"] blocks_are_paras = ( prev_block["block_type"] == "para" and block["block_type"] == "para" ) if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras: prev_line = line_parser.Line(prev_block["block_text"]) curr_line = line_parser.Line(block["block_text"]) if prev_line.incomplete_line or curr_line.continuing_line: return True return False def join_blocks(page_blocks, blocks): prev_last_block = page_blocks[-1][-1] # update page blocks and blocks # prev_blocks = page_blocks[-1] # last_prev_block = prev_blocks[-1] # check to join last_prev_block with first blocks[0] # if it's a join, pop the block and join, subtract block indexes prev_last_block["block_text"] = ( prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip() ) prev_last_block["block_list"].append(blocks[0]["block_list"]) # print(prev_block) page_blocks[-1][-1] = prev_last_block for block in blocks[1:]: block["block_idx"] -= 1 return page_blocks, blocks[1:] <fim_middle># if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
# if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
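The target above completes the joining rule in clean_lines: while the previous line looks unfinished or the current line looks like a continuation, lines are glued into one running paragraph, which then can no longer be a header. A minimal, self-contained sketch of that heuristic follows; looks_incomplete and looks_continuing are hypothetical stand-ins, not the repository's line_parser.Line attributes, and the repository version layers font, spacing, and list checks on top of this skeleton.

def join_incomplete_lines(lines):
    """Merge consecutive text lines into paragraph blocks.

    A line is glued onto the running paragraph when the previous line
    looks unfinished or the current one looks like a continuation.
    This mirrors the idea in clean_lines, not its exact rules.
    """
    def looks_incomplete(text):
        # no terminal punctuation -> probably wraps onto the next line
        return not text.rstrip().endswith((".", "!", "?", ":"))

    def looks_continuing(text):
        # lowercase start -> probably continues the previous sentence
        return text[:1].islower()

    blocks = []
    running = ""
    for raw in lines:
        text = " ".join(raw.split())
        if not text:
            continue
        if running and (looks_incomplete(running) or looks_continuing(text)):
            # if we are connecting lines, this is a paragraph, not a header
            running = running + ("" if running.endswith(" ") else " ") + text
        else:
            if running:
                blocks.append({"block_text": running, "block_type": "para"})
            running = text
    if running:
        blocks.append({"block_text": running, "block_type": "para"})
    return blocks


if __name__ == "__main__":
    sample = ["This sentence wraps", "across two lines.", "A new one starts here."]
    for block in join_incomplete_lines(sample):
        print(block)

Run on the three sample lines, this yields two paragraph blocks: the first two lines join because the first lacks terminal punctuation, while the third starts a fresh block.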
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/text_ingestor.py def blocks_to_json(page_blocks): results = [] block_count = 0 for page_idx, blocks in enumerate(page_blocks): result = [] block_start = block_count header_block_idx = -1 header_block_text = "" for block_idx_in_page, block in enumerate(blocks): if block["block_text"]: block_sents = utils.sent_tokenize(block["block_text"]) # header_block_idx = block["header_block_idx"] if block["block_type"] == "header": header_block_idx = block["block_idx"] header_block_text = block["block_text"] result.append( { "block_text": block["block_text"], "block_idx": block["block_idx"], "block_sents": block_sents, "block_type": block["block_type"], "header_block_idx": block_start + header_block_idx, "page_idx": page_idx, "block_idx_in_page": block_start + block_idx_in_page, "header_text": header_block_text, "text_group_start_idx": block["text_group_start_idx"], "block_list": block["block_list"], "level":0, "block_class": block["block_class"] if "block_class" in block else {} }, ) block_count += 1 results.append(result) return results # nlm-ingestor/nlm_ingestor/ingestor/styling_utils.py def tops_2_dict(p_items): tops_2_info = defaultdict(list) idx_2_top = {} for p_idx, p_item in enumerate(p_items): if not p_item.text.strip(): continue style_str = p_item.attrs.get("style", "") if not style_str: continue # do not strip text as trailing white-space is used as a features text = unicodedata.normalize("NFKD", p_item.text) style = get_p_styling_dict(style_str) start_y = style["start_y"] tops_2_info[round(start_y, 0)].append((p_idx, text, style)) idx_2_top[p_idx] = round(start_y, 0) # print(tops_2_info) return tops_2_info, idx_2_top # nlm-ingestor/nlm_ingestor/ingestor/table_parser.py def __init__(self, infos): self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(logging.INFO) self.tables = {} self.two_column_table_idx = set() self.resolved_tables = set() if not infos: return table_infos = [] table_start_idx = None for idx, info in enumerate(infos): if info.get("is_table_start", False) and not info.get("has_merged_cells", False): self.logger.debug(f"Found table start from match_idx:{idx}") table_start_idx = idx table_infos.append(info) elif table_start_idx is not None and info.get("is_table_end", False): table_infos.append(info) self.logger.debug(f"Table ends with match_idx:{idx}") # resolve table try: df = self.resolve_table_from_infos(table_infos) if isinstance(df, pd.DataFrame): self.logger.info( f"Found table at match_idx:{idx} of shape {df.shape}", ) self.tables[table_start_idx] = df if ( df.shape[1] == 1 and df.columns[0] == "_UNKNOWN_COLUMN_1_" and df.index.name == "_UNKNOWN_COLUMN_0_" ): for info_idx in range(len(table_infos)): self.two_column_table_idx.add(idx - info_idx) self.resolved_tables.add(table_infos[0]["table_idx"]) else: self.logger.error( f"Found table at match_idx:{idx} but failed to parse\n{table_infos[:2]}", ) except Exception: self.logger.error( f"Failed to parse table:\n{table_infos[:2]}", exc_info=True, ) # reset table_infos = [] table_start_idx = None elif table_start_idx: table_infos.append(info) """ import logging import re from collections import Counter from collections import defaultdict from . import formatter from . import line_parser from . 
import patterns from nlm_ingestor.ingestor_utils import spell_utils from nlm_ingestor.ingestor_utils.utils import sent_tokenize logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) su = spell_utils.SpellUtil() def stem(line): line = line.replace("'s", "") line = line.replace("โ€™s", "") return line def check_parentheses(text): count = 0 for i in text: if i == "(": count += 1 elif i == ")": count -= 1 return count == 0 def nlm_tokenize(line): # print(line) tokens = [] if not line: line = "" line = line.lower() trans_table = line.maketrans("-/", " ") line = line.translate(trans_table) line = line.translate(str.maketrans("", "", "๏ฟฝ\\(*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\)'\"โ€”")) # line = patterns.num_unit.sub(r"100 \1", line) line = patterns.num_unit.sub(r"", line) line = stem(line) words = line.split() for word in words: if ( not word.isdigit() and not word.endswith("%") and not word.startswith("$") and not word.endswith("$") ): tokens.append(word) if len(tokens) == 0: tokens.append("unknown") return tokens # make sure that there is at least one word which is greater than two characters def find_floating_chars(line): words = line.split(" ") for word in words: if len(word) > 2: return False return True def is_table_row(line): line = line_parser.Line(line) return line.is_table_row def should_skip(line, xml=False): return len(line) <= 2 if not xml else len(line) == 0 def clean_lines(lines, xml=False): result = [] running_line = "" line_buffer = [] line_type = "para" header_block_idx = -1 block_idx = 0 line_set = set() for line_str in lines: # print(line_str) line_str = clean_line(line_str) if should_skip(line_str, xml=xml): continue line_without_numbers = re.sub(r"\d+", "", line_str) if line_without_numbers in line_set: continue else: line_set.add(line_without_numbers) curr_line = line_parser.Line(line_str) # this converst strings like 'e x e c u t i v e summary' to 'executive summary' if not xml and curr_line.has_spaced_characters: line_str = fix_spaced_characters(line_str) curr_line = line_parser.Line(line_str) if len(line_buffer) > 0: # find out if previous line was a discontinous line prev_line = line_buffer[-1] logger.debug("========") logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n") logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n") # keep connecting lines as long as they seem incomplete is_incomplete = prev_line.incomplete_line or ( len(line_buffer) > 1 and not prev_line.ends_with_period ) logger.debug( f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}", ) if ( is_incomplete and not (curr_line.is_list_or_row or curr_line.line_type == "list_item") ) or curr_line.continuing_line: logger.debug("connecting..") running_line = formatter.connect(running_line, curr_line.text) line_buffer.append(curr_line) # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers if not line_type == "list_item": line_type = "para" else: # commit the line and start a new line # remove different types of bulletted list (for better formatting) but do not touch numbered line logger.debug("starting new line..") # if line_type == "list_item": <fim_suffix> if line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) block_idx = block_idx + 1 running_line = curr_line.text 
line_buffer = [curr_line] line_type = curr_line.line_type logger.debug("========") else: running_line = curr_line.text line_type = curr_line.line_type line_buffer = [curr_line] if line_type == "list_item" and running_line[0] in "๏ฟฝ\\*,.?โ€ข\\โžขฦ’๏‚ทโ€“\\'\"โ€”": running_line = running_line[1:].lstrip() block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) return result def line_list_check(prev_line, curr_line, list_char): # if prev_line is list_item and list_char matches curr_line if list_char == curr_line.text[0] and list_char not in ["โ€", "'", '"', "("]: return True # same char is alpha if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha(): if len(prev_line.text) >= 2 and prev_line.text[1].isupper(): # spell check first word first_word = prev_line.text.split(" ")[0] first_word = first_word.replace("'", "") correct_word = su.segment(first_word) if first_word[1:] == correct_word: return True # same char is not alpha but not digit if prev_line.text[0] == curr_line.text[0] and not ( prev_line.text[0].isalpha() or prev_line.text[0].isdigit() or list_char not in ["โ€", "'", '"', "("] ): return True return False def should_join_table(prev_line, curr_line, ents_aligned): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # print() # print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list) # check list of spaced words curr_line_ents = len(prev_line.visual_line.text_list) next_line_ents = len(curr_line.visual_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count) tab_match = ( prev_line.visual_line.tab_count == curr_line.visual_line.tab_count and curr_line.visual_line.tab_count > 0 ) # casing should also be the same same_case = ( prev_line.text[0].islower() == curr_line.text[0].islower() or prev_line.text[0].isupper() == curr_line.text[0].isupper() ) colon_check = ( prev_line.hit_colon and curr_line.hit_colon and prev_line and same_case and not prev_line.incomplete_line ) # if prev_line.hit_colon and curr_line.hit_colon: # print() # print("colon check") # print(prev_line.visual_line.text_list) # print(curr_line.visual_line.text_list) # col_check # print(tab_match, ent_match, colon_check) tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count return ( (tab_match and ent_match) or colon_check or (ents_aligned and ent_match and tab_check) ) def check_page_spacing(prev_line, curr_line, spacing_dict): # print("^"*50) # print("checking page stats") # print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text) # print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text) # print() diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y) # find best fs reference prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs} curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs} same_fs = prev_line_fs.intersection(curr_line_fs) fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs min_check = ( spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None ) max_check = ( spacing_dict[(fs, diff_top + 1)] if (fs, 
diff_top + 1) in spacing_dict
        else None
    )
    normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3

    if min_check or normal_check or max_check:
        # get all fs in spacing dict
        # see if the diff top is a min
        # print("checking space dict")
        distance_list = []
        for val in spacing_dict:
            if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2:
                distance_list.append((val, val[1]))
        # print(distance_list)
        val = min(distance_list) if len(distance_list) else []

        if len(val):
            join_fs, join_top = val[0]
            if val[0] == (fs, diff_top):  # or close
                # print("SHOULDJOIN")
                return True
            elif join_fs == fs and (
                ((diff_top - 1) == join_top) or ((diff_top + 1) == join_top)
            ):
                # same font size and vertical gap within one unit of the page's
                # common spacing; parenthesized so the fs match is always required
                return True
    return False


def compute_overlap(
    start_x0: float,
    end_x0: float,
    start_x1: float,
    end_x1: float,
    divide_by_min=True,
) -> float:
    """
    Computes the % of intersection (overlap) of two lines w.r.t. the shortest line
    """
    width_x0 = abs(end_x0 - start_x0)
    width_x1 = abs(end_x1 - start_x1)

    if start_x0 <= start_x1 <= end_x0:
        intersect = min(abs(end_x0 - start_x1), width_x1)
    elif start_x0 <= end_x1 <= end_x0:
        intersect = min(abs(end_x1 - start_x0), width_x1)
    elif start_x1 <= start_x0 <= end_x0 <= end_x1:
        intersect = abs(end_x0 - start_x0)
    else:
        intersect = 0.0

    if divide_by_min:
        intersect /= min(width_x0, width_x1) + 1e-5
    else:
        intersect /= max(width_x0, width_x1) + 1e-5
    return intersect


def compute_overlap_top_bottom(
    start_x0: float,
    end_x0: float,
    start_x1: float,
    end_x1: float,
) -> float:
    """
    This is different from the above function.
    Finds percentage overlap of top to bottom.
    Score of 100% is possible doesn't reference the shortest line
    """
    width_x1 = abs(end_x1 - start_x1)
    if width_x1 == 0:
        return 0.0

    if start_x0 <= start_x1:
        # measure from left to right
        if end_x1 <= end_x0:
            # if start and end both less, full in subset
            return 1.0
        return (end_x1 - start_x0) / width_x1
    else:
        # measure from bottom start
        if end_x1 <= start_x0:
            return 0.0
        return (end_x1 - start_x0) / width_x1


def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1):
    """
    This is different from the above function.
    Finds percentage overlap of top to bottom.
Score of 100% is possible doesn't reference the shortest line """ # print(start_x0, end_x0) # print(start_x1, end_x1) if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line # print() # print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0)) return (end_x1 - start_x1) / (end_x0 - start_x0) # other conditions # elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line # return # else: #to the right of bottom line return 1.0 # header check for lines with similar font # header check for lines with similar font def visual_header_check(prev_line, curr_line, same_font): # check top overlap (small) if the font size is bigger # print() # print("visual_header check:") # print("prev", prev_line.text) # print("checking", curr_line.text) # top also has to be higher # print("prev_line.visual_line.start_y, prev_line.visual_line.end_y") # print(prev_line.visual_line.start_y, prev_line.visual_line.end_y) # print(prev_line.visual_line.start_y, curr_line.visual_line.start_y) if prev_line.visual_line.wrapped_page: return False if prev_line.visual_line.start_y < curr_line.visual_line.start_y: prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x # print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x") # print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x) # print("curr_line.visual_line.min_x, curr_line.visual_line.max_x") # print(curr_line.visual_line.min_x, curr_line.visual_line.max_x) # print("prev_line_width / curr_line_width") # print(prev_line_width / curr_line_width) # print("prev_line_width, curr_line_width") # print(prev_line_width, curr_line_width) if curr_line_width == 0: return False # print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x)) if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x): if round(prev_line_width) == round(curr_line_width): # print() # print("NOT A HEADER1") return False offset = 0 # print(prev_line.visual_line.min_x, curr_line.visual_line.min_x) # print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x) if prev_line.visual_line.min_x <= curr_line.visual_line.min_x: offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset # print("(prev_line_width - offset) / curr_line_width") # print((prev_line_width - offset) / curr_line_width) overlap_percentage = (prev_line_width - offset) / curr_line_width different_font_style = ( prev_line.visual_line.fw != curr_line.visual_line.fw or prev_line.visual_line[1] != curr_line.visual_line[1] or prev_line.visual_line.fs > curr_line.visual_line.fs ) if ( overlap_percentage < 0.3 or (different_font_style and overlap_percentage < 0.6) or (prev_line.line_type == "header" and different_font_style) # or (prev_line.is_header and different_font_style) ): # print("HEADER INDENT", prev_line.is_header) # print("overlap rule::", (prev_line_width - offset) / curr_line_width) # print(True) return True # print(False) # print() # print("NOT A HEADER") return False def visual_header_from_stats(prev_line, curr_line, page_stats): prev_fs = prev_line.visual_line.fs curr_fs = curr_line.visual_line.fs median_val = round(page_stats["median_fs"]) max_val = round(max(page_stats["fs_list"])) max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True prev_fs_diff = round(prev_fs - median_val) curr_fs_diff = ( round(curr_fs - median_val) if round(curr_fs - 
median_val) else 0.8 ) # curr_fs is the median varied_set = len(set(page_stats["fs_list"])) >= 4 rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]]) unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"]) prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff # print("prev_fs, curr_fs", prev_fs, curr_fs) # print("unique text") # print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) ) # print("visual_header check", len(set(page_stats["fs_list"]))) # print("varied_set", varied_set, "unique_text", unique_text) # print(rounded_fs_count) # print() # close from max or far enough from median bigger_text = max_val_diff or ( prev_curr_ratio_from_median > 2 ) # TODO text must also be relatively uncommon if varied_set and (unique_text <= 0.08): if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3: # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True # header join if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1): # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True return False # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): def check_tr_alignment(prev_line, curr_line): # print("-=" * 50) # print("check_tr_alignment!") # print(prev_line.text) # print(curr_line.text) # print() prev_ents = len(prev_line.visual_line.text_list) curr_ents = len(curr_line.visual_line.text_list) prev_positions = prev_line.visual_line.start_x_list curr_positions = curr_line.visual_line.start_x_list prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent # print(prev_line_start_ents) # print(curr_line_start_ents) same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1 if len(prev_line_start_ents) == len(curr_line_start_ents): prev_positions = prev_line_start_ents curr_positions = curr_line_start_ents if len(prev_line_start_ents) == len(curr_positions) and len( prev_line_start_ents, ) != len( prev_positions, ): # joined p_tags prev_positions = prev_line_start_ents if not same_ents: # print("check_tr_alignment False1") # print(prev_ents, curr_ents) return False # print("CHECKING POSITIONS") # print(prev_positions) # print(curr_positions) for p_x, c_x in zip(prev_positions, curr_positions): p_x = round(p_x) c_x = round(c_x) if abs(p_x - c_x) > 100: # print("False") # print("check_tr_alignment False3") return False # print("check_tr_alignment True") return True def check_layout(prev_line, curr_line, prev_above_curr): prev_line_width = range( int(prev_line.visual_line.min_x), int(prev_line.visual_line.max_x), ) # weird edge case if not prev_line_width: prev_line_width = range( int(prev_line.visual_line.max_x), int(prev_line.visual_line.min_x), ) curr_line_width = range( int(curr_line.visual_line.min_x), int(curr_line.visual_line.max_x), ) prev_line_width = set(prev_line_width) prev_curr_overlap = prev_line_width.intersection(curr_line_width) if prev_curr_overlap and not prev_above_curr: # print(prev_line.text) # print(curr_line.text) # print("misplaced text group") # print() return True return False def order_blocks(blocks): block_group_dict = defaultdict(list) for idx, block in enumerate(blocks): # print(idx, "block-group", block["group_id"], block["block_type"], block['block_text']) 
group_id = block["group_id"] block_group_dict[group_id].append(block) block_group_list = [] # list that holds tuples (group_id, y_pos) for block_group_id in block_group_dict: block_group_list.append( (block_group_id, block_group_dict[block_group_id][0]["y"]), ) # append starting y position of group block_group_list = sorted( block_group_list, key=lambda x: x[1], ) # sort block groups by y position # get list of ordered block group keys ordered_blocks = [] for block_group_id, y in block_group_list: ordered_blocks += block_group_dict[block_group_id] # for b in original_blocks: # re-index blocks and headers based off of new ordering header_idx = 0 for idx, block in enumerate(ordered_blocks): block["block_idx"] = idx if block["block_type"] == "header": header_idx = idx ordered_blocks[idx]["header_block_idx"] = header_idx return ordered_blocks def visual_clean_lines( lines, page_stats={}, page_info_dict={}, page_idx=0, line_set={}, ): page_blocks = [] header_block_idx = -1 block_idx = 0 # block_idx = page_idx style_dict = {} join_font_spacing = False prev_line = None text_list = [] prev_ents = 0 curr_ents = 0 is_incomplete = False colon_rule = False text_group_start = True text_group_start_idx = 0 prev_line = None next_line = None # for idx, line in enumerate(lines[12:14]): sentence_visual_end = False group_id = 0 for idx, line in enumerate(lines): # print(idx) line_str, style_dict, text_list = ( line["text"], line["style"], line["text_list"], ) line_str = " ".join(line_str.split()) if should_skip(line_str): continue if line_str in line_set: continue if len(line_str.split()) > 8: line_set.add(line_str) curr_line = line_parser.Line( line_str=line_str, style_dict=style_dict, text_list=text_list, page_details=page_stats, ) if prev_line is None: # initialize memory of previous line. 
# this will update with join decisions list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "list_char": list_char, "fs": curr_line.visual_line.start_fs, "text_group_start_idx": text_group_start_idx, "block_list": curr_line.visual_line.text_list, "line": curr_line, "y": curr_line.visual_line.start_y, "group_id": group_id, } prev_line = curr_line block_idx += 1 # if (idx <= 3) or (idx >= len(lines) - 3): # line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip() # if line_without_numbers: # # track block_idx for de-duplication # line_set[line_without_numbers].append((page_idx, block_idx)) page_blocks.append(block) continue # print("--" * 50) # print(prev_line.line_type, "\n", prev_line.text) # print(prev_ents) # print(prev_line.visual_line.fw_list) # print(prev_line.visual_line.font_family) # print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text) # print(prev_line.visual_line.mode_fs) # print(curr_line.line_type, "\n", curr_line.text) # print(curr_ents) # print() # print(curr_line.visual_line.font_family) # print(curr_line.visual_line.mode_fs) # print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text) if ( len(prev_line.text) > 1 and len(curr_line.text) > 1 and prev_line.text[:2] == curr_line.text[:2] and prev_line.text[1] == " " and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit()) and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha()) ): curr_line.line_type = "list_item" curr_line.is_list_item = True curr_line.is_list_or_row = True if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["block_type"] = "list_item" page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() same_start_fs = ( abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5 ) same_end_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5 ) same_end_start_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5 ) prev_above_curr = ( True if prev_line.visual_line.end_y < curr_line.visual_line.start_y else False ) y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y top_overlap = compute_overlap_top_bottom( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) bottom_overlap = compute_bottom_top_overlap( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) prev_overlap_curr = True if bottom_overlap or top_overlap else False use_visual_join = True if prev_above_curr and prev_overlap_curr else False if not use_visual_join and prev_line.incomplete_line: join_font_spacing = True if not (prev_line.is_table_row or curr_line.is_table_row): if page_stats["n_lines"] <= 3: join_font_spacing = True else: join_font_spacing = check_page_spacing( prev_line, curr_line, page_stats["fs_and_diff_next_y"], ) # if the font is different and font-family is different different_font_family = ( 
curr_line.visual_line.font_family != prev_line.visual_line.font_family ) different_common_fs = ( prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs ) different_font = ( different_font_family and different_common_fs and not join_font_spacing ) # start and end characters are same font or the mode of fonts of both lines is the same same_font = ( (prev_line.visual_line.fs == curr_line.visual_line.fs) or (same_start_fs and same_end_fs) or same_end_start_fs or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs ) and not different_font prev_ents = ( len(prev_line.visual_line.text_list) if not prev_line.line_type == "list_item" else 0 ) curr_ents = ( len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0 ) ents_aligned = check_tr_alignment(prev_line, curr_line) is_incomplete_sent = ( prev_line.incomplete_line and not prev_line.ends_with_period or prev_line.ends_with_comma ) # logic using line after curr if idx + 1 < len(lines): # this is inefficent as line_parser is called twice, # once for next_line and once for curr_line. next_line = lines[idx + 1] # print("NEXT LINE\n", next_line['text']) next_line_str, next_style_dict, next_text_list = ( next_line["text"], next_line["style"], next_line["text_list"], ) next_line = line_parser.Line( line_str=next_line_str, style_dict=next_style_dict, text_list=next_text_list, page_details=page_stats, ) # if the last line was not a table, check if the next line is a table to avoid single tr if prev_line.line_type != "table_row" and not ents_aligned: # check if the next line is a table and matches curr_line next_line_tr = next_line.line_type == "table_row" or should_join_table( curr_line, next_line, False, ) if not next_line_tr and curr_line.line_type == "table_row": curr_line.line_type = "para" # if the next line is joinable by visual stats but prev and curr are not # don't join the line (only true by x-span check and y is below for prev cur) # if this is not true ignore the rule prev_not_above_next = ( next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y ) next_line_join = False if next_line and check_layout(prev_line, next_line, prev_not_above_next): next_line_join = check_page_spacing( curr_line, next_line, page_stats["fs_and_diff_next_y"], ) # if the prev line is not visually joinable and the curr_next is # make sure the prev_line doesn't join the curr_line curr_next_visual_join = not join_font_spacing and next_line_join # print() # print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line") # print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line) # print("join_font_spacing:,", join_font_spacing) is_incomplete = ( is_incomplete_sent or (join_font_spacing and not sentence_visual_end) or curr_line.continuing_line ) # print("is_incomplete", is_incomplete) has_overlap_with_min = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=True, ) > 0.7 ) is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0 is_visually_apart = (has_overlap_with_min and not is_below) or ( not has_overlap_with_min and is_below ) above_bold_below_not = ( prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0 ) has_overlap_with_max = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, 
prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=False, ) > 0.3 ) is_not_header_over_para = True if ( above_bold_below_not and not has_overlap_with_max and prev_line.line_type == "header" and not prev_line.incomplete_line ): is_not_header_over_para = False # print("header over para check") # print("""above_bold_below_not # and not has_overlap_with_max # and prev_line.line_type == "header" # """) # print(above_bold_below_not) # print(has_overlap_with_max, j) # print(prev_line.line_type == "header") # print() # print(is_not_header_over_para) ########### # List item if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]): prev_line.line_type = "list_item" curr_line.line_type = "list_item" curr_line.is_list_item = True # change prev_line to list item if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() page_blocks[-1]["block_type"] = "list_item" close_text_y = ( curr_line.visual_line.start_y - curr_line.visual_line.mode_fs - prev_line.visual_line.start_y - prev_line.visual_line.mode_fs ) <= 0 aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x title_text = False if len(lines) < 10: title_text = top_overlap == 1.0 and close_text_y and aligned_text visual_header = visual_header_check(prev_line, curr_line, same_font) list_item_rule = curr_line.has_list_char or ( curr_line.numbered_line and not ( (prev_line.incomplete_line and curr_line.continuing_line) or join_font_spacing ) ) last_2_block_tr = False if len(page_blocks) >= 2: last_block_tr = ( page_blocks[-1]["block_type"] == "table_row" and page_blocks[-2]["block_type"] == "table_row" ) if not last_block_tr and curr_line.line_type == "para": # check to join if prev_line.incomplete_line and curr_line.continuing_line: last_2_block_tr = True no_space_join = prev_line.ends_with_period and curr_line.text[0] != " " visual_header_by_stats = visual_header_from_stats( prev_line, curr_line, page_stats, ) header_join = False common_list = curr_line.has_list_char or prev_line.has_list_char if ( visual_header_by_stats and curr_line.incomplete_line and same_font and not (prev_line.is_table_row or curr_line.is_table_row or common_list) ): header_join = True # print("LINEJOIN CHECK") # print("positive\n", "*" * 10) # print(f"\nsame_font:{same_font}", # f"\nis_incomplete:{is_incomplete}", # f"\nis_not_header_over_para:{is_not_header_over_para}") # print("join_font_spacing", join_font_spacing) # print("header join", header_join) # print() # print("negative\n", "*" * 10) # print(f"\nis_visually_apart:{is_visually_apart}", # f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}", # f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}", # f"\ncurr_line table {curr_line.line_type == 'table_row'}", # f"\ncurr_line list {curr_line.is_list_item}", # f"\nvisual_header {visual_header}", # f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}') if ( same_font and not should_join_table(prev_line, curr_line, ents_aligned) and not (curr_line.line_type == "table_row" or list_item_rule) and not (prev_line.line_type == "table_row" and not last_2_block_tr) and is_incomplete and not curr_next_visual_join # is_visually_apart and not visual_header or not check_parentheses(prev_line.text) and is_not_header_over_para and not no_space_join or title_text or header_join ): # print("JOIN") if not is_visually_apart and 
bottom_overlap < 0.5:
                # this would signify end of paragraph
                sentence_visual_end = True
            else:
                sentence_visual_end = False

            if page_stats["n_lines"] <= 3:
                page_blocks[-1]["block_type"] = "header"
            elif (
                not prev_line.line_type == "list_item"
            ):  # and not curr_line.visual_line.is_header:
                page_blocks[-1]["block_type"] = "para"

            new_text = formatter.connect(
                prev_line.text.rstrip(),
                curr_line.text.lstrip(),
            )
            new_text_list = (
                prev_line.visual_line.text_list + curr_line.visual_line.text_list
            )
            # print("Max ex min ex assignment")
            # take the max across both lines (the original compared prev against itself)
            max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
            min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
            prev_line_type = prev_line.line_type

            page_blocks[-1]["block_text"] = new_text
            prev_start_y = prev_line.visual_line.start_y
            curr_start_y = curr_line.visual_line.start_y
            prev_end_y = prev_line.visual_line.end_y

            wrapped_page = prev_line.visual_line.wrapped_page

            # pass the line parser attributes
            prev_line = curr_line

            # add appended text and text_list, preserve the line type
            prev_line.text = new_text
            prev_line.visual_line.start_y = prev_start_y
            prev_line.visual_line.text_list = new_text_list
            prev_line.line_type = prev_line_type
            prev_line.visual_line.min_x = min_x
            prev_line.visual_line.max_x = max_x
            prev_line.visual_line.wrapped_page = wrapped_page

            if curr_start_y < prev_end_y:
                prev_line.visual_line.wrapped_page = True
            # print(prev_start_y)
            # print("Join")
            # print()
            # print("-" * 50)
            # print()
        # new block
        else:
            # print("NEW block")
            # print("*" * 50)

            if not is_visually_apart and bottom_overlap < 0.5:
                # this would signify end of paragraph
                sentence_visual_end = True
            else:
                sentence_visual_end = False
            # print("-"*50)
            colon_rule = (
                prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
            )
            # normal case
            tab_check_join = {
                prev_line.visual_line.tab_count_join,
                prev_line.visual_line.tab_count,
            } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}

            tab_check = sum(tab_check_join) > 0

            # print("-+" * 50)
            # print("TAB POSITIONS")
            # print(prev_line.text)
            # print(prev_line.visual_line.start_x_list)
            # print(prev_line.visual_line.start_x_list_single_ent)
            # print(prev_line.visual_line.tab_count)
            # print(prev_line.visual_line.tab_count_join)
            #
            # print(curr_line.text)
            # print(curr_line.visual_line.start_x_list)
            # print(curr_line.visual_line.start_x_list_single_ent)
            # print(curr_line.visual_line.tab_count)
            # print(curr_line.visual_line.tab_count_join)
            # print("tabcheck", tab_check)
            # print("ents_aligned", ents_aligned)
            # print(prev_ents, curr_ents)
            # print(curr_line.visual_line.text_list)
            # print("-+" * 50)

            if visual_header_by_stats and prev_line.line_type != "table_row":
                page_blocks[-1]["block_type"] = "header"
            elif (
                colon_rule
                and prev_ents == 1
                and prev_line.line_type != "list_item"
                and not (prev_line.incomplete_line and curr_line.continuing_line)
            ):
                # print("Table Conversion")
                # print()
                # print("colon check")
                # print(prev_line.text.split(":"))
                # print(curr_line.text.split(":"))
                # print("TR1")
                new_text_list = prev_line.text.split(":")
                new_text_list = [new_text_list[0] + ":", new_text_list[1:]]
                page_blocks[-1]["block_type"] = "table_row"
                # assign the split list (the original used a bare annotation, which is a no-op)
                page_blocks[-1]["block_list"] = new_text_list
                if text_group_start:
                    text_group_start = False
                    text_group_start_idx = page_blocks[-1]["block_idx"]
                    page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
                curr_line.line_type = "table_row"
                curr_line.is_list_or_row = True
                # print("Table Conversion!")
                # print(prev_ents, curr_ents)
                # print(page_blocks[-1]["block_text"])
                # print("TR3")
            elif (
tab_check and ents_aligned and prev_line.line_type != "list_item" ) or (colon_rule and not prev_line.incomplete_line): # print("Table Conversion") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR2") page_blocks[-1]["block_type"] = "table_row" if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" else: text_group_start = True text_group_start_idx = -1 list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx if (visual_header or visual_header_by_stats) and not ( prev_line.line_type == "list_item" or prev_line.line_type == "numbered_list_item" ): page_blocks[-1]["block_type"] = "header" # print() # print("*" * 40) # print("NEW BLOCK") # print() # print("*" * 40) # print(curr_line.line_type, curr_line.text) # group attribute if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0: group_id += 1 block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "text_group_start_idx": text_group_start_idx, "list_char": list_char, "group_id": group_id, "fs": curr_line.visual_line.start_fs, "x": curr_line.visual_line.start_x, "y": curr_line.visual_line.start_y, "line": curr_line, "block_list": curr_line.visual_line.text_list, } # This is to account for when the headers get false positive #TODO improve header code prev_text = page_blocks[-1]["block_text"] if page_blocks[-1]["block_type"] == "header" and ( len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16 ): page_blocks[-1]["block_type"] = "para" prev_line = curr_line block_idx += 1 page_blocks.append(block) # not too many blocks there may be title text missed if len(page_blocks) <= 2: for idx, block in enumerate(page_blocks): if "." 
not in block["block_text"] and len(block["block_text"].split()) < 10: page_blocks[idx]["block_type"] = "header" page_blocks = order_blocks(page_blocks) return page_blocks, line_set def clean_line(line): line = line.replace("\n", " ") line = line.replace("\t", " ") line = line.strip() return line def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) def connect(prev, curr): has_space = prev.endswith(" ") result = prev + ("" if has_space else " ") + curr return result def get_numbers(line): # test = re.compile(r"[0-9]+\.?[0-9]?") regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$") return regex.search(line) def check_block_join(prev_block, block): prev_text = prev_block["block_text"] curr_text = block["block_text"] blocks_are_paras = ( prev_block["block_type"] == "para" and block["block_type"] == "para" ) if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras: prev_line = line_parser.Line(prev_block["block_text"]) curr_line = line_parser.Line(block["block_text"]) if prev_line.incomplete_line or curr_line.continuing_line: return True return False def join_blocks(page_blocks, blocks): prev_last_block = page_blocks[-1][-1] # update page blocks and blocks # prev_blocks = page_blocks[-1] # last_prev_block = prev_blocks[-1] # check to join last_prev_block with first blocks[0] # if it's a join, pop the block and join, subtract block indexes prev_last_block["block_text"] = ( prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip() ) prev_last_block["block_list"].append(blocks[0]["block_list"]) # print(prev_block) page_blocks[-1][-1] = prev_last_block for block in blocks[1:]: block["block_idx"] -= 1 return page_blocks, blocks[1:] <fim_middle># running_line = running_line[1:].lstrip()
# running_line = running_line[1:].lstrip()
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
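The row above ends with a LINE_COMMENT completion under the prefix_suffix_full_complete_current_block_with_repo_rag_oracle scenario. To make the sentinel layout concrete, here is a minimal sketch of how such a row could be reassembled into a fill-in-the-middle training sequence; build_fim_example is a hypothetical helper, not part of the dataset's tooling, and only the sentinel strings themselves are taken verbatim from the rows.

import typing

def build_fim_example(prefix: str, suffix: str, middle: str) -> str:
    # `inputs` carries everything up to and including <fim_middle>;
    # `targets` is the ground-truth middle the model should emit after it.
    inputs = f"<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>"
    return inputs + middle

# Tiny usage example with placeholder code fragments:
example = build_fim_example(
    prefix="def clean_line(line):\n    line = line.replace('\\n', ' ')\n",
    suffix="    return line\n",
    middle="    line = line.strip()\n",
)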
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/line_parser.py def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def traverse(parent, level, blocks): for child in parent: # handle cases when there's only a <country /> tag if not child.text: continue if len(list(child)) > 0: # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader blocks.append(header_block) traverse(child, level + 1, blocks) else: # print("\t"*(level + 1), child.text) if not title and child.tag.lower().find("title") != -1: self.title = child.text if child.tag != "textblock": # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) # header_text = " ".join(child.tag.split("_")).title() header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader 
blocks.append(header_block) else: level -= 1 lines = child.text.split("\n") # print("\t" * (level + 1), "======") # for line in lines: # print("\t" * (level + 1), line) # print("\t" * (level + 1), "======") col_blocks = processors.clean_lines(lines, xml=True) header_text = blocks[-1]["block_text"] has_header = False for block in col_blocks: # print("\t" * (level + 1), block["block_text"]) inline_header = has_header and block["block_type"] == "para" block["header_text"] = para_header if inline_header else header_text indent_offset = 2 if inline_header else 1 block["level"] = level + indent_offset block["block_idx"] = len(blocks) block["page_idx"] = 0 block["block_sents"] = sent_tokenize(block["block_text"]) block["block_class"] = "nlm-text-body" block["level_chain"] = ( [title, header_text] if title else [header_text] ) if len(col_blocks) == 1: block["block_type"] = "para" blocks.append(block) if block["block_type"] == "header": has_header = True para_header = block["block_text"] # nlm-ingestor/nlm_ingestor/ingestor_utils/parsing_utils.py def find_potential_gaps(gap_count): """ This function checks if a table row can be formed from the current table row spacing scheme. This is for edge cases when tika doesn't properly chunk the cells of a line """ possible_gaps = 0 min_gap = min(gap_count) gap_threshold = [] for gap_size in gap_count: if gap_size > (min_gap * 3): gap_threshold.append(gap_size) possible_gaps += gap_count[gap_size] if len(gap_threshold): return possible_gaps, min(gap_threshold) # suggested splits return [], 0 """ import json import re import numpy as np from nltk import load from nltk import PunktSentenceTokenizer nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) if isinstance(obj, np.floating): return float(obj) if isinstance(obj, np.ndarray): return obj.tolist() return super(NpEncoder, self).default(obj) nlm_abbs = { "u.s", "u.s.a", "n.w", "p.o", "po", "st", "ave", "blvd", "ctr", "cir", "ct", "dr", "mtn", "apt", "hwy", "esq", "fig", "no", "sec", "n.a", "s.a.b", "non-u.s", "cap", 'u.s.c', "ste", } nlm_special_abbs = { "inc", } abbs = nltk_abbs | nlm_abbs nltk_tokenzier = PunktSentenceTokenizer() rules = [] for abb in abbs: # match start of the sentence pattern = fr"^{abb}.\s" replaced = f"{abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match token in sentence pattern = fr"\s{abb}.\s" replaced = f" {abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) for abb in nlm_special_abbs: pattern = fr"{abb}\." 
replaced = f"{abb}_" rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match content inside brackets # (?<=\() ==> starts with "(" # ([^)]+) ==> repeat not ")" # (?=\))") ==> ends with ")" bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))") space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.') quotation_pattern = re.compile(r'[โ€โ€œ"โ€˜โ€™\']') def sent_tokenize(org_texts): if not org_texts: return org_texts sents = [] # in case org_texts has \n, break it into multiple paragraph # edge case for html and markdown for org_text in org_texts.split("\n"): org_text = space_rule.sub(r'\1', org_text) modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925 orig_offset = abs(len(org_text) - len(modified_text)) <fim_suffix> for span_group in bracket_rule.finditer(modified_text): start_byte, end_byte = span_group.span() span = modified_text[start_byte:end_byte] # skip this logic when span is too big? disabled for now # if len(span.split()) >= 10: # continue modified_text = modified_text.replace( f"({span})", f"_{span.replace('.','_')}_", ) for rule, replaced in rules: modified_text = rule.sub(replaced, modified_text) # Normalize all the quotation. modified_text = quotation_pattern.sub("\"", modified_text) modified_sents = nltk_tokenzier.tokenize(modified_text) offset = orig_offset sent_idx = 0 while offset < len(modified_text) and sent_idx < len(modified_sents): if modified_text[offset] == " ": offset += 1 continue # cut org_text based on lengths of modified_sent modified_sent = modified_sents[sent_idx] sents.append(org_text[offset: offset + len(modified_sent)]) offset += len(modified_sent) sent_idx += 1 if len(sents) >= 2 and re.match(r"^.\.$", sents[0]): sents[1] = sents[0] + " " + sents[1] sents = sents[1:] return sents def divide_list_into_chunks(lst, n): # looping till length l for i in range(0, len(lst), n): yield lst[i : i + n] def normalize(X): norms = np.einsum("ij,ij->i", X, X) np.sqrt(norms, norms) X /= norms[:, np.newaxis] return X def detect_block_center_aligned(block, page_width): center_location = block["box_style"][1] + block["box_style"][3] / 2 center_aligned = abs(center_location - page_width / 2) < page_width * 0.01 width_check = block["box_style"][3] * 2 < page_width return center_aligned and width_check def detect_block_center_of_page(block, page_height): bottom = block["box_style"][0] + block["box_style"][4] center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3) return center_of_page def check_char_is_word_boundary(c): if c.isalnum(): return False if c in ['-', '_']: return False return True def blocks_to_sents(blocks, flatten_merged_table=False, debug=False): block_texts = [] block_info = [] header_block_idx = -1 header_match_idx = -1 header_match_idx_offset = -1 header_block_text = "" is_rendering_table = False is_rendering_merged_cells = False table_idx = 0 levels = [] prev_header = None block_idx = 0 for block_idx, block in enumerate(blocks): block_type = block["block_type"] if block_type == "header": if debug: print("---", block["level"], block["block_text"]) header_block_text = block["block_text"] header_block_idx = block["block_idx"] header_match_idx = header_match_idx_offset + 1 if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0: while len(levels) > 0 and levels[-1]["level"] >= block["level"]: if debug: print("<<", levels[-1]["level"], levels[-1]["block_text"]) levels.pop(-1) if debug: 
print(">>", block["block_text"]) levels.append(block) prev_header = block if debug: print("-", [str(level['level']) + "-" + level['block_text'] for level in levels]) block["header_text"] = header_block_text block["header_block_idx"] = header_block_idx block["header_match_idx"] = header_match_idx block["block_idx"] = block_idx level_chain = [] for level in levels: level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]}) # remove a level for header if block_type == "header": level_chain = level_chain[:-1] level_chain.reverse() block["level_chain"] = level_chain # if block_type == "header" or block_type == "table_row": if ( block_type == "header" and not is_rendering_table and 'is_table_start' not in block ): block_texts.append(block["block_text"]) # append text from next block to header block # TODO: something happened here, it messed up the match_text # if block_type == "header" and block_idx + 1 < len(blocks): # block[ # "block_text" # ] += blocks[block_idx+1]['block_text'] block_info.append(block) header_match_idx_offset += 1 elif ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" ) and not is_rendering_table: block_sents = block["block_sents"] header_match_idx_offset += len(block_sents) for sent in block_sents: block_texts.append(sent) block_info.append(block) elif 'is_table_start' in block: is_rendering_table = True if 'has_merged_cells' in block: is_rendering_merged_cells = True elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row": block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if is_rendering_table: if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table: eff_header_block = block["effective_header"] eff_para_block = block["effective_para"] eff_header_block["header_text"] = block["header_text"] eff_header_block["header_block_idx"] = block["block_idx"] eff_header_block["header_match_idx"] = header_match_idx_offset + 1 eff_header_block["level"] = block["level"] + 1 eff_header_block["level_chain"] = block["level_chain"] eff_para_block["header_block_idx"] = block["block_idx"] eff_para_block["header_match_idx"] = header_match_idx_offset + 1 eff_para_block["level"] = block["level"] + 2 eff_para_block["level_chain"] = [ { "block_idx": eff_header_block["block_idx"], "block_text": eff_header_block["block_text"], }, ] + eff_header_block["level_chain"] header_match_idx_offset += 1 block_info.append(block["effective_header"]) block_texts.append(block["effective_header"]["block_text"]) for sent in block["effective_para"]["block_sents"]: block_texts.append(sent) block_info.append(block["effective_para"]) header_match_idx_offset += len(block["effective_para"]["block_sents"]) else: block["table_idx"] = table_idx block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if 'is_table_end' in block: is_rendering_table = False table_idx += 1 return block_texts, block_info def get_block_texts(blocks): block_texts = [] block_info = [] for block in blocks: block_type = block["block_type"] if ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" or block_type == "header" ): block_texts.append(block["block_text"]) block_info.append(block) return block_texts, block_info<fim_middle># do not break bracket
# do not break bracket
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
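The target above marks the step that protects dotted text inside parentheses from the sentence tokenizer. The following standalone sketch reproduces just that bracket-masking step, reusing the same regular expression as bracket_rule in utils.py; it is an illustration of the idea, not the repository's exact code path.

import re

# Same lookbehind/lookahead pattern as bracket_rule in utils.py:
# capture everything between "(" and ")" without consuming the parentheses.
bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))")

def mask_bracketed_periods(text: str) -> str:
    # Replace "." inside each bracketed span with "_" so a sentence
    # tokenizer will not split the sentence in the middle of the span.
    for match in bracket_rule.finditer(text):
        span = match.group(0)
        text = text.replace(f"({span})", f"_{span.replace('.', '_')}_")
    return text

print(mask_bracketed_periods("Totals rose (approx. 3.5 pct.) last year."))
# -> Totals rose _approx_ 3_5 pct__ last year.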
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/line_parser.py def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # nlm-ingestor/nlm_ingestor/ingestor/xml_ingestor.py def traverse(parent, level, blocks): for child in parent: # handle cases when there's only a <country /> tag if not child.text: continue if len(list(child)) > 0: # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader blocks.append(header_block) traverse(child, level + 1, blocks) else: # print("\t"*(level + 1), child.text) if not title and child.tag.lower().find("title") != -1: self.title = child.text if child.tag != "textblock": # print("\t" * (level), "Header", child.tag) header_text = XMLIngestor.make_header(child.tag) # header_text = " ".join(child.tag.split("_")).title() header_block = { "block_idx": len(blocks), "page_idx": 0, "block_text": header_text, "block_type": "header", "block_class": "nlm-text-header", "header_block_idx": 0, "level": level, } subheader = " ".join([child.attrib[c] for c in child.attrib]) if subheader: header_block["block_text"] += " " + subheader 
blocks.append(header_block) else: level -= 1 lines = child.text.split("\n") # print("\t" * (level + 1), "======") # for line in lines: # print("\t" * (level + 1), line) # print("\t" * (level + 1), "======") col_blocks = processors.clean_lines(lines, xml=True) header_text = blocks[-1]["block_text"] has_header = False for block in col_blocks: # print("\t" * (level + 1), block["block_text"]) inline_header = has_header and block["block_type"] == "para" block["header_text"] = para_header if inline_header else header_text indent_offset = 2 if inline_header else 1 block["level"] = level + indent_offset block["block_idx"] = len(blocks) block["page_idx"] = 0 block["block_sents"] = sent_tokenize(block["block_text"]) block["block_class"] = "nlm-text-body" block["level_chain"] = ( [title, header_text] if title else [header_text] ) if len(col_blocks) == 1: block["block_type"] = "para" blocks.append(block) if block["block_type"] == "header": has_header = True para_header = block["block_text"] # nlm-ingestor/nlm_ingestor/ingestor_utils/parsing_utils.py def find_potential_gaps(gap_count): """ This function checks if a table row can be formed from the current table row spacing scheme. This is for edge cases when tika doesn't properly chunk the cells of a line """ possible_gaps = 0 min_gap = min(gap_count) gap_threshold = [] for gap_size in gap_count: if gap_size > (min_gap * 3): gap_threshold.append(gap_size) possible_gaps += gap_count[gap_size] if len(gap_threshold): return possible_gaps, min(gap_threshold) # suggested splits return [], 0 """ import json import re import numpy as np from nltk import load from nltk import PunktSentenceTokenizer nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) if isinstance(obj, np.floating): return float(obj) if isinstance(obj, np.ndarray): return obj.tolist() return super(NpEncoder, self).default(obj) nlm_abbs = { "u.s", "u.s.a", "n.w", "p.o", "po", "st", "ave", "blvd", "ctr", "cir", "ct", "dr", "mtn", "apt", "hwy", "esq", "fig", "no", "sec", "n.a", "s.a.b", "non-u.s", "cap", 'u.s.c', "ste", } nlm_special_abbs = { "inc", } abbs = nltk_abbs | nlm_abbs nltk_tokenzier = PunktSentenceTokenizer() rules = [] for abb in abbs: # match start of the sentence pattern = fr"^{abb}.\s" replaced = f"{abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match token in sentence pattern = fr"\s{abb}.\s" replaced = f" {abb}_ " # case insensitive replacement for synonyms rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) for abb in nlm_special_abbs: pattern = fr"{abb}\." 
replaced = f"{abb}_" rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match content inside brackets # (?<=\() ==> starts with "(" # ([^)]+) ==> repeat not ")" # (?=\))") ==> ends with ")" bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))") space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.') quotation_pattern = re.compile(r'[โ€โ€œ"โ€˜โ€™\']') def sent_tokenize(org_texts): if not org_texts: return org_texts sents = [] # in case org_texts has \n, break it into multiple paragraph # edge case for html and markdown for org_text in org_texts.split("\n"): org_text = space_rule.sub(r'\1', org_text) modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925 orig_offset = abs(len(org_text) - len(modified_text)) # do not break bracket for span_group in bracket_rule.finditer(modified_text): start_byte, end_byte = span_group.span() span = modified_text[start_byte:end_byte] # skip this logic when span is too big? disabled for now # if len(span.split()) >= 10: # continue modified_text = modified_text.replace( f"({span})", f"_{span.replace('.','_')}_", ) for rule, replaced in rules: modified_text = rule.sub(replaced, modified_text) # Normalize all the quotation. modified_text = quotation_pattern.sub("\"", modified_text) modified_sents = nltk_tokenzier.tokenize(modified_text) offset = orig_offset sent_idx = 0 <fim_suffix> if len(sents) >= 2 and re.match(r"^.\.$", sents[0]): sents[1] = sents[0] + " " + sents[1] sents = sents[1:] return sents def divide_list_into_chunks(lst, n): # looping till length l for i in range(0, len(lst), n): yield lst[i : i + n] def normalize(X): norms = np.einsum("ij,ij->i", X, X) np.sqrt(norms, norms) X /= norms[:, np.newaxis] return X def detect_block_center_aligned(block, page_width): center_location = block["box_style"][1] + block["box_style"][3] / 2 center_aligned = abs(center_location - page_width / 2) < page_width * 0.01 width_check = block["box_style"][3] * 2 < page_width return center_aligned and width_check def detect_block_center_of_page(block, page_height): bottom = block["box_style"][0] + block["box_style"][4] center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3) return center_of_page def check_char_is_word_boundary(c): if c.isalnum(): return False if c in ['-', '_']: return False return True def blocks_to_sents(blocks, flatten_merged_table=False, debug=False): block_texts = [] block_info = [] header_block_idx = -1 header_match_idx = -1 header_match_idx_offset = -1 header_block_text = "" is_rendering_table = False is_rendering_merged_cells = False table_idx = 0 levels = [] prev_header = None block_idx = 0 for block_idx, block in enumerate(blocks): block_type = block["block_type"] if block_type == "header": if debug: print("---", block["level"], block["block_text"]) header_block_text = block["block_text"] header_block_idx = block["block_idx"] header_match_idx = header_match_idx_offset + 1 if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0: while len(levels) > 0 and levels[-1]["level"] >= block["level"]: if debug: print("<<", levels[-1]["level"], levels[-1]["block_text"]) levels.pop(-1) if debug: print(">>", block["block_text"]) levels.append(block) prev_header = block if debug: print("-", [str(level['level']) + "-" + level['block_text'] for level in levels]) block["header_text"] = header_block_text block["header_block_idx"] = header_block_idx block["header_match_idx"] = header_match_idx 
block["block_idx"] = block_idx level_chain = [] for level in levels: level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]}) # remove a level for header if block_type == "header": level_chain = level_chain[:-1] level_chain.reverse() block["level_chain"] = level_chain # if block_type == "header" or block_type == "table_row": if ( block_type == "header" and not is_rendering_table and 'is_table_start' not in block ): block_texts.append(block["block_text"]) # append text from next block to header block # TODO: something happened here, it messed up the match_text # if block_type == "header" and block_idx + 1 < len(blocks): # block[ # "block_text" # ] += blocks[block_idx+1]['block_text'] block_info.append(block) header_match_idx_offset += 1 elif ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" ) and not is_rendering_table: block_sents = block["block_sents"] header_match_idx_offset += len(block_sents) for sent in block_sents: block_texts.append(sent) block_info.append(block) elif 'is_table_start' in block: is_rendering_table = True if 'has_merged_cells' in block: is_rendering_merged_cells = True elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row": block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if is_rendering_table: if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table: eff_header_block = block["effective_header"] eff_para_block = block["effective_para"] eff_header_block["header_text"] = block["header_text"] eff_header_block["header_block_idx"] = block["block_idx"] eff_header_block["header_match_idx"] = header_match_idx_offset + 1 eff_header_block["level"] = block["level"] + 1 eff_header_block["level_chain"] = block["level_chain"] eff_para_block["header_block_idx"] = block["block_idx"] eff_para_block["header_match_idx"] = header_match_idx_offset + 1 eff_para_block["level"] = block["level"] + 2 eff_para_block["level_chain"] = [ { "block_idx": eff_header_block["block_idx"], "block_text": eff_header_block["block_text"], }, ] + eff_header_block["level_chain"] header_match_idx_offset += 1 block_info.append(block["effective_header"]) block_texts.append(block["effective_header"]["block_text"]) for sent in block["effective_para"]["block_sents"]: block_texts.append(sent) block_info.append(block["effective_para"]) header_match_idx_offset += len(block["effective_para"]["block_sents"]) else: block["table_idx"] = table_idx block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if 'is_table_end' in block: is_rendering_table = False table_idx += 1 return block_texts, block_info def get_block_texts(blocks): block_texts = [] block_info = [] for block in blocks: block_type = block["block_type"] if ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" or block_type == "header" ): block_texts.append(block["block_text"]) block_info.append(block) return block_texts, block_info<fim_middle>while offset < len(modified_text) and sent_idx < len(modified_sents): if modified_text[offset] == " ": offset += 1 continue # cut org_text based on lengths of modified_sent modified_sent = modified_sents[sent_idx] sents.append(org_text[offset: offset + len(modified_sent)]) offset += len(modified_sent) sent_idx += 1
while offset < len(modified_text) and sent_idx < len(modified_sents):
    if modified_text[offset] == " ":
        offset += 1
        continue
    # cut org_text based on lengths of modified_sent
    modified_sent = modified_sents[sent_idx]
    sents.append(org_text[offset: offset + len(modified_sent)])
    offset += len(modified_sent)
    sent_idx += 1

WHILE
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
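The WHILE completion above re-aligns sentence boundaries found in the modified (abbreviation-masked) text back onto the original string. Below is a self-contained version of that alignment loop, under the assumption that all masking rules preserve string length (the rules above swap "." for "_", so they do); this is a sketch of the idea, not the repository's function.

def realign_sentences(org_text, modified_text, modified_sents):
    # Walk the masked text and the original text in lockstep: every masked
    # sentence has the same length as its original counterpart, so slicing
    # org_text by those lengths recovers the untouched original sentences.
    sents, offset, sent_idx = [], 0, 0
    while offset < len(modified_text) and sent_idx < len(modified_sents):
        if modified_text[offset] == " ":  # skip the gap between sentences
            offset += 1
            continue
        length = len(modified_sents[sent_idx])
        sents.append(org_text[offset: offset + length])
        offset += length
        sent_idx += 1
    return sents

print(realign_sentences(
    "No. 5 is open. Call us.",
    "No_ 5 is open. Call us.",
    ["No_ 5 is open.", "Call us."],
))
# -> ['No. 5 is open.', 'Call us.']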
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def get_row1(row): orignal_row = row words = row.split(" ") cells = [] try: row = processors_utils.super_replace(row, ["(", ")", ",", "$", "%"], "") tags = nltk.pos_tag(list(filter(None, row.split(" ")))) except Exception as e: logging.error(e) return [orignal_row] # "" strn = "" for i in range(len(tags)): # previous check tag = tags[i][1] word = words[i].lstrip().rstrip() proc_word = processors_utils.super_replace(word, ["(", ")", ",", "$", "%"], "") if len(word) & len(proc_word.replace(" ", "")): # print(proc_word) start_tag = nltk.pos_tag(proc_word[0])[0][1] end_tag = nltk.pos_tag(proc_word[-1])[0][1] else: start_tag = "CD" end_tag = "CD" if ((tag == "CD") | (tag == ":")) and ( (tag == ":") | ((start_tag == "CD") and (end_tag == "CD")) ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) strn = "" elif ( ((start_tag == "CD") and (end_tag == "CD")) & (word != "$") & (word == "%") ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) else: strn += word.lstrip().rstrip() + " " if type(cells) == str: cells = [cells] return cells # nlm-ingestor/nlm_ingestor/file_parser/tika_parser.py def find_tika_header(fp): try: with open(fp) as file: file_data = file.read() soup = BeautifulSoup(file_data, "html.parser") # print(str(soup.find_all('head')[0])) head = soup.find_all("head") return "org.apache.tika.parser" in str(head[0]) except Exception as e: logging.error(e) return False # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def format_tables(blocks_df): # columns block_text block_sents block_type # identify all tables in df table_indexes = blocks_df[blocks_df.block_type == "table_row"].index # if none are found if len(table_indexes) == 0: return blocks_df # group tables tables = group_tables(table_indexes) invalid = [] idx = [] for i in range(len(tables)): if len(tables[i]) < 2: invalid.append(i) else: idx.append(i) if len(invalid): blocks_df.loc[ np.concatenate(np.array(tables)[np.array(invalid)], axis=0), "block_type", ] = "para" table_rows = blocks_df[blocks_df.block_type == "table_row"] table_list = [] # print(table_rows) for table_idx in idx: table_idx = tables[table_idx] # print(table_rows.loc[table_idx].values,"\n") table = [] for row_idx, row in table_rows.loc[table_idx].iterrows(): table += [list(filter(None, get_row(row["block_text"].rstrip())))] # check if table is uniform table_cell_counts = [] if len(table) and (len(table[0])): table_cell_counts = [len(row) for row in table] try: cell_count = mode(table_cell_counts) except Exception as e: logging.error(e) cell_count = min(table_cell_counts) # non uniform row if (sum(table_cell_counts) % len(table[0])) and (cell_count): new_table = [] for row in table: # multiple rows in row if (len(row) > cell_count) and (len(row) % cell_count == 0): rows = int(len(row) / cell_count) for new_row in range(rows): new_row += 1 new_table_row = row[ new_row * cell_count - cell_count : new_row * cell_count ] new_table.append(new_table_row) else: new_table.append(row) table_list.append(new_table) else: table_list.append(table) else: table_list.append(table) replace = [] # check for valid tables if len(idx): for i in np.array(tables)[np.array(idx)]: replace.append(i) for i in range(len(replace)): blocks_df = blocks_df.drop(replace[i]) blocks_df.loc[replace[i][0]] = { "block_type": "table", "block_sents": 
table_list[i], "block_text": table_list[i], } return blocks_df.sort_index().reset_index(drop=True) else: return blocks_df """ import datetime import logging import math import re import string from nltk.corpus import stopwords from .patterns import abbreviations from .patterns import states from .patterns import states_abbreviations from .styling_utils import mode_of_list try: stop_words = set(stopwords.words("english")) except Exception as e: logging.error(e) import nltk stopwords = nltk.download("stopwords") stop_words = set(stopwords.words("english")) stop_words.add("per") continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~" list_chars = [ "โ€ข", "โžข", "*", "ฦ’", "๏‚ท", "๏‚ง", "๏ƒ˜", "๏ฎ", "ยป", "โ˜", "ยท", "๏ฟฝ", "โ–ช", "โ–ช", "โ—‹", "๔€ธ", "โ€“", ] list_types = { "โ€ข": "circle", "โžข": "wide_symbol_arrow", "*": "star", "ฦ’": "f", "๏‚ท": "clock", "๏‚ง": "small_square", "๏ƒ˜": "narrow_symbol_arrow", "๏ฎ": "large_square", "ยป": "double_arrow", "โ˜": "hollow_square", "ยท": "circle", "๏ฟฝ": "special_char", "โ–ช": "very_small_square", "โ–ช": "very_small_square", "โ—‹": "hollow_circle", "๔€ธ": "hollow_squere", "โ€“": "dash", "โ€’": "another-dash", "ฬถ": "underscore", } unicode_list_types = { "\\uf0b7": "โ€ข", "\\uf0fc": "๏ƒผ", } footnote_types = { "ยฉ" } ambiguous_list_chars = ["+", "-"] units = ["acres", "miles", "-"] # - could represent a null value in a row punctuations = string.punctuation + "โ€œ" start_quotations = ["'", '"', "โ€œ"] end_quotations = ["'", '"', "โ€"] """ Quote Pattern details: \\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly. ["โ€œ\'] ==> Quote patterns (?!\\D\\s) ==> Negative Lookahead for single character following the quote. Helps in removing words like Macy's, don't ... (?!\\d+) ==> Negative Lookahead for one or more digits following the pattern. Helps in removing words like '19, '2019 (.*?)[,;.]?[โ€"\'] ==> Match all other data. """ # Add / Modify Quotation pattern in ingestor_utils/utils.py also. 
quote_pattern = re.compile( r'(?:(?<=\W)|(?<=^))["โ€œโ€˜โ€™\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[โ€"โ€˜โ€™\']+', ) # (r'["โ€œ\'](.*?)[,;.]?[โ€"\']') single_char_pattern = re.compile(r'[a-zA-Z]') multi_char_pattern = re.compile(r'[a-zA-Z]+') roman_number_pattern = re.compile(r'[ixvIXV]+$') ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"โ€œโ€˜โ€™โ€\'\s]*$") conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"] class Word: def __init__(self, token): self.text = token self.is_percent = False self.is_number = False self.is_year = False # year does not count as a number self.is_dollar = False self.is_million = False self.is_billion = False self.is_thousand = False self.is_date_entry = False self.is_negative = False self.length = len(self.text) self.is_stop_word = self.text.lower() in stop_words self.is_number_range = False self.parts = [] text_without_punct = self.text while ( len(text_without_punct) > 1 and (text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations) ): text_without_punct = text_without_punct[0:-1] # remove leading unbalancced punctuations while ( len(text_without_punct) > 1 and (text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations) ): text_without_punct = text_without_punct[1:] self.text_without_punct = text_without_punct self.is_noun = self.text_without_punct[0].isupper() n = self.check_numeric() self.check_date() <fim_suffix> except Exception as e: logging.error(e) self.num_digits = 0 def check_date(self): if "/" in self.text or "-" in self.text: text = self.text.replace("/", "-") date_patterns = [ "%b-%d", "%B-%d", "%B-%d-%y", "%B-%d-%Y", "%b-%d-%Y", "%b-%d-%y", "%m-%d", "%m-%d-%y", "%m-%d-%Y", ] for pat in date_patterns: try: datetime.datetime.strptime(text, pat) self.is_date_entry = True return except ValueError: pass else: self.is_date_entry = False def check_numeric(self): word = self.text.lower() if not word.isalpha(): if word.isprintable(): if not word.isnumeric(): if word.startswith("(") and word.endswith(")"): word = word[1:-1] if word.startswith("-"): self.is_negative = True word = word[1:] if word.startswith("$"): self.is_dollar = True word = word[1:] elif word.endswith("$"): self.is_dollar = True word = word[0:-1] elif word.endswith("%"): self.is_percent = True word = word[0:-1] elif word.endswith("m"): self.is_million = True elif word.endswith("bn"): self.is_billion = True if word.startswith("(") and word.endswith(")"): word = word[1:-1] word = word.replace(",", "") if word.isnumeric() or word.replace(".", "", 1).isnumeric(): self.is_number = True parts = word.split("-") if ( len(parts) == 2 and parts[0].isnumeric() and parts[1].isnumeric() ): self.is_number_range = True self.parts = parts else: self.is_number = True if self.is_number: numeric_part = word return numeric_part class Line: def __init__( self, line_str, text_list=[], style_dict={}, page_details={}, noun_chunk_ending_tokens=[], ): self.text = line_str.strip() self.visual_line = VisualLine(text_list, style_dict, page_details) self.words = [] self.is_independent = False self.is_header = False self.is_header_without_comma = False self.noun_chunks = [] self.quoted_words = quote_pattern.findall(self.text) self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens} self.parse_line() def check_header(self): # Section X, Article Y, Note 1 etc. 
first_word_header = self.first_word.lower() in ["section", "article", "note"] # If there are a certain percentage of title words (first letter capitalize) title_ratio = ( self.title_word_count / self.eff_word_count if self.eff_word_count > 0 else 1.0 ) # print(self.title_word_count, self.eff_word_count, title_ratio) # Section 1 is a header but Section 1: Hello 3 is not has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10 has_header_structure = ( (first_word_header or has_enough_titles) and self.number_count == 1 ) or self.numbered_line or self.text.isupper() # has_header_structure = has_header_structure and self.eff_word_count < last_word_number = ( self.last_word.lower() in units or self.last_word_number and not has_header_structure ) last_word_date = self.last_word_date and not has_header_structure # Find lines ending with sentence delimiter. But exclude text like "L.P." ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None sentence_structure = self.ends_with_period and not ( has_header_structure and title_ratio > 0.9 ) and ends_with_delim last_letter_is_punctuation = ( self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and ends_with_delim ) self.is_header_without_comma = ( not sentence_structure and not self.has_list_char and not self.first_char in footnote_types and has_enough_titles and not last_word_number and ( self.number_count == 0 or (has_header_structure and self.number_count <= 1) ) and not self.has_continuing_chars and not last_word_date and self.first_word_title and not self.last_word_is_stop_word and not self.is_zipcode_or_po and not last_letter_is_punctuation and not "://" in self.text # url pattern ) self.is_header = self.is_header_without_comma and \ ((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True) def check_ends_with_period(self): # punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.'] last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."] self.ends_with_period = self.last_char in ["."] and not last_word_is_title def check_table_row(self): if not self.is_header: value_count = ( self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ") ) word_symbols = self.word_count - self.dollar_sign_count if word_symbols == 0: word_symbols = 1 word_ratio = ( value_count + self.title_word_count + self.date_entry_count ) / word_symbols self.is_table_row = ( ( (value_count > 0 or self.date_entry_count > 0) and word_ratio > 0.7 and not self.ends_with_period and not self.is_zipcode_or_po ) and not self.last_word_is_stop_word or ("...." in self.text) ) else: self.is_table_row = False def check_list_item(self): text = self.text.strip() self.has_list_char = text[0] in list_types.keys() # if not self.has_list_char and text[0] in ambiguous_list_chars: # self.has_list_char = text[1:].strip()[0].isalpha() self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$" if self.is_list_item: self.list_type = list_types[text[0]] # matches 1.1 1.2.1 1 etc. 
def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # check if line is part of address def check_zipcode_or_pobox(self): # check if line matches format P.O. 
box xxxxx pobox = ( self.word_count == 3 and self.last_word_number and self.first_word.lower() in ["po", "p.o", "p.o."] ) # check if line is last part of address, matching format "city, state zipcode" zipcode = ( self.word_count < 7 # ensure line is standalone address, not part of larger sentence and ( self.contains_state # line contains comma followed by state name or abbreviation # line ends in zipcode, with format xxxxx or xxxxx-xxxx and ( (self.last_word_number or self.last_word[-4:].isdigit()) and ( (len(self.last_word) == 10 and self.last_word[-5] == "-") or len(self.last_word) == 5 ) ) and not self.ends_with_period ) ) self.is_zipcode_or_po = pobox or zipcode def set_line_type(self): line_type = "para" if self.is_table_row: line_type = "table_row" elif self.is_header: line_type = "header" elif self.is_list_item or self.numbered_line: line_type = "list_item" else: line_type = "para" self.line_type = line_type def parse_line(self): self.words = [] self.title_word_count = 0 self.alpha_count = 0 self.list_type = "" self.integer_numbered_line = False self.roman_numbered_line = False self.dot_numbered_line = False self.numbered_line = False self.stop_word_count = 0 self.dollar_count = 0 self.pct_count = 0 self.number_count = 0 self.last_word_number = False self.first_word_title = False self.letter_numbered_line = False self.ends_with_hyphen = False self.last_word_date = False self.is_reference_author_name = False self.date_entry_count = 0 self.last_word_is_stop_word = False # self.last_word in self.stopwords self.hit_colon = False self.is_zipcode_or_po = False self.contains_state = False self.addresses = [] # todo - this is a stopgap solution, need to make it more efficient tokens = self.text.split() self.length = len(self.text) self.word_count = len(tokens) self.dollar_sign_count = tokens.count("$") last_idx = self.word_count - 1 first_alpha_found = False prev_token_comma = False self.eff_length = 0 single_letter_word_count = 0 noun_chunk_buf = [] if self.length == 0: return for idx, token in enumerate(tokens): if token in unicode_list_types.keys(): token = unicode_list_types[token] if token.__contains__(":"): self.hit_colon = True # remove punctuation unless (word) or unless it is the first token or if it has colon last_char = token[-1] # remove punctuation unless (word) or unless it is the first token if ( (token[-1] in string.punctuation or token[-1] in end_quotations) and not (token[0] in string.punctuation or token[0] in start_quotations) and (not idx == 0 or token[-1] == ":") ): token = token[0:-1] if len(token) == 0: continue # if prev token contained comma, check if current token is state name if prev_token_comma and ( token.lower() in states or token.lower() in states_abbreviations ): self.contains_state = True prev_token_comma = False if prev_token_comma: prev_token_comma = False if last_char == ",": prev_token_comma = True if idx == 0 and not token.lower() == "i" and not token.lower() == "a": self.check_numbered_line(token) if token.istitle() or token.isupper(): # and not self.hit_colon: self.title_word_count = self.title_word_count + 1 if token.isalpha(): # if not self.hit_colon: self.alpha_count = self.alpha_count + 1 if not first_alpha_found: first_alpha_found = True if idx == 0: self.first_word_title = token[0].isupper() word = Word(token) if word.is_number: self.number_count = self.number_count + 1 if idx == last_idx: self.last_word_number = True if word.is_date_entry: self.date_entry_count += 1 if idx == last_idx: self.last_word_date = True if word.is_dollar: 
self.dollar_count = self.dollar_count + 1 if idx == last_idx: self.last_word_number = True if word.is_percent: self.pct_count = self.pct_count + 1 if idx == last_idx: self.last_word_number = True self.eff_length += word.length if word.length == 1: single_letter_word_count += 1 if word.is_stop_word: if not self.hit_colon: self.stop_word_count = self.stop_word_count + 1 if idx == last_idx and len(token) != 1 and not token.isupper(): self.last_word_is_stop_word = True if word.is_noun or word.text == "&": noun = word.text_without_punct prev_word = self.words[-1] if len(self.words) > 0 else None if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf: noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway if noun.endswith("'s"): noun = noun[0:-2] noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] elif ( "".join([x.lower() for x in noun if x not in {".", ","}]) in self.noun_chunk_ending_tokens ): noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] else: noun_chunk_buf.append(noun) elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]: noun_chunk_buf.append(word.text_without_punct) elif len(noun_chunk_buf): self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] self.words.append(word) if len(noun_chunk_buf) > 0: self.noun_chunks.append(" ".join(noun_chunk_buf)) self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks)))) self.first_word = tokens[0] self.last_word = tokens[-1] self.last_char = self.text[-1] self.ends_with_period = self.last_char == "." self.ends_with_comma = self.last_char == "," self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "." 
self.eff_word_count = self.alpha_count - self.stop_word_count self.check_ends_with_period() self.first_char = self.text[0] self.has_continuing_chars = not self.numbered_line and ( self.first_char.islower() or self.first_char in continuing_chars ) self.last_continuing_char = self.last_char in continuing_chars self.check_zipcode_or_pobox() self.check_list_item() self.check_header() self.check_table_row() self.separate_line = ( self.is_header or self.is_table_row or self.is_list_item or self.is_zipcode_or_po ) self.is_list_or_row = self.is_table_row or self.is_list_item self.is_header_or_row = ( self.is_header or self.is_table_row or self.is_zipcode_or_po ) self.ends_with_abbreviation = self.ends_with_period and ( (self.last_word.find(".") != len(self.last_word) - 1) or self.last_word.lower() in abbreviations or len(self.last_word) <= 3 ) self.incomplete_line = not self.is_header_or_row and ( not self.ends_with_period or self.ends_with_abbreviation or self.end_with_period_single_char ) self.continuing_line = self.has_continuing_chars and not self.separate_line self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8 self.set_line_type() if self.is_header or self.is_header_without_comma: if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2: self.is_reference_author_name = True self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list # print(self.separate_line) # self.continuing_line = not self.separate_line and def to_json(self): json_lp = dict(self.__dict__) del json_lp["visual_line"] words = [] for word in self.words: words.append(word.__dict__) json_lp["words"] = words return json_lp class VisualLine: def __init__(self, text_list=[], style_dict={}, page_stats={}): self.text_list = text_list self.start_x = None self.start_y = None self.end_x = None self.end_y = None self.fs = None self.fw = None self.start_fs = None self.end_fs = None self.diff_prev_y = None self.diff_next_y = None self.is_comparably_sized = False self.is_comparably_bolded = False self.is_prev_space_smallest = False self.is_next_space_smallest = False self.wrapped_page = False self.text = " ".join(self.text_list) if style_dict: self.start_x = style_dict["start_x"][0] self.start_y = style_dict["start_y"][0] self.end_x = style_dict["end_x"][-1] self.end_y = style_dict["end_y"][-1] self.fs = style_dict["line_fs"][0] self.fw = style_dict["line_fw"][0] self.diff_prev_y = style_dict["diff_prev_y"][0] self.diff_next_y = style_dict["diff_next_y"][0] self.font_family = ( style_dict["font_family"][0] if len(style_dict["font_family"]) else None ) self.font_style = ( style_dict["font_style"][0] if len(style_dict["font_style"]) else None ) self.min_x = ( self.start_x ) # these variables are adjustable during line joins for line width self.max_x = self.end_x self.start_x_list = style_dict["start_x"] # joined ents self.end_x_list = style_dict["end_x"] # joined ents self.start_x_list_single_ent = style_dict["start_x_list"][0] self.end_x_list_single_ent = style_dict["end_x_list"][0] self.mode_fs = mode_of_list(style_dict["line_fs"]) self.tab_count = 0 # calculates tabs for when tika misses word split if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent): self.start_end_list = list( zip(self.start_x_list_single_ent, self.end_x_list_single_ent), ) for word_x, next_word_x in zip( self.start_end_list[:-1], self.start_end_list[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = 
next_word_start_x - word_end_x if word_distance > 20: self.tab_count += 1 else: self.start_end_list = [] self.tab_count_join = 0 # tab count after join in ptolines # calculates tabs for when tika misses word split if len(self.start_x_list) == len(self.end_x_list): self.start_end_list_join = list( zip(self.start_x_list, self.end_x_list), ) for word_x, next_word_x in zip( self.start_end_list_join[:-1], self.start_end_list_join[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = next_word_start_x - word_end_x if word_distance > 20: self.tab_count_join += 1 else: self.start_end_list_join = [] if len(self.text.split()) == 2 and self.tab_count == 1: self.text_list = self.text.split() # Count tabs in text list, Eventually make it a function of font size self.start_fs = round(style_dict["start_fs"][0], 1) self.end_fs = round(style_dict["end_fs"][-1], 1) self.compute_visual_features(page_stats) def compute_visual_features(self, page_stats): # compute font size relative to most common font font_sizes_mode = page_stats["mode_fs"] if self.fs > (4 / 3) * font_sizes_mode: self.is_comparably_sized = True else: self.is_comparably_sized = False # compute font weight relative to 600.0 which has generally # been observed to correspond to bolding of some sort font_weights_mode = page_stats["mode_fw"] if font_weights_mode >= 600.0: self.is_comparably_bolded = False elif self.fw > 600.0: self.is_comparably_bolded = True # compare line height for similar type (same font) lines if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2: for k, v in page_stats["fs_and_diff_prev_y"].items(): if k == self.fs and 0 <= v < self.diff_prev_y: break else: self.is_prev_space_smallest = True if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2: for k, v in page_stats["fs_and_diff_next_y"].items(): if k == self.fs and 0 <= v < self.diff_next_y: break else: self.is_next_space_smallest = True def should_join_table(self, next_line): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # check list of spaced words curr_line_ents = len(self.text_list) next_line_ents = len(next_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # compare alignment of elements in both lists if ent_match: return return False def should_join_para(self): return False def should_join_header(self): return False def __str__(self): output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest}," output_str += f"\nfont_style = {self.font_style}" return output_str <fim_middle>try: if n: n = round(float(n)) if n > 0: digits = int(math.log10(n)) + 1 elif n == 0: digits = 1 else: digits = int(math.log10(-n)) + 2 self.num_digits = digits if digits == 4 and self.text.replace(",", "") == self.text: self.is_year = True self.is_number = False else: self.num_digits = 0
try:
    if n:
        n = round(float(n))
        if n > 0:
            digits = int(math.log10(n)) + 1
        elif n == 0:
            digits = 1
        else:
            digits = int(math.log10(-n)) + 2
        self.num_digits = digits
        if digits == 4 and self.text.replace(",", "") == self.text:
            self.is_year = True
            self.is_number = False
    else:
        self.num_digits = 0
TRY
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
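The TRY completion above derives a digit count via log10 and uses it to flag comma-free four-digit tokens as years rather than numbers. The arithmetic in isolation, as a hedged restatement rather than the class's actual method:

import math

def count_digits(n: int) -> int:
    # floor(log10(|n|)) + 1 digits, plus one extra position for a minus
    # sign; log10 is undefined at zero, so that case is handled explicitly.
    if n == 0:
        return 1
    if n > 0:
        return int(math.log10(n)) + 1
    return int(math.log10(-n)) + 2

assert count_digits(0) == 1
assert count_digits(7) == 1
assert count_digits(2024) == 4   # four digits and no comma -> treated as a year
assert count_digits(-75) == 3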
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def get_row1(row): orignal_row = row words = row.split(" ") cells = [] try: row = processors_utils.super_replace(row, ["(", ")", ",", "$", "%"], "") tags = nltk.pos_tag(list(filter(None, row.split(" ")))) except Exception as e: logging.error(e) return [orignal_row] # "" strn = "" for i in range(len(tags)): # previous check tag = tags[i][1] word = words[i].lstrip().rstrip() proc_word = processors_utils.super_replace(word, ["(", ")", ",", "$", "%"], "") if len(word) & len(proc_word.replace(" ", "")): # print(proc_word) start_tag = nltk.pos_tag(proc_word[0])[0][1] end_tag = nltk.pos_tag(proc_word[-1])[0][1] else: start_tag = "CD" end_tag = "CD" if ((tag == "CD") | (tag == ":")) and ( (tag == ":") | ((start_tag == "CD") and (end_tag == "CD")) ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) strn = "" elif ( ((start_tag == "CD") and (end_tag == "CD")) & (word != "$") & (word == "%") ): cells.append(strn.strip()) cells.append(word.lstrip().rstrip()) else: strn += word.lstrip().rstrip() + " " if type(cells) == str: cells = [cells] return cells # nlm-ingestor/nlm_ingestor/file_parser/tika_parser.py def find_tika_header(fp): try: with open(fp) as file: file_data = file.read() soup = BeautifulSoup(file_data, "html.parser") # print(str(soup.find_all('head')[0])) head = soup.find_all("head") return "org.apache.tika.parser" in str(head[0]) except Exception as e: logging.error(e) return False # nlm-ingestor/nlm_ingestor/ingestor/table_builder.py def format_tables(blocks_df): # columns block_text block_sents block_type # identify all tables in df table_indexes = blocks_df[blocks_df.block_type == "table_row"].index # if none are found if len(table_indexes) == 0: return blocks_df # group tables tables = group_tables(table_indexes) invalid = [] idx = [] for i in range(len(tables)): if len(tables[i]) < 2: invalid.append(i) else: idx.append(i) if len(invalid): blocks_df.loc[ np.concatenate(np.array(tables)[np.array(invalid)], axis=0), "block_type", ] = "para" table_rows = blocks_df[blocks_df.block_type == "table_row"] table_list = [] # print(table_rows) for table_idx in idx: table_idx = tables[table_idx] # print(table_rows.loc[table_idx].values,"\n") table = [] for row_idx, row in table_rows.loc[table_idx].iterrows(): table += [list(filter(None, get_row(row["block_text"].rstrip())))] # check if table is uniform table_cell_counts = [] if len(table) and (len(table[0])): table_cell_counts = [len(row) for row in table] try: cell_count = mode(table_cell_counts) except Exception as e: logging.error(e) cell_count = min(table_cell_counts) # non uniform row if (sum(table_cell_counts) % len(table[0])) and (cell_count): new_table = [] for row in table: # multiple rows in row if (len(row) > cell_count) and (len(row) % cell_count == 0): rows = int(len(row) / cell_count) for new_row in range(rows): new_row += 1 new_table_row = row[ new_row * cell_count - cell_count : new_row * cell_count ] new_table.append(new_table_row) else: new_table.append(row) table_list.append(new_table) else: table_list.append(table) else: table_list.append(table) replace = [] # check for valid tables if len(idx): for i in np.array(tables)[np.array(idx)]: replace.append(i) for i in range(len(replace)): blocks_df = blocks_df.drop(replace[i]) blocks_df.loc[replace[i][0]] = { "block_type": "table", "block_sents": 
table_list[i], "block_text": table_list[i], } return blocks_df.sort_index().reset_index(drop=True) else: return blocks_df """ import datetime import logging import math import re import string from nltk.corpus import stopwords from .patterns import abbreviations from .patterns import states from .patterns import states_abbreviations from .styling_utils import mode_of_list try: stop_words = set(stopwords.words("english")) except Exception as e: logging.error(e) import nltk stopwords = nltk.download("stopwords") stop_words = set(stopwords.words("english")) stop_words.add("per") continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~" list_chars = [ "โ€ข", "โžข", "*", "ฦ’", "๏‚ท", "๏‚ง", "๏ƒ˜", "๏ฎ", "ยป", "โ˜", "ยท", "๏ฟฝ", "โ–ช", "โ–ช", "โ—‹", "๔€ธ", "โ€“", ] list_types = { "โ€ข": "circle", "โžข": "wide_symbol_arrow", "*": "star", "ฦ’": "f", "๏‚ท": "clock", "๏‚ง": "small_square", "๏ƒ˜": "narrow_symbol_arrow", "๏ฎ": "large_square", "ยป": "double_arrow", "โ˜": "hollow_square", "ยท": "circle", "๏ฟฝ": "special_char", "โ–ช": "very_small_square", "โ–ช": "very_small_square", "โ—‹": "hollow_circle", "๔€ธ": "hollow_squere", "โ€“": "dash", "โ€’": "another-dash", "ฬถ": "underscore", } unicode_list_types = { "\\uf0b7": "โ€ข", "\\uf0fc": "๏ƒผ", } footnote_types = { "ยฉ" } ambiguous_list_chars = ["+", "-"] units = ["acres", "miles", "-"] # - could represent a null value in a row punctuations = string.punctuation + "โ€œ" start_quotations = ["'", '"', "โ€œ"] end_quotations = ["'", '"', "โ€"] """ Quote Pattern details: \\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly. ["โ€œ\'] ==> Quote patterns (?!\\D\\s) ==> Negative Lookahead for single character following the quote. Helps in removing words like Macy's, don't ... (?!\\d+) ==> Negative Lookahead for one or more digits following the pattern. Helps in removing words like '19, '2019 (.*?)[,;.]?[โ€"\'] ==> Match all other data. """ # Add / Modify Quotation pattern in ingestor_utils/utils.py also. 
quote_pattern = re.compile( r'(?:(?<=\W)|(?<=^))["โ€œโ€˜โ€™\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[โ€"โ€˜โ€™\']+', ) # (r'["โ€œ\'](.*?)[,;.]?[โ€"\']') single_char_pattern = re.compile(r'[a-zA-Z]') multi_char_pattern = re.compile(r'[a-zA-Z]+') roman_number_pattern = re.compile(r'[ixvIXV]+$') ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"โ€œโ€˜โ€™โ€\'\s]*$") conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"] class Word: def __init__(self, token): self.text = token self.is_percent = False self.is_number = False self.is_year = False # year does not count as a number self.is_dollar = False self.is_million = False self.is_billion = False self.is_thousand = False self.is_date_entry = False self.is_negative = False self.length = len(self.text) self.is_stop_word = self.text.lower() in stop_words self.is_number_range = False self.parts = [] text_without_punct = self.text while ( len(text_without_punct) > 1 and (text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations) ): text_without_punct = text_without_punct[0:-1] # remove leading unbalancced punctuations while ( len(text_without_punct) > 1 and (text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations) ): text_without_punct = text_without_punct[1:] self.text_without_punct = text_without_punct self.is_noun = self.text_without_punct[0].isupper() n = self.check_numeric() self.check_date() try: if n: n = round(float(n)) if n > 0: digits = int(math.log10(n)) + 1 elif n == 0: digits = 1 else: digits = int(math.log10(-n)) + 2 self.num_digits = digits if digits == 4 and self.text.replace(",", "") == self.text: self.is_year = True self.is_number = False else: self.num_digits = 0 <fim_suffix> def check_date(self): if "/" in self.text or "-" in self.text: text = self.text.replace("/", "-") date_patterns = [ "%b-%d", "%B-%d", "%B-%d-%y", "%B-%d-%Y", "%b-%d-%Y", "%b-%d-%y", "%m-%d", "%m-%d-%y", "%m-%d-%Y", ] for pat in date_patterns: try: datetime.datetime.strptime(text, pat) self.is_date_entry = True return except ValueError: pass else: self.is_date_entry = False def check_numeric(self): word = self.text.lower() if not word.isalpha(): if word.isprintable(): if not word.isnumeric(): if word.startswith("(") and word.endswith(")"): word = word[1:-1] if word.startswith("-"): self.is_negative = True word = word[1:] if word.startswith("$"): self.is_dollar = True word = word[1:] elif word.endswith("$"): self.is_dollar = True word = word[0:-1] elif word.endswith("%"): self.is_percent = True word = word[0:-1] elif word.endswith("m"): self.is_million = True elif word.endswith("bn"): self.is_billion = True if word.startswith("(") and word.endswith(")"): word = word[1:-1] word = word.replace(",", "") if word.isnumeric() or word.replace(".", "", 1).isnumeric(): self.is_number = True parts = word.split("-") if ( len(parts) == 2 and parts[0].isnumeric() and parts[1].isnumeric() ): self.is_number_range = True self.parts = parts else: self.is_number = True if self.is_number: numeric_part = word return numeric_part class Line: def __init__( self, line_str, text_list=[], style_dict={}, page_details={}, noun_chunk_ending_tokens=[], ): self.text = line_str.strip() self.visual_line = VisualLine(text_list, style_dict, page_details) self.words = [] self.is_independent = False self.is_header = False self.is_header_without_comma = False self.noun_chunks = [] self.quoted_words = quote_pattern.findall(self.text) self.noun_chunk_ending_tokens = 
{x.lower() for x in noun_chunk_ending_tokens} self.parse_line() def check_header(self): # Section X, Article Y, Note 1 etc. first_word_header = self.first_word.lower() in ["section", "article", "note"] # If there are a certain percentage of title words (first letter capitalize) title_ratio = ( self.title_word_count / self.eff_word_count if self.eff_word_count > 0 else 1.0 ) # print(self.title_word_count, self.eff_word_count, title_ratio) # Section 1 is a header but Section 1: Hello 3 is not has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10 has_header_structure = ( (first_word_header or has_enough_titles) and self.number_count == 1 ) or self.numbered_line or self.text.isupper() # has_header_structure = has_header_structure and self.eff_word_count < last_word_number = ( self.last_word.lower() in units or self.last_word_number and not has_header_structure ) last_word_date = self.last_word_date and not has_header_structure # Find lines ending with sentence delimiter. But exclude text like "L.P." ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None sentence_structure = self.ends_with_period and not ( has_header_structure and title_ratio > 0.9 ) and ends_with_delim last_letter_is_punctuation = ( self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and ends_with_delim ) self.is_header_without_comma = ( not sentence_structure and not self.has_list_char and not self.first_char in footnote_types and has_enough_titles and not last_word_number and ( self.number_count == 0 or (has_header_structure and self.number_count <= 1) ) and not self.has_continuing_chars and not last_word_date and self.first_word_title and not self.last_word_is_stop_word and not self.is_zipcode_or_po and not last_letter_is_punctuation and not "://" in self.text # url pattern ) self.is_header = self.is_header_without_comma and \ ((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True) def check_ends_with_period(self): # punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.'] last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."] self.ends_with_period = self.last_char in ["."] and not last_word_is_title def check_table_row(self): if not self.is_header: value_count = ( self.number_count + self.dollar_count + self.pct_count + self.text.count(" - ") ) word_symbols = self.word_count - self.dollar_sign_count if word_symbols == 0: word_symbols = 1 word_ratio = ( value_count + self.title_word_count + self.date_entry_count ) / word_symbols self.is_table_row = ( ( (value_count > 0 or self.date_entry_count > 0) and word_ratio > 0.7 and not self.ends_with_period and not self.is_zipcode_or_po ) and not self.last_word_is_stop_word or ("...." in self.text) ) else: self.is_table_row = False def check_list_item(self): text = self.text.strip() self.has_list_char = text[0] in list_types.keys() # if not self.has_list_char and text[0] in ambiguous_list_chars: # self.has_list_char = text[1:].strip()[0].isalpha() self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$" if self.is_list_item: self.list_type = list_types[text[0]] # matches 1.1 1.2.1 1 etc. 
def check_numbered_line(self, word): trunc_word = word ends_with_parens = word.endswith(")") number_end_char = word.endswith(".") or ends_with_parens number_start_char = word.startswith("(") if number_start_char and not ends_with_parens: return False if word[-1] in ["%", "$", ","]: return False if number_end_char: trunc_word = word[:-1] if number_start_char: trunc_word = trunc_word[1:] # To handle scenarios like (ii)(A) if ")(" in trunc_word: trunc_word = trunc_word.split(")(")[0] parts = trunc_word.split(".") self.integer_numbered_line = False self.roman_numbered_line = False self.letter_numbered_line = False self.dot_numbered_line = False mixed_list_items = False max_digits = 2 max_roman = 6 for idx, part in enumerate(parts): # print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0) if len(part) <= max_digits: # (1), (2), (3) self.integer_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(")") ) # 1. 2. 3. self.dot_numbered_line = part.isdigit() and ( len(parts) > 1 or word.endswith(".") ) # a. b. c. or a) b) c) # idx > 0 for patterns like 10.a # a1 b1 c1 etc. self.letter_numbered_line = ( True if single_char_pattern.match(part) and ( (number_end_char and len(part) == 1 and len(parts) == 1) or multi_char_pattern.sub("", part).isdigit() or idx > 0 ) else False ) if len(part) <= max_roman: # xi, i, iv self.roman_numbered_line = ( True if roman_number_pattern.match(part) and idx == 0 else False ) if part.endswith(")") and part[0].isalnum() and "(" in part: mixed_list_items = True # else: # self.integer_numbered_line = False # A-1 # self.letter_numbered_line = ( # True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False # ) self.numbered_line = ( self.integer_numbered_line or self.roman_numbered_line or self.letter_numbered_line or self.dot_numbered_line ) and not mixed_list_items if not self.numbered_line: break if self.numbered_line: self.start_number = trunc_word self.line_without_number = self.text[len(word) + 1 :] self.full_number = self.text[:len(word)] # check if line is part of address def check_zipcode_or_pobox(self): # check if line matches format P.O. 
box xxxxx pobox = ( self.word_count == 3 and self.last_word_number and self.first_word.lower() in ["po", "p.o", "p.o."] ) # check if line is last part of address, matching format "city, state zipcode" zipcode = ( self.word_count < 7 # ensure line is standalone address, not part of larger sentence and ( self.contains_state # line contains comma followed by state name or abbreviation # line ends in zipcode, with format xxxxx or xxxxx-xxxx and ( (self.last_word_number or self.last_word[-4:].isdigit()) and ( (len(self.last_word) == 10 and self.last_word[-5] == "-") or len(self.last_word) == 5 ) ) and not self.ends_with_period ) ) self.is_zipcode_or_po = pobox or zipcode def set_line_type(self): line_type = "para" if self.is_table_row: line_type = "table_row" elif self.is_header: line_type = "header" elif self.is_list_item or self.numbered_line: line_type = "list_item" else: line_type = "para" self.line_type = line_type def parse_line(self): self.words = [] self.title_word_count = 0 self.alpha_count = 0 self.list_type = "" self.integer_numbered_line = False self.roman_numbered_line = False self.dot_numbered_line = False self.numbered_line = False self.stop_word_count = 0 self.dollar_count = 0 self.pct_count = 0 self.number_count = 0 self.last_word_number = False self.first_word_title = False self.letter_numbered_line = False self.ends_with_hyphen = False self.last_word_date = False self.is_reference_author_name = False self.date_entry_count = 0 self.last_word_is_stop_word = False # self.last_word in self.stopwords self.hit_colon = False self.is_zipcode_or_po = False self.contains_state = False self.addresses = [] # todo - this is a stopgap solution, need to make it more efficient tokens = self.text.split() self.length = len(self.text) self.word_count = len(tokens) self.dollar_sign_count = tokens.count("$") last_idx = self.word_count - 1 first_alpha_found = False prev_token_comma = False self.eff_length = 0 single_letter_word_count = 0 noun_chunk_buf = [] if self.length == 0: return for idx, token in enumerate(tokens): if token in unicode_list_types.keys(): token = unicode_list_types[token] if token.__contains__(":"): self.hit_colon = True # remove punctuation unless (word) or unless it is the first token or if it has colon last_char = token[-1] # remove punctuation unless (word) or unless it is the first token if ( (token[-1] in string.punctuation or token[-1] in end_quotations) and not (token[0] in string.punctuation or token[0] in start_quotations) and (not idx == 0 or token[-1] == ":") ): token = token[0:-1] if len(token) == 0: continue # if prev token contained comma, check if current token is state name if prev_token_comma and ( token.lower() in states or token.lower() in states_abbreviations ): self.contains_state = True prev_token_comma = False if prev_token_comma: prev_token_comma = False if last_char == ",": prev_token_comma = True if idx == 0 and not token.lower() == "i" and not token.lower() == "a": self.check_numbered_line(token) if token.istitle() or token.isupper(): # and not self.hit_colon: self.title_word_count = self.title_word_count + 1 if token.isalpha(): # if not self.hit_colon: self.alpha_count = self.alpha_count + 1 if not first_alpha_found: first_alpha_found = True if idx == 0: self.first_word_title = token[0].isupper() word = Word(token) if word.is_number: self.number_count = self.number_count + 1 if idx == last_idx: self.last_word_number = True if word.is_date_entry: self.date_entry_count += 1 if idx == last_idx: self.last_word_date = True if word.is_dollar: 
self.dollar_count = self.dollar_count + 1 if idx == last_idx: self.last_word_number = True if word.is_percent: self.pct_count = self.pct_count + 1 if idx == last_idx: self.last_word_number = True self.eff_length += word.length if word.length == 1: single_letter_word_count += 1 if word.is_stop_word: if not self.hit_colon: self.stop_word_count = self.stop_word_count + 1 if idx == last_idx and len(token) != 1 and not token.isupper(): self.last_word_is_stop_word = True if word.is_noun or word.text == "&": noun = word.text_without_punct prev_word = self.words[-1] if len(self.words) > 0 else None if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf: noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway if noun.endswith("'s"): noun = noun[0:-2] noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] elif ( "".join([x.lower() for x in noun if x not in {".", ","}]) in self.noun_chunk_ending_tokens ): noun_chunk_buf.append(noun) self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] else: noun_chunk_buf.append(noun) elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]: noun_chunk_buf.append(word.text_without_punct) elif len(noun_chunk_buf): self.noun_chunks.append(" ".join(noun_chunk_buf)) noun_chunk_buf = [] self.words.append(word) if len(noun_chunk_buf) > 0: self.noun_chunks.append(" ".join(noun_chunk_buf)) self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks)))) self.first_word = tokens[0] self.last_word = tokens[-1] self.last_char = self.text[-1] self.ends_with_period = self.last_char == "." self.ends_with_comma = self.last_char == "," self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "." 
self.eff_word_count = self.alpha_count - self.stop_word_count self.check_ends_with_period() self.first_char = self.text[0] self.has_continuing_chars = not self.numbered_line and ( self.first_char.islower() or self.first_char in continuing_chars ) self.last_continuing_char = self.last_char in continuing_chars self.check_zipcode_or_pobox() self.check_list_item() self.check_header() self.check_table_row() self.separate_line = ( self.is_header or self.is_table_row or self.is_list_item or self.is_zipcode_or_po ) self.is_list_or_row = self.is_table_row or self.is_list_item self.is_header_or_row = ( self.is_header or self.is_table_row or self.is_zipcode_or_po ) self.ends_with_abbreviation = self.ends_with_period and ( (self.last_word.find(".") != len(self.last_word) - 1) or self.last_word.lower() in abbreviations or len(self.last_word) <= 3 ) self.incomplete_line = not self.is_header_or_row and ( not self.ends_with_period or self.ends_with_abbreviation or self.end_with_period_single_char ) self.continuing_line = self.has_continuing_chars and not self.separate_line self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8 self.set_line_type() if self.is_header or self.is_header_without_comma: if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2: self.is_reference_author_name = True self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list # print(self.separate_line) # self.continuing_line = not self.separate_line and def to_json(self): json_lp = dict(self.__dict__) del json_lp["visual_line"] words = [] for word in self.words: words.append(word.__dict__) json_lp["words"] = words return json_lp class VisualLine: def __init__(self, text_list=[], style_dict={}, page_stats={}): self.text_list = text_list self.start_x = None self.start_y = None self.end_x = None self.end_y = None self.fs = None self.fw = None self.start_fs = None self.end_fs = None self.diff_prev_y = None self.diff_next_y = None self.is_comparably_sized = False self.is_comparably_bolded = False self.is_prev_space_smallest = False self.is_next_space_smallest = False self.wrapped_page = False self.text = " ".join(self.text_list) if style_dict: self.start_x = style_dict["start_x"][0] self.start_y = style_dict["start_y"][0] self.end_x = style_dict["end_x"][-1] self.end_y = style_dict["end_y"][-1] self.fs = style_dict["line_fs"][0] self.fw = style_dict["line_fw"][0] self.diff_prev_y = style_dict["diff_prev_y"][0] self.diff_next_y = style_dict["diff_next_y"][0] self.font_family = ( style_dict["font_family"][0] if len(style_dict["font_family"]) else None ) self.font_style = ( style_dict["font_style"][0] if len(style_dict["font_style"]) else None ) self.min_x = ( self.start_x ) # these variables are adjustable during line joins for line width self.max_x = self.end_x self.start_x_list = style_dict["start_x"] # joined ents self.end_x_list = style_dict["end_x"] # joined ents self.start_x_list_single_ent = style_dict["start_x_list"][0] self.end_x_list_single_ent = style_dict["end_x_list"][0] self.mode_fs = mode_of_list(style_dict["line_fs"]) self.tab_count = 0 # calculates tabs for when tika misses word split if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent): self.start_end_list = list( zip(self.start_x_list_single_ent, self.end_x_list_single_ent), ) for word_x, next_word_x in zip( self.start_end_list[:-1], self.start_end_list[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = 
next_word_start_x - word_end_x if word_distance > 20: self.tab_count += 1 else: self.start_end_list = [] self.tab_count_join = 0 # tab count after join in ptolines # calculates tabs for when tika misses word split if len(self.start_x_list) == len(self.end_x_list): self.start_end_list_join = list( zip(self.start_x_list, self.end_x_list), ) for word_x, next_word_x in zip( self.start_end_list_join[:-1], self.start_end_list_join[1:], ): word_start_x, word_end_x = word_x next_word_start_x, next_word_end_x = next_word_x word_distance = next_word_start_x - word_end_x if word_distance > 20: self.tab_count_join += 1 else: self.start_end_list_join = [] if len(self.text.split()) == 2 and self.tab_count == 1: self.text_list = self.text.split() # Count tabs in text list, Eventually make it a function of font size self.start_fs = round(style_dict["start_fs"][0], 1) self.end_fs = round(style_dict["end_fs"][-1], 1) self.compute_visual_features(page_stats) def compute_visual_features(self, page_stats): # compute font size relative to most common font font_sizes_mode = page_stats["mode_fs"] if self.fs > (4 / 3) * font_sizes_mode: self.is_comparably_sized = True else: self.is_comparably_sized = False # compute font weight relative to 600.0 which has generally # been observed to correspond to bolding of some sort font_weights_mode = page_stats["mode_fw"] if font_weights_mode >= 600.0: self.is_comparably_bolded = False elif self.fw > 600.0: self.is_comparably_bolded = True # compare line height for similar type (same font) lines if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2: for k, v in page_stats["fs_and_diff_prev_y"].items(): if k == self.fs and 0 <= v < self.diff_prev_y: break else: self.is_prev_space_smallest = True if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2: for k, v in page_stats["fs_and_diff_next_y"].items(): if k == self.fs and 0 <= v < self.diff_next_y: break else: self.is_next_space_smallest = True def should_join_table(self, next_line): """ Check if next line should be joined as a tr. This makes no assumption if the current line is a table """ # check list of spaced words curr_line_ents = len(self.text_list) next_line_ents = len(next_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # compare alignment of elements in both lists if ent_match: return return False def should_join_para(self): return False def should_join_header(self): return False def __str__(self): output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest}," output_str += f"\nfont_style = {self.font_style}" return output_str <fim_middle>except Exception as e: logging.error(e) self.num_digits = 0
except Exception as e:
    logging.error(e)
    self.num_digits = 0
CATCH
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
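The CATCH target above completes the digit-counting try block in Word.__init__. As a standalone illustration, here is a minimal sketch of that logic under the same fallback; the helper name count_digits is hypothetical and not part of the source row:

import logging
import math

def count_digits(token: str) -> int:
    # Hypothetical standalone version of the digit count completed by the target.
    # Mirrors Word.__init__: round the numeric text, take log10, and fall back
    # to 0 digits (with a logged error) when the token is not numeric.
    try:
        n = round(float(token.replace(",", "")))
        if n > 0:
            return int(math.log10(n)) + 1
        if n == 0:
            return 1
        return int(math.log10(-n)) + 2  # extra digit accounts for the minus sign
    except Exception as e:
        logging.error(e)
        return 0

For example, count_digits("1,234") returns 4, while count_digits("n/a") logs the error and returns 0, matching the target's self.num_digits = 0 fallback.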
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" <fim_suffix> return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. 
Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile')
if (realpath := _as_path(path)) and realpath.exists():
    modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
    modelfile = self._parse_modelfile(modelfile)
else:
    raise RequestError('must provide either path or modelfile')
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
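The IF target above restores Client.create's path-or-modelfile resolution. A minimal sketch of that branch, assuming a simplified _as_path and omitting the _parse_modelfile blob substitution the real method applies; resolve_modelfile is a hypothetical name, and the real client raises RequestError rather than ValueError:

from pathlib import Path
from typing import Optional, Union

def _as_path(s) -> Optional[Path]:
    # Simplified stand-in for ollama's helper: a Path only if it points at a real file.
    try:
        p = Path(s)
        return p if p.exists() else None
    except Exception:
        return None

def resolve_modelfile(path: Union[str, Path, None] = None,
                      modelfile: Optional[str] = None) -> str:
    # Prefer an existing file on disk, fall back to raw modelfile text,
    # and reject the call when neither is usable.
    if (realpath := _as_path(path)) and realpath.exists():
        return realpath.read_text()
    elif modelfile:
        return modelfile
    raise ValueError('must provide either path or modelfile')

Calling resolve_modelfile(path='Modelfile') reads the file when it exists, while resolve_modelfile(modelfile='FROM llama2') passes the text through unchanged.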
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: <fim_suffix> response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using 
the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. 
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>if stream: return await self._stream(*args, **kwargs)
if stream: return await self._stream(*args, **kwargs)
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
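The row above masks the stream/JSON dispatch inside `AsyncClient._request_stream`: when `stream=True` the method hands back the async iterator produced by `_stream`, otherwise it awaits `_request` and returns the parsed JSON body. Below is a minimal, self-contained sketch of that dispatch pattern, not the library itself: the `_fake_request`/`_fake_stream` helpers are made-up stand-ins for the `httpx` calls so the example runs offline.

```python
# Sketch of the stream/JSON dispatch pattern; transport is faked so it
# runs offline. Only the `stream` flag mirrors the real signature.
import asyncio
import json
from typing import Any, AsyncIterator, Mapping, Union


async def _fake_request(payload: Mapping[str, Any]) -> str:
    # Stand-in for `await self._request(...)`: returns one JSON body.
    return json.dumps({'echo': dict(payload)})


async def _fake_stream(payload: Mapping[str, Any]) -> AsyncIterator[Mapping[str, Any]]:
    # Stand-in for `self._stream(...)`: yields parsed JSON lines.
    for i in range(3):
        yield {'chunk': i, 'echo': dict(payload)}


async def request_stream(
    payload: Mapping[str, Any],
    stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    if stream:
        # The masked block's shape: return the async iterator itself so
        # callers write `async for part in await request_stream(...)`.
        return _fake_stream(payload)
    body = await _fake_request(payload)
    return json.loads(body)


async def main() -> None:
    print(await request_stream({'prompt': 'hi'}))        # single mapping
    async for part in await request_stream({'prompt': 'hi'}, stream=True):
        print(part)                                      # streamed chunks


asyncio.run(main())
```

Returning the iterator rather than consuming it here is what lets the public `generate`/`chat` methods expose a single signature for both streaming and non-streaming calls.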
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path <fim_suffix> print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>if path.exists(): args = f'@{await self._create_blob(path)}\n'
if path.exists(): args = f'@{await self._create_blob(path)}\n'
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
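The hole in this row is the blob-substitution branch of the async `_parse_modelfile`: when a `FROM`/`ADAPTER` argument resolves to an existing local file, the path is replaced with an `@sha256:<digest>` reference after `_create_blob` registers the bytes. The sketch below reproduces just the digest-and-rewrite half under that assumption; the server upload is omitted and the temp-file contents are made up.

```python
# Digest-and-rewrite sketch: same hashing loop as _create_blob (sha256
# over 32 KiB chunks), same FROM/ADAPTER path resolution, no upload.
import io
import tempfile
from hashlib import sha256
from pathlib import Path


def file_digest(path: Path) -> str:
    sha256sum = sha256()
    with open(path, 'rb') as r:
        while chunk := r.read(32 * 1024):
            sha256sum.update(chunk)
    return f'sha256:{sha256sum.hexdigest()}'


def rewrite_modelfile(modelfile: str, base: Path) -> str:
    out = io.StringIO()
    for line in io.StringIO(modelfile):
        command, _, args = line.partition(' ')
        if command.upper() not in ['FROM', 'ADAPTER']:
            print(line, end='', file=out)
            continue
        path = Path(args.strip()).expanduser()
        path = path if path.is_absolute() else base / path
        if path.exists():
            # The masked block, minus the upload: swap the local path
            # for a content-addressed blob reference.
            args = f'@{file_digest(path)}\n'
        print(command, args, end='', file=out)
    return out.getvalue()


with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'fake weights')  # illustrative stand-in for model weights

print(rewrite_modelfile(f'FROM {f.name}\nPARAMETER temperature 0.7\n', Path.cwd()))
```

The real `_create_blob` additionally probes the server with `HEAD /api/blobs/<digest>` before posting the bytes, so content addressing lets an already-present blob be skipped rather than re-uploaded.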
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') <fim_suffix> path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue
if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
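This row's masked block is the command filter at the top of the Modelfile parser: `str.partition(' ')` splits each line into a command and its arguments, and anything other than `FROM`/`ADAPTER` is echoed through unmodified. A small stand-alone illustration of that filtering follows; the Modelfile contents are made up.

```python
# Illustration of the FROM/ADAPTER line filter; paths/values are made up.
import io

modelfile = (
    'FROM ./weights.bin\n'
    'PARAMETER temperature 0.7\n'
    'adapter ./lora.bin\n'
    'SYSTEM You are a helpful assistant.\n'
)

for line in io.StringIO(modelfile):
    command, _, args = line.partition(' ')
    if command.upper() in ['FROM', 'ADAPTER']:
        # Candidates for blob substitution (matching is case-insensitive).
        print(f'rewrite candidate: {command!r} -> {args.strip()!r}')
    else:
        # The masked block's path: pass every other line through as-is.
        print(f'pass through     : {line.rstrip()!r}')
```

Because `partition` never raises, a blank or unrecognized line simply yields an empty or unknown command and falls through safely to the pass-through branch.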
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... <fim_suffix> raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8')
if b := _as_bytesio(image):
    return b64encode(b.read()).decode('utf-8')
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
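The target above supplies the final fallback in _encode_image: once path-like and already-base64 inputs have been handled, anything coercible to io.BytesIO is read and base64-encoded, and everything else is rejected. A self-contained re-creation of that dispatch order, with ValueError standing in for the library's RequestError, can be written as:

import io
import binascii
from base64 import b64encode, b64decode
from pathlib import Path

def encode_image(image) -> str:
    """Dispatch order: existing path -> already-base64 -> bytes/BytesIO -> error."""
    if isinstance(image, (str, Path)) and Path(image).exists():
        return b64encode(Path(image).read_bytes()).decode('utf-8')
    try:
        b64decode(image, validate=True)  # already base64? pass it through unchanged
        return image if isinstance(image, str) else image.decode('utf-8')
    except (binascii.Error, TypeError):
        ...
    if isinstance(image, bytes):
        image = io.BytesIO(image)
    if isinstance(image, io.BytesIO):  # the branch the completion above fills in
        return b64encode(image.read()).decode('utf-8')
    raise ValueError('image must be bytes, path-like object, or file-like object')

assert encode_image(b'ollama') == 'b2xsYW1h'
assert encode_image(io.BytesIO(b'ollama')) == 'b2xsYW1h'
assert encode_image('YWJj') == 'YWJj'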
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') <fim_suffix> path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. 
Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. 
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue
if command.upper() not in ['FROM', 'ADAPTER']:
        print(line, end='', file=out)
        continue
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
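Both the FROM and ADAPTER rewrites funnel into _create_blob, which addresses uploads by content hash before the HEAD/POST round-trip shown in the surrounding code. A minimal sketch of just the digest step, hashing in 32 KiB chunks so large model files never have to sit in memory, using nothing beyond the standard library:

from hashlib import sha256
from pathlib import Path

def blob_digest(path, chunk_size=32 * 1024) -> str:
    """Compute the 'sha256:<hex>' name the client uses to address a blob."""
    h = sha256()
    with open(path, 'rb') as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return f'sha256:{h.hexdigest()}'

# Round-trip demo against a throwaway file.
p = Path('example.bin')
p.write_bytes(b'ollama' * 1000)
print(blob_digest(p))  # sha256:...
p.unlink()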
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ <fim_suffix> return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile')
if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile')
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
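The completed block in this row shows how `Client.create` resolves its two mutually exclusive inputs: a `path` to a Modelfile on disk takes precedence, an inline `modelfile` string is the fallback, and `RequestError` is raised when neither is supplied. A minimal usage sketch of that branch, assuming a local Ollama server; the model name 'mario' and the Modelfile contents are illustrative assumptions, not taken from this row:

import ollama

client = ollama.Client()

# Inline Modelfile contents; no file on disk is required for this branch.
modelfile = '''
FROM llama2
SYSTEM You are Mario from Super Mario Bros.
'''

# stream=True yields ProgressResponse mappings as the server builds the model.
for progress in client.create(model='mario', modelfile=modelfile, stream=True):
    print(progress.get('status'))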
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) <fim_suffix> sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>if not chunk: break
if not chunk: break
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
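The target of this row is the termination test of the chunked-read loop in `_create_blob`: `read()` returns empty bytes at end of file, which breaks the loop after every chunk has been fed to the incremental SHA-256. A standalone sketch of the same pattern; the helper name `file_sha256` is an assumption for illustration, not part of ollama-python:

from hashlib import sha256

def file_sha256(path: str, chunk_size: int = 32 * 1024) -> str:
    # Hash the file 32 KiB at a time so memory use stays constant
    # regardless of file size.
    h = sha256()
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:  # b'' signals end of file
                break
            h.update(chunk)
    return f'sha256:{h.hexdigest()}'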
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) <fim_suffix> sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, 
Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. 
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>if not chunk: break
if not chunk: break
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
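This row targets the same loop inside the synchronous `_create_blob`, whose surrounding logic is a check-then-upload protocol: probe `/api/blobs/{digest}` with HEAD, and POST the file contents only when the server answers 404. A sketch of that flow using httpx directly; the function name `ensure_blob` and the default host are assumptions, while the endpoint paths match the client code above:

import httpx

def ensure_blob(path: str, digest: str, host: str = 'http://127.0.0.1:11434') -> None:
    with httpx.Client(base_url=host) as client:
        head = client.head(f'/api/blobs/{digest}')
        if head.status_code == 404:
            # Blob is unknown to the server; stream the file contents up.
            with open(path, 'rb') as f:
                client.post(f'/api/blobs/{digest}', content=f).raise_for_status()
        else:
            # Any error other than "missing blob" should surface to the caller.
            head.raise_for_status()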
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: <fim_suffix> return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s)
if isinstance(s, io.BytesIO):
  return s
elif isinstance(s, bytes):
  return io.BytesIO(s)
IF
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
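A minimal standalone sketch of the _as_bytesio helper that this record's target completes: the helper body is taken verbatim from the record, while the surrounding harness and asserts are illustrative assumptions added here for clarity.

import io
from typing import Any, Union

def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
  # Pass an existing buffer through unchanged.
  if isinstance(s, io.BytesIO):
    return s
  # Wrap raw bytes in a seekable buffer.
  elif isinstance(s, bytes):
    return io.BytesIO(s)
  # Anything else is not bytes-like; the caller falls through to an error.
  return None

# Usage check (illustrative): both byte-like inputs normalize to a readable buffer.
assert _as_bytesio(b'ollama').read() == b'ollama'
buf = io.BytesIO(b'ollama')
assert _as_bytesio(buf) is buf
assert _as_bytesio('not bytes-like') is None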
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' <fim_suffix> return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a 
response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. 
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>print(command, args, end='', file=out)
print(command, args, end='', file=out)
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
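A hedged sketch of the Modelfile-rewriting loop whose final write statement is this record's target: the loop and path logic mirror _parse_modelfile from the record, while fake_blob and its digest are hypothetical stand-ins for the network-backed _create_blob call.

import io
from pathlib import Path

def parse_modelfile(modelfile: str, base: Path, create_blob) -> str:
  out = io.StringIO()
  for line in io.StringIO(modelfile):
    command, _, args = line.partition(' ')
    if command.upper() not in ['FROM', 'ADAPTER']:
      # Non-path commands are copied through verbatim.
      print(line, end='', file=out)
      continue
    path = Path(args.strip()).expanduser()
    path = path if path.is_absolute() else base / path
    if path.exists():
      # Local files are replaced by a blob reference before upload.
      args = f'@{create_blob(path)}\n'
    # The statement this record targets: emit the (possibly rewritten) line.
    print(command, args, end='', file=out)
  return out.getvalue()

# Hypothetical stub standing in for the digest Client._create_blob would return.
fake_blob = lambda p: 'sha256:deadbeef'
print(parse_modelfile('FROM ./model.bin\nPARAMETER temperature 0.7\n', Path('.'), fake_blob), end='')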
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() <fim_suffix> if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>path = path if path.is_absolute() else base / path
path = path if path.is_absolute() else base / path
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
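The target above is the path-normalization idiom in `_parse_modelfile`: expand a leading `~`, then anchor any relative path at the directory containing the Modelfile rather than the current working directory. A minimal standalone sketch of that idiom follows; the helper name `resolve_modelfile_path` is illustrative, not part of the library.

from pathlib import Path

def resolve_modelfile_path(args: str, base: Path) -> Path:
    # Expand ~user shortcuts first, then resolve relative paths
    # against the Modelfile's own directory instead of the CWD.
    path = Path(args.strip()).expanduser()
    return path if path.is_absolute() else base / path

assert resolve_modelfile_path('weights.bin', Path('/models')) == Path('/models/weights.bin')
assert resolve_modelfile_path('/abs/weights.bin', Path('/models')) == Path('/abs/weights.bin')

Anchoring at `base` (the Modelfile's parent, falling back to `Path.cwd()`) is what lets `FROM` and `ADAPTER` lines reference local blobs relative to the Modelfile itself.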
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): <fim_suffix> raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>return b64encode(b.read()).decode('utf-8')
return b64encode(b.read()).decode('utf-8')
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
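The target above is the final fallback in `_encode_image`: once path-like input and already-base64 input are ruled out, the image is treated as a file-like object whose bytes are read and base64-encoded. A condensed sketch of that fallback chain, matching the doctests in the row; `encode_image_sketch` is an illustrative name, not the library API, and it omits the extra guards the real code has around path probing.

import io
from base64 import b64encode, b64decode
from pathlib import Path

def encode_image_sketch(image) -> str:
    # 1. Path-like input: read the file and encode its bytes.
    if isinstance(image, (str, Path)) and Path(image).exists():
        return b64encode(Path(image).read_bytes()).decode('utf-8')
    # 2. Already-valid base64: pass it through unchanged.
    try:
        b64decode(image, validate=True)
        return image if isinstance(image, str) else image.decode('utf-8')
    except Exception:
        pass
    # 3. File-like (or raw bytes wrapped as file-like): read and encode.
    if isinstance(image, bytes):
        image = io.BytesIO(image)
    if isinstance(image, io.BytesIO):
        return b64encode(image.read()).decode('utf-8')
    raise ValueError('image must be bytes, path-like, or file-like')

assert encode_image_sketch(b'ollama') == 'b2xsYW1h'
assert encode_image_sketch(io.BytesIO(b'ollama')) == 'b2xsYW1h'
assert encode_image_sketch('YWJj') == 'YWJj'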
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: <fim_suffix> return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>b64decode(image, validate=True)
b64decode(image, validate=True)
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
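The target above shows why `validate=True` matters in that check: with strict validation, `b64decode` raises `binascii.Error` on any character outside the base64 alphabet or on bad padding, whereas the default lenient mode silently discards such characters and would misclassify almost any string as base64. A small sketch of the detector; `looks_like_base64` is an illustrative name.

import binascii
from base64 import b64decode

def looks_like_base64(data) -> bool:
    # Strict decode: non-alphabet characters and bad padding both raise,
    # so only genuinely pre-encoded input survives the check.
    try:
        b64decode(data, validate=True)
        return True
    except (binascii.Error, TypeError):
        return False

assert looks_like_base64('YWJj')             # 'abc', already encoded
assert not looks_like_base64('not base64!')  # non-alphabet characters
assert not looks_like_base64('ollama')       # length 6: invalid padding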
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): <fim_suffix> if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>command, _, args = line.partition(' ')
command, _, args = line.partition(' ')
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
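The target above completes the `str.partition` split inside `_parse_modelfile`: each Modelfile line is divided into a directive and its argument, and only FROM/ADAPTER lines are rewritten. A minimal, self-contained sketch of that pattern follows; the sample Modelfile string is an illustrative assumption, not taken from the row.

import io

# Split each Modelfile line into a directive and its argument with
# str.partition, passing unrecognized lines through unchanged, which is
# the same pattern _parse_modelfile uses for FROM/ADAPTER rewriting.
modelfile = 'FROM ./model.bin\nPARAMETER temperature 0.7\n'
for line in io.StringIO(modelfile):
    command, _, args = line.partition(' ')
    if command.upper() in ('FROM', 'ADAPTER'):
        print('directive:', command, 'argument:', args.strip())
    else:
        print('passthrough:', line, end='')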
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: <fim_suffix> def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()
return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
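The target above is the synchronous dispatch in `_request_stream`: return the line-by-line streaming iterator when `stream=True`, otherwise perform a single request and decode its JSON body. A hedged usage sketch of how that flag surfaces through `Client.generate`; it assumes a reachable local Ollama server and an already-pulled model named 'mistral'.

from ollama import Client

client = Client()

# stream=False: _request_stream returns one decoded JSON mapping.
resp = client.generate(model='mistral', prompt='Why is the sky blue?')
print(resp['response'])

# stream=True: _request_stream returns an iterator of partial mappings.
for part in client.generate(model='mistral', prompt='Why is the sky blue?', stream=True):
    print(part['response'], end='', flush=True)
print()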
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: <fim_suffix> else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a 
response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. 
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>modelfile = self._parse_modelfile(modelfile)
modelfile = self._parse_modelfile(modelfile)
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
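The record above completes the sync `Client.create` path: an inline modelfile string is normalized through `_parse_modelfile` (FROM/ADAPTER lines pointing at existing local files are uploaded as blobs and rewritten to sha256 digests) before being posted to /api/create. A minimal usage sketch against the public API shown in the record; the model names and the running local server are assumptions, not part of the record:

import ollama

client = ollama.Client()  # defaults to http://127.0.0.1:11434 per _parse_host
# Hypothetical inline modelfile; 'llama2' is not a local file path, so the
# FROM line is passed through unchanged rather than replaced by a blob digest.
modelfile = 'FROM llama2\nSYSTEM You are a terse assistant.\n'
for progress in client.create(model='terse-llama', modelfile=modelfile, stream=True):
    print(progress.get('status'))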
<filename>ollama-python/ollama/_types.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_client.py def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) # ollama-python/ollama/_client.py def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) # ollama-python/ollama/_client.py async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial """ import json from typing import Any, TypedDict, Sequence, Literal import sys if sys.version_info < (3, 11): from typing_extensions import NotRequired else: from typing import NotRequired class BaseGenerateResponse(TypedDict): model: str 'Model used to generate response.' created_at: str 'Time when the request was created.' done: bool 'True if response is complete, otherwise False. Useful for streaming to detect the final response.' total_duration: int 'Total duration in nanoseconds.' load_duration: int 'Load duration in nanoseconds.' prompt_eval_count: int 'Number of tokens evaluated in the prompt.' prompt_eval_duration: int 'Duration of evaluating the prompt in nanoseconds.' eval_count: int 'Number of tokens evaluated in inference.' eval_duration: int 'Duration of evaluating inference in nanoseconds.' class GenerateResponse(BaseGenerateResponse): """ Response returned by generate requests. """ response: str 'Response content. When streaming, this contains a fragment of the response.' context: Sequence[int] 'Tokenized history up to the point of the response.' class Message(TypedDict): """ Chat message. """ role: Literal['user', 'assistant', 'system'] "Assumed role of the message. Response messages always have the role 'assistant'." content: str 'Content of the message. Response messages contain message fragments when streaming.' images: NotRequired[Sequence[Any]] """ Optional list of image data for multimodal models. Valid input types are: - `str` or path-like object: path to image file - `bytes` or bytes-like object: raw image data Valid image formats depend on the model. See the model card for more information. """ class ChatResponse(BaseGenerateResponse): """ Response returned by chat requests. """ message: Message 'Response message.' class ProgressResponse(TypedDict): status: str completed: int total: int digest: str class Options(TypedDict, total=False): # load time options numa: bool num_ctx: int num_batch: int num_gqa: int num_gpu: int main_gpu: int low_vram: bool f16_kv: bool logits_all: bool vocab_only: bool use_mmap: bool use_mlock: bool embedding_only: bool rope_frequency_base: float rope_frequency_scale: float num_thread: int # runtime options num_keep: int seed: int num_predict: int top_k: int top_p: float tfs_z: float typical_p: float repeat_last_n: int temperature: float repeat_penalty: float presence_penalty: float frequency_penalty: float mirostat: int mirostat_tau: float mirostat_eta: float penalize_newline: bool stop: Sequence[str] class RequestError(Exception): """ Common class for request errors. """ def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.'
class ResponseError(Exception): """ Common class for response errors. """ def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails <fim_suffix> except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' <fim_middle>error = json.loads(error).get('error', error)
error = json.loads(error).get('error', error)
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
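The target statement makes `ResponseError` prefer the server's structured message (a {"error": ...} body) while tolerating plain-text bodies. A small sketch of both branches, using only the class defined in this record:

from ollama._types import ResponseError

e = ResponseError('{"error": "model not found"}', 404)
print(e.error, e.status_code)   # model not found 404

e = ResponseError('502 Bad Gateway', 502)
print(e.error)                  # raw body kept: json.loads raises JSONDecodeError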
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 <fim_suffix> host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
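The completed `urlsplit` call is what lets `_parse_host` accept scheme-less values: a scheme is guaranteed first, so bare inputs like ':56789' or 'example.com' split cleanly into hostname and port, with defaults filled in afterwards (see the doctests in the record). The public way to exercise it is the `host` argument or the OLLAMA_HOST environment variable, both routed through `_parse_host` in `BaseClient.__init__`; the addresses below are placeholders:

import os
import ollama

client = ollama.Client(host='example.com:56789')  # normalized to http://example.com:56789
os.environ['OLLAMA_HOST'] = ':56789'              # normalized to http://127.0.0.1:56789
client = ollama.Client()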
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) <fim_suffix> return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>yield partial
yield partial
STATEMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
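`yield partial` is the line that turns the async `_stream` helper into an async generator of parsed JSON fragments, which is why streaming calls are awaited once and then consumed with `async for`, exactly as in the async-chat-stream example quoted in this record's snippets. A condensed sketch mirroring that example (model name assumed, local server required):

import asyncio
import ollama

async def main():
    client = ollama.AsyncClient()
    # stream=True: chat() resolves to an async generator of ChatResponse fragments
    async for part in await client.chat(
        model='mistral',
        messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
        stream=True,
    ):
        print(part['message']['content'], end='', flush=True)

asyncio.run(main())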
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: <fim_suffix> host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>""" >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """
""" >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
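Note on the row above: the masked block is the `_parse_host` doctest, which defines how host strings are normalized (scheme defaults to http, port to 11434, and an explicit http/https scheme implies port 80/443). As a quick sanity check of those rules, here is a minimal sketch assuming the `ollama` package from this repository is importable; `_parse_host` is a private helper, imported here for illustration only.

import ollama
from ollama._client import _parse_host

# No host at all resolves to the local server on the default port.
assert _parse_host(None) == 'http://127.0.0.1:11434'
# A bare hostname keeps the default scheme and port.
assert _parse_host('example.com') == 'http://example.com:11434'
# An explicit scheme implies its standard port when none is given.
assert _parse_host('https://example.com') == 'https://example.com:443'

# The same normalization is applied to the host passed to a client.
client = ollama.Client(host='example.com:56789')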
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: <fim_suffix> if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = 
'', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>""" Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """
""" Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
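Note on the row above: the masked block is the `create` docstring. For context, a hedged usage sketch of the API it documents; the model name and Modelfile text below are invented for illustration, and `FROM llama2` assumes a `llama2` base model is already available locally.

import ollama

client = ollama.Client()
# Inline Modelfile; FROM/ADAPTER file paths would be resolved and uploaded as blobs by _parse_modelfile.
modelfile = 'FROM llama2\nSYSTEM You are a concise assistant.'
# With stream=True, create() yields ProgressResponse-style mappings with a 'status' key.
for progress in client.create(model='my-model', modelfile=modelfile, stream=True):
    print(progress.get('status'))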
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: <fim_suffix> if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>""" Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """
""" Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: <fim_suffix> if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>""" Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """
""" Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: <fim_suffix> if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>""" Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """
""" Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: <fim_suffix> return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>""" Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """
""" Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
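The row above completes the docstring of `Client.push`, which shares its streaming contract with `Client.pull`: with `stream=True`, both return an iterator of `ProgressResponse` mappings parsed from the server's line-delimited JSON. A minimal usage sketch, assuming a reachable local Ollama server and the illustrative model tag 'llama2'; the 'completed' and 'total' keys are assumptions about typical server output, not guarantees made by the client:

import ollama

client = ollama.Client()

# With stream=True, pull() returns an iterator of partial ProgressResponse
# mappings, one per line of JSON streamed back by the server.
for progress in client.pull('llama2', stream=True):
    # 'status' accompanies every partial; 'completed' and 'total' are
    # assumed to appear while layers download
    print(progress.get('status'), progress.get('completed'), progress.get('total'))

push() accepts the same arguments and yields the same ProgressResponse shape, so the loop above carries over unchanged.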
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: <fim_suffix> return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>""" Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """
""" Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
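This row completes the matching docstring on `AsyncClient.push`. The async variants must be awaited even when `stream=True`, because the awaitable resolves to the async generator itself; the double await/async-for shape below follows the async-chat-stream example embedded in the row's context, with 'mistral' as an illustrative model tag and a running local server assumed:

import asyncio
import ollama

async def main():
    client = ollama.AsyncClient()
    # chat(..., stream=True) is awaited once to obtain the async generator,
    # which is then consumed with async for.
    stream = await client.chat(
        model='mistral',
        messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
        stream=True,
    )
    async for part in stream:
        print(part['message']['content'], end='', flush=True)

asyncio.run(main())

Both rows also carry the `_parse_host` doctests, which pin down how either client resolves its base URL. A compact sketch of that behaviour, using only cases the doctests document (reading the private `_client` attribute here is purely for illustration):

import ollama

# Scheme-less hosts default to http and port 11434; an explicit scheme
# pins that scheme's default port (80 for http, 443 for https).
client = ollama.Client(host='example.com:56789')    # -> http://example.com:56789
secure = ollama.Client(host='https://example.com')  # -> https://example.com:443

# With no host argument, the client falls back to the OLLAMA_HOST
# environment variable, then to http://127.0.0.1:11434.
default = ollama.Client()
print(default._client.base_url)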
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: <fim_suffix> headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() 
raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: 
r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. 
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>""" Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. """
""" Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
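The record above targets the BaseClient docstring, whose point is that the wrapper forwards extra kwargs straight to httpx (with follow_redirects=True and timeout=None as the only changed defaults) and merges user headers with its own Content-Type/Accept/User-Agent headers. A minimal usage sketch of that construction path follows; the timeout value and the extra header are illustrative assumptions, not library defaults.

    import ollama

    # `timeout` and any other httpx option pass through BaseClient.__init__ to the
    # underlying httpx.Client; user-supplied headers survive the merge unless they
    # collide with the three headers the wrapper sets itself.
    client = ollama.Client(
        host='http://127.0.0.1:11434',   # same as the library default
        timeout=30.0,                    # assumption: cap each request at 30s
        headers={'X-Example': 'demo'},   # hypothetical extra header
    )
    reply = client.generate(model='mistral', prompt='Why is the sky blue?')
    print(reply['response'])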
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: <fim_suffix> if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>""" >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """
""" >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
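The record above targets the `_encode_image` docstring, whose doctests show that an image argument may be raw bytes, a file-like object, a path, or an already base64-encoded string, and that all forms are normalized to base64 before the request is sent. A short caller-side sketch follows; the file name 'photo.png' and the model 'llava' are assumptions for illustration (any locally pulled multimodal model and any existing image file would do).

    import ollama

    client = ollama.Client()
    reply = client.chat(
        model='llava',  # assumed multimodal model
        messages=[{
            'role': 'user',
            'content': 'Describe this image.',
            # a path string; open('photo.png', 'rb').read() or a pre-encoded
            # base64 string would be accepted equally by _encode_image
            'images': ['photo.png'],
        }],
    )
    print(reply['message']['content'])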
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: <fim_suffix> return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>""" Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """
""" Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
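The record above targets the pull docstring, which distinguishes a single `ProgressResponse` mapping (stream=False) from a generator of them (stream=True). A small sketch of consuming the streaming form follows; the model name is illustrative, and the 'status'/'completed'/'total' keys are an assumption about the progress fields the Ollama server reports.

    import ollama

    client = ollama.Client()
    # With stream=True, pull() yields one mapping per progress update rather
    # than a single final result.
    for progress in client.pull('mistral', stream=True):
        completed, total = progress.get('completed'), progress.get('total')
        if completed and total:
            print(f"{progress.get('status', '')}: {completed}/{total} bytes")
        else:
            print(progress.get('status', ''))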
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: <fim_suffix> except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: 
""" Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. 
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>try: r.raise_for_status()
try: r.raise_for_status()
TRY
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
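The row above asks the model to complete the opening of a try block inside Client._stream: open an httpx stream, validate the status before consuming any lines, and surface server errors as the library's ResponseError. Below is a minimal sketch of that same pattern against a plain httpx.Client; stream_lines and the generic RuntimeError are illustrative stand-ins for the repository's _stream and ResponseError, not part of the ollama package.

import json

import httpx

def stream_lines(client: httpx.Client, method: str, url: str, **kwargs):
    with client.stream(method, url, **kwargs) as r:
        try:
            # The completed block: check the status before iterating any lines.
            r.raise_for_status()
        except httpx.HTTPStatusError as e:
            # A streamed body must be read before response.text is usable.
            e.response.read()
            raise RuntimeError(e.response.text) from None
        for line in r.iter_lines():
            yield json.loads(line)

Raising early matters: iterating an error response would otherwise feed non-JSON lines straight into json.loads.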
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' <fim_suffix> except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>try: await self._request('HEAD', f'/api/blobs/{digest}')
try: await self._request('HEAD', f'/api/blobs/{digest}')
TRY
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
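In this row the hole sits in AsyncClient._create_blob, and the target restores the try header plus the HEAD probe that checks whether a blob with the computed digest already exists on the server; only a 404 falls through to the upload. A short sketch of that probe-then-upload flow against a bare httpx.AsyncClient follows; ensure_blob is a hypothetical helper, and it leans on raise_for_status/HTTPStatusError where the real client raises its own ResponseError.

import httpx

async def ensure_blob(client: httpx.AsyncClient, digest: str, data: bytes) -> None:
    try:
        # The completed block: probe for the blob before uploading.
        (await client.request('HEAD', f'/api/blobs/{digest}')).raise_for_status()
    except httpx.HTTPStatusError as e:
        if e.response.status_code != 404:
            raise
        # Missing blob: push the bytes under the same digest.
        await client.request('POST', f'/api/blobs/{digest}', content=data)

Re-raising anything other than 404 keeps genuine failures (auth errors, server faults) visible instead of masking them behind a redundant upload.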
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' <fim_suffix> except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response 
using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. 
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>try: self._request('HEAD', f'/api/blobs/{digest}')
try: self._request('HEAD', f'/api/blobs/{digest}')
TRY
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
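The target completed in the row above is the blob-deduplication handshake in Client._create_blob: hash the file, HEAD /api/blobs/<digest> to ask whether the server already stores those bytes, and POST them only on a 404. Below is a minimal standalone sketch of the same flow, assuming an Ollama server at the default address; upload_blob is a hypothetical helper name used for illustration, not part of the library.

# Sketch of the _create_blob handshake; `upload_blob` and the default
# host are assumptions for this example.
from hashlib import sha256
from pathlib import Path

import httpx


def upload_blob(path: Path, host: str = 'http://127.0.0.1:11434') -> str:
    h = sha256()
    with open(path, 'rb') as f:
        # Hash in 32 KiB chunks, mirroring _create_blob.
        for chunk in iter(lambda: f.read(32 * 1024), b''):
            h.update(chunk)
    digest = f'sha256:{h.hexdigest()}'

    with httpx.Client(base_url=host) as client:
        # HEAD reports whether the server already stores this blob.
        if client.head(f'/api/blobs/{digest}').status_code == 404:
            with open(path, 'rb') as f:
                client.post(f'/api/blobs/{digest}', content=f).raise_for_status()
    return digest

The HEAD-before-POST shape keeps repeated create() calls cheap: weights referenced by an unchanged Modelfile line are never re-uploaded.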
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: <fim_suffix> except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: 
""" Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. 
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>try: r.raise_for_status()
try: r.raise_for_status()
TRY
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
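The try/raise_for_status pair filled in by this row is what converts an HTTP error on the streaming endpoint into a ResponseError before any line is yielded. A minimal consumer of that async path follows; it assumes a running server and a pulled model named 'mistral', the same assumptions as the async-chat-stream example quoted in the retrieved snippets.

# Minimal async streaming consumer; 'mistral' and a running `ollama serve`
# are assumptions for this sketch.
import asyncio

import ollama
from ollama._types import ResponseError


async def main() -> None:
    client = ollama.AsyncClient()
    try:
        # chat(stream=True) resolves to the AsyncIterator built by _stream.
        async for part in await client.chat(
            model='mistral',
            messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
            stream=True,
        ):
            print(part['message']['content'], end='', flush=True)
    except ResponseError as e:
        # Raised both for HTTP errors and for 'error' fields embedded in
        # a streamed JSON line.
        print(f'request failed ({e.status_code}): {e.error}')


asyncio.run(main())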
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: <fim_suffix> except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>try: r.raise_for_status()
try: r.raise_for_status()
TRY
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
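This row completes the synchronous twin of the same block: Client._stream yields one parsed JSON object per response line and re-raises any embedded 'error' field. A short sketch of driving it through Client.generate; the model name is a placeholder, and the statistics printed at the end are BaseGenerateResponse fields from _types.py (durations are in nanoseconds).

# Hypothetical sync streaming consumer; 'mistral' is a placeholder model.
import ollama

client = ollama.Client()
for part in client.generate(model='mistral', prompt='Why is the sky blue?', stream=True):
    print(part['response'], end='', flush=True)
    if part['done']:
        # The final chunk carries the timing statistics.
        print(f"\n{part['eval_count']} tokens in {part['eval_duration'] / 1e9:.1f}s")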
<filename>ollama-python/ollama/_types.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_client.py def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) # ollama-python/ollama/_client.py def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) # ollama-python/ollama/_client.py async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial """ import json from typing import Any, TypedDict, Sequence, Literal import sys if sys.version_info < (3, 11): from typing_extensions import NotRequired else: from typing import NotRequired class BaseGenerateResponse(TypedDict): model: str 'Model used to generate response.' created_at: str 'Time when the request was created.' done: bool 'True if response is complete, otherwise False. Useful for streaming to detect the final response.' total_duration: int 'Total duration in nanoseconds.' load_duration: int 'Load duration in nanoseconds.' prompt_eval_count: int 'Number of tokens evaluated in the prompt.' prompt_eval_duration: int 'Duration of evaluating the prompt in nanoseconds.' eval_count: int 'Number of tokens evaluated in inference.' eval_duration: int 'Duration of evaluating inference in nanoseconds.' class GenerateResponse(BaseGenerateResponse): """ Response returned by generate requests. """ response: str 'Response content. When streaming, this contains a fragment of the response.' context: Sequence[int] 'Tokenized history up to the point of the response.' class Message(TypedDict): """ Chat message. """ role: Literal['user', 'assistant', 'system'] "Assumed role of the message. Response messages always have role 'assistant'." content: str 'Content of the message. Response messages contain message fragments when streaming.' images: NotRequired[Sequence[Any]] """ Optional list of image data for multimodal models. Valid input types are: - `str` or path-like object: path to image file - `bytes` or bytes-like object: raw image data Valid image formats depend on the model. See the model card for more information. """ class ChatResponse(BaseGenerateResponse): """ Response returned by chat requests. """ message: Message 'Response message.' class ProgressResponse(TypedDict): status: str completed: int total: int digest: str class Options(TypedDict, total=False): # load time options numa: bool num_ctx: int num_batch: int num_gqa: int num_gpu: int main_gpu: int low_vram: bool f16_kv: bool logits_all: bool vocab_only: bool use_mmap: bool use_mlock: bool embedding_only: bool rope_frequency_base: float rope_frequency_scale: float num_thread: int # runtime options num_keep: int seed: int num_predict: int top_k: int top_p: float tfs_z: float typical_p: float repeat_last_n: int temperature: float repeat_penalty: float presence_penalty: float frequency_penalty: float mirostat: int mirostat_tau: float mirostat_eta: float penalize_newline: bool stop: Sequence[str] class RequestError(Exception): """ Common class for request errors. """ def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.'
class ResponseError(Exception): """ Common class for response errors. """ def __init__(self, error: str, status_code: int = -1): <fim_suffix> except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' <fim_middle>try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error)
try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error)
TRY
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
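The constructor completed in this row tries to unwrap a JSON body's 'error' field and falls back to the raw text, so both server-formatted and opaque error bodies surface as readable messages. A quick sketch exercising both branches (pure stdlib behavior, checkable against the class shown above):

# Exercises both branches of ResponseError.__init__.
from ollama._types import ResponseError

e = ResponseError('{"error": "model not found"}', 404)
assert e.error == 'model not found'      # JSON body: 'error' field unwrapped
assert e.status_code == 404

e = ResponseError('<html>bad gateway</html>', 502)
assert e.error == '<html>bad gateway</html>'  # non-JSON body kept verbatim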
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') <fim_suffix> except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8')
try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8')
TRY
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
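Note: the TRY target above is the base64 pass-through check inside `_encode_image` from ollama-python/ollama/_client.py. A simplified, self-contained sketch of that logic follows; the real function delegates to `_as_path`/`_as_bytesio` helpers and raises `RequestError`, so `ValueError` is substituted here only to keep the sketch standalone.

import binascii
import io
from base64 import b64decode, b64encode
from pathlib import Path

def encode_image(image) -> str:
    """Sketch: paths are read and encoded, already-base64 input passes
    through unchanged, raw bytes and file-like objects are encoded."""
    if isinstance(image, (str, Path)):
        try:
            if (p := Path(image)).exists():
                return b64encode(p.read_bytes()).decode('utf-8')
        except Exception:
            ...  # mirror _as_path: unusable path strings just fall through
    try:
        # validate=True rejects anything that is not strictly base64,
        # so raw binary image data falls through to the encoders below.
        b64decode(image, validate=True)
        return image if isinstance(image, str) else image.decode('utf-8')
    except (binascii.Error, TypeError):
        ...
    if isinstance(image, io.BytesIO):
        return b64encode(image.read()).decode('utf-8')
    if isinstance(image, bytes):
        return b64encode(image).decode('utf-8')
    raise ValueError('image must be bytes, a path, or a file-like object')

assert encode_image(b'ollama') == 'b2xsYW1h'              # raw bytes -> encoded
assert encode_image(io.BytesIO(b'ollama')) == 'b2xsYW1h'  # file-like -> encoded
assert encode_image('YWJj') == 'YWJj'                     # base64 text passes through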
<filename>ollama-python/ollama/_types.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_client.py def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) # ollama-python/ollama/_client.py def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) # ollama-python/ollama/_client.py async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial """ import json from typing import Any, TypedDict, Sequence, Literal import sys if sys.version_info < (3, 11): from typing_extensions import NotRequired else: from typing import NotRequired class BaseGenerateResponse(TypedDict): model: str 'Model used to generate response.' created_at: str 'Time when the request was created.' done: bool 'True if response is complete, otherwise False. Useful for streaming to detect the final response.' total_duration: int 'Total duration in nanoseconds.' load_duration: int 'Load duration in nanoseconds.' prompt_eval_count: int 'Number of tokens evaluated in the prompt.' prompt_eval_duration: int 'Duration of evaluating the prompt in nanoseconds.' eval_count: int 'Number of tokens evaluated in inference.' eval_duration: int 'Duration of evaluating inference in nanoseconds.' class GenerateResponse(BaseGenerateResponse): """ Response returned by generate requests. """ response: str 'Response content. When streaming, this contains a fragment of the response.' context: Sequence[int] 'Tokenized history up to the point of the response.' class Message(TypedDict): """ Chat message. """ role: Literal['user', 'assistant', 'system'] "Assumed role of the message. Response messages always has role 'assistant'." content: str 'Content of the message. Response messages contains message fragments when streaming.' images: NotRequired[Sequence[Any]] """ Optional list of image data for multimodal models. Valid input types are: - `str` or path-like object: path to image file - `bytes` or bytes-like object: raw image data Valid image formats depend on the model. See the model card for more information. """ class ChatResponse(BaseGenerateResponse): """ Response returned by chat requests. """ message: Message 'Response message.' class ProgressResponse(TypedDict): status: str completed: int total: int digest: str class Options(TypedDict, total=False): # load time options numa: bool num_ctx: int num_batch: int num_gqa: int num_gpu: int main_gpu: int low_vram: bool f16_kv: bool logits_all: bool vocab_only: bool use_mmap: bool use_mlock: bool embedding_only: bool rope_frequency_base: float rope_frequency_scale: float num_thread: int # runtime options num_keep: int seed: int num_predict: int top_k: int top_p: float tfs_z: float typical_p: float repeat_last_n: int temperature: float repeat_penalty: float presence_penalty: float frequency_penalty: float mirostat: int mirostat_tau: float mirostat_eta: float penalize_newline: bool stop: Sequence[str] class RequestError(Exception): """ Common class for request errors. """ def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' 
class ResponseError(Exception): """ Common class for response errors. """ def __init__(self, error: str, status_code: int = -1): try: <fim_suffix> # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' <fim_middle># try to parse content as JSON and extract 'error'
# try to parse content as JSON and extract 'error'
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>ollama-python/ollama/_types.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_client.py def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) # ollama-python/ollama/_client.py def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) # ollama-python/ollama/_client.py async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial """ import json from typing import Any, TypedDict, Sequence, Literal import sys if sys.version_info < (3, 11): from typing_extensions import NotRequired else: from typing import NotRequired class BaseGenerateResponse(TypedDict): model: str 'Model used to generate response.' created_at: str 'Time when the request was created.' done: bool 'True if response is complete, otherwise False. Useful for streaming to detect the final response.' total_duration: int 'Total duration in nanoseconds.' load_duration: int 'Load duration in nanoseconds.' prompt_eval_count: int 'Number of tokens evaluated in the prompt.' prompt_eval_duration: int 'Duration of evaluating the prompt in nanoseconds.' eval_count: int 'Number of tokens evaluated in inference.' eval_duration: int 'Duration of evaluating inference in nanoseconds.' class GenerateResponse(BaseGenerateResponse): """ Response returned by generate requests. """ response: str 'Response content. When streaming, this contains a fragment of the response.' context: Sequence[int] 'Tokenized history up to the point of the response.' class Message(TypedDict): """ Chat message. """ role: Literal['user', 'assistant', 'system'] "Assumed role of the message. Response messages always has role 'assistant'." content: str 'Content of the message. Response messages contains message fragments when streaming.' images: NotRequired[Sequence[Any]] """ Optional list of image data for multimodal models. Valid input types are: - `str` or path-like object: path to image file - `bytes` or bytes-like object: raw image data Valid image formats depend on the model. See the model card for more information. """ class ChatResponse(BaseGenerateResponse): """ Response returned by chat requests. """ message: Message 'Response message.' class ProgressResponse(TypedDict): status: str completed: int total: int digest: str class Options(TypedDict, total=False): # load time options numa: bool num_ctx: int num_batch: int num_gqa: int num_gpu: int main_gpu: int low_vram: bool f16_kv: bool logits_all: bool vocab_only: bool use_mmap: bool use_mlock: bool embedding_only: bool rope_frequency_base: float rope_frequency_scale: float num_thread: int # runtime options num_keep: int seed: int num_predict: int top_k: int top_p: float tfs_z: float typical_p: float repeat_last_n: int temperature: float repeat_penalty: float presence_penalty: float frequency_penalty: float mirostat: int mirostat_tau: float mirostat_eta: float penalize_newline: bool stop: Sequence[str] class RequestError(Exception): """ Common class for request errors. """ def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' 
class ResponseError(Exception): """ Common class for response errors. """ def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' <fim_suffix> error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' <fim_middle># fallback to raw content if JSON parsing fails
# fallback to raw content if JSON parsing fails
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') <fim_suffix> if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>except (binascii.Error, TypeError): ...
except (binascii.Error, TypeError): ...
CATCH
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
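Note: the CATCH target above names exactly `binascii.Error` and `TypeError`. A quick standard-library check of why the pass-through probe needs both: invalid or mis-padded base64 raises `binascii.Error`, while non-str/bytes input such as a `BytesIO` raises `TypeError`.

import binascii
import io
from base64 import b64decode

# Invalid alphabet and wrong padding both surface as binascii.Error.
for bad in ('not-base64!!', b'abc'):
    try:
        b64decode(bad, validate=True)
    except binascii.Error:
        pass  # raw image bytes take this path in _encode_image

# File-like objects are not bytes-like, so b64decode raises TypeError,
# letting _encode_image fall through to its BytesIO branch.
try:
    b64decode(io.BytesIO(b'abc'), validate=True)
except TypeError:
    pass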
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') <fim_suffix> return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. 
Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r)
except ResponseError as e:
  if e.status_code != 404:
    raise

with open(path, 'rb') as r:
  self._request('POST', f'/api/blobs/{digest}', content=r)
CATCH
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
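The CATCH target above completes the 404 fallback in _create_blob: the client first sends HEAD /api/blobs/<digest>, treats a 404 ResponseError as "blob missing", re-raises any other error, and only then uploads the file body with a POST. A minimal standalone sketch of the same HEAD-then-POST pattern, written directly against httpx rather than the library internals — the ensure_blob helper name, host, and file path are illustrative assumptions, not part of ollama-python:

# Sketch of the blob-upload handshake used by Client._create_blob.
# Assumes httpx is installed and a compatible server is reachable.
from hashlib import sha256

import httpx


def ensure_blob(client: httpx.Client, path: str) -> str:
  # Hash the file in 32 KiB chunks, mirroring _create_blob.
  digest = sha256()
  with open(path, 'rb') as f:
    for chunk in iter(lambda: f.read(32 * 1024), b''):
      digest.update(chunk)
  ref = f'sha256:{digest.hexdigest()}'

  # HEAD asks whether the server already has this blob. Only a 404
  # means "missing"; any other error status should propagate.
  head = client.head(f'/api/blobs/{ref}')
  if head.status_code == 404:
    with open(path, 'rb') as f:
      client.post(f'/api/blobs/{ref}', content=f).raise_for_status()
  else:
    head.raise_for_status()
  return ref

Constructing the client as httpx.Client(base_url='http://127.0.0.1:11434') would exercise this against a default local Ollama server.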
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() <fim_suffix> for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None
except httpx.HTTPStatusError as e:
  e.response.read()
  raise ResponseError(e.response.text, e.response.status_code) from None
CATCH
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
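This row's CATCH target is the error branch of the synchronous Client._stream: on a streamed response, httpx raises HTTPStatusError before the body has been consumed, so e.response.read() must run first to make e.response.text available for wrapping in a ResponseError. A short usage sketch of the public API this branch protects — it assumes a running local Ollama server and a pulled model named 'mistral', both illustrative assumptions:

# Sketch: consuming a streamed generate() call and surfacing
# server-side failures as ResponseError.
from ollama import Client, ResponseError

client = Client()
try:
  for part in client.generate(model='mistral', prompt='Why is the sky blue?', stream=True):
    # Each partial is a mapping; 'response' holds the next text fragment.
    print(part.get('response', ''), end='', flush=True)
except ResponseError as err:
  # ResponseError carries the parsed reason and the HTTP status code.
  print(f'\nrequest failed ({err.status_code}): {err.error}')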
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() <fim_suffix> async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. 
Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. 
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None
except httpx.HTTPStatusError as e:
  e.response.read()
  raise ResponseError(e.response.text, e.response.status_code) from None
CATCH
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
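The row above asks the model to complete the error branch of `Client._stream`, and the target restores a subtle ordering constraint: a failed streaming response must be read while the stream context is still open, then re-raised as `ResponseError` with `from None` so the httpx traceback is dropped. A minimal standalone sketch of the same pattern, assuming only a reachable NDJSON endpoint; `StreamError`, the URL, and the request payload are illustrative stand-ins, not taken from the dataset:

import json

import httpx


class StreamError(Exception):
  """Illustrative stand-in for the library's ResponseError."""


def stream_ndjson(url: str):
  # Mirrors the completed CATCH block: streamed bodies are not buffered,
  # so the error text must be read inside the stream context before the
  # exception is raised.
  with httpx.Client() as client:
    with client.stream('POST', url, json={'stream': True}) as r:
      try:
        r.raise_for_status()
      except httpx.HTTPStatusError as e:
        e.response.read()
        raise StreamError(e.response.text) from None
      for line in r.iter_lines():
        yield json.loads(line)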
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') <fim_suffix> return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())
except ResponseError as e:
  if e.status_code != 404:
    raise

async def upload_bytes():
  with open(path, 'rb') as r:
    while True:
      chunk = r.read(32 * 1024)
      if not chunk:
        break
      yield chunk

await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())
CATCH
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
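This row's completion finishes `AsyncClient._create_blob`'s probe-then-upload flow: a HEAD request checks whether the blob named by its sha256 digest already exists, only a 404 falls through to the chunked POST, and any other status re-raises. A rough synchronous sketch of the same content-addressable upload, assuming an `httpx.Client` constructed with a `base_url` and a hypothetical `/blobs/{digest}` route:

import hashlib
from pathlib import Path

import httpx


def push_blob(client: httpx.Client, path: Path) -> str:
  # Hash the file in 32 KiB chunks so large files never sit in memory.
  h = hashlib.sha256()
  with open(path, 'rb') as f:
    for chunk in iter(lambda: f.read(32 * 1024), b''):
      h.update(chunk)
  digest = f'sha256:{h.hexdigest()}'

  # Probe first; only a missing blob (404) should trigger an upload.
  probe = client.head(f'/blobs/{digest}')
  if probe.status_code == 404:
    with open(path, 'rb') as f:
      client.post(f'/blobs/{digest}', content=f).raise_for_status()
  else:
    probe.raise_for_status()
  return digest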
<filename>ollama-python/ollama/_types.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_client.py def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) # ollama-python/ollama/_client.py def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) # ollama-python/ollama/_client.py async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial """ import json from typing import Any, TypedDict, Sequence, Literal import sys if sys.version_info < (3, 11): from typing_extensions import NotRequired else: from typing import NotRequired class BaseGenerateResponse(TypedDict): model: str 'Model used to generate response.' created_at: str 'Time when the request was created.' done: bool 'True if response is complete, otherwise False. Useful for streaming to detect the final response.' total_duration: int 'Total duration in nanoseconds.' load_duration: int 'Load duration in nanoseconds.' prompt_eval_count: int 'Number of tokens evaluated in the prompt.' prompt_eval_duration: int 'Duration of evaluating the prompt in nanoseconds.' eval_count: int 'Number of tokens evaluated in inference.' eval_duration: int 'Duration of evaluating inference in nanoseconds.' class GenerateResponse(BaseGenerateResponse): """ Response returned by generate requests. """ response: str 'Response content. When streaming, this contains a fragment of the response.' context: Sequence[int] 'Tokenized history up to the point of the response.' class Message(TypedDict): """ Chat message. """ role: Literal['user', 'assistant', 'system'] "Assumed role of the message. Response messages always has role 'assistant'." content: str 'Content of the message. Response messages contains message fragments when streaming.' images: NotRequired[Sequence[Any]] """ Optional list of image data for multimodal models. Valid input types are: - `str` or path-like object: path to image file - `bytes` or bytes-like object: raw image data Valid image formats depend on the model. See the model card for more information. """ class ChatResponse(BaseGenerateResponse): """ Response returned by chat requests. """ message: Message 'Response message.' class ProgressResponse(TypedDict): status: str completed: int total: int digest: str class Options(TypedDict, total=False): # load time options numa: bool num_ctx: int num_batch: int num_gqa: int num_gpu: int main_gpu: int low_vram: bool f16_kv: bool logits_all: bool vocab_only: bool use_mmap: bool use_mlock: bool embedding_only: bool rope_frequency_base: float rope_frequency_scale: float num_thread: int # runtime options num_keep: int seed: int num_predict: int top_k: int top_p: float tfs_z: float typical_p: float repeat_last_n: int temperature: float repeat_penalty: float presence_penalty: float frequency_penalty: float mirostat: int mirostat_tau: float mirostat_eta: float penalize_newline: bool stop: Sequence[str] class RequestError(Exception): """ Common class for request errors. """ def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' 
class ResponseError(Exception): """ Common class for response errors. """ def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) <fim_suffix> super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' <fim_middle>except json.JSONDecodeError: ...
except json.JSONDecodeError:
  ...
CATCH
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
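The one-line target in this row gives `ResponseError` its fallback parser: a JSON body with an 'error' key yields that message, anything else keeps the raw text. A tiny isolated sketch of the idiom; the `isinstance` guard is an addition for non-object JSON, not something the library code does:

import json


def extract_error(body: str) -> str:
  # Prefer a structured {'error': '...'} payload; fall back to the raw text.
  try:
    data = json.loads(body)
    return data.get('error', body) if isinstance(data, dict) else body
  except json.JSONDecodeError:
    return body


assert extract_error('{"error": "model not found"}') == 'model not found'
assert extract_error('upstream timeout') == 'upstream timeout'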
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() <fim_suffix> async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. 
Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. 
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None
except httpx.HTTPStatusError as e:
  e.response.read()
  raise ResponseError(e.response.text, e.response.status_code) from None
CATCH
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
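This row is the asynchronous twin of the first CATCH example, completing the status check inside `AsyncClient._stream`'s `inner()` generator. A compact consumer sketch under the same pattern; the URL and `RuntimeError` are placeholders, and note that a standalone async httpx response needs `await e.response.aread()`, where the dataset row keeps the library's synchronous `read()` call:

import asyncio
import json

import httpx


async def consume(url: str) -> None:
  async with httpx.AsyncClient() as client:
    async with client.stream('POST', url, json={'stream': True}) as r:
      try:
        r.raise_for_status()
      except httpx.HTTPStatusError as e:
        await e.response.aread()  # async responses are read with aread()
        raise RuntimeError(e.response.text) from None
      async for line in r.aiter_lines():
        print(json.loads(line))


# asyncio.run(consume('http://127.0.0.1:11434/api/chat'))  # placeholder URL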
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() <fim_suffix> return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out)
for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out)
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
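The target above completes the body of AsyncClient._parse_modelfile: FROM and ADAPTER lines whose arguments name local files are rewritten to @<sha256 digest> blob references (via _create_blob) before the Modelfile is posted to /api/create, while every other line passes through untouched. A minimal standalone sketch of that rewrite, with a stubbed digest callback — rewrite_modelfile and the lambda are illustrative names, not part of the library:

    import io
    from pathlib import Path

    def rewrite_modelfile(modelfile: str, base: Path, create_blob) -> str:
        out = io.StringIO()
        for line in io.StringIO(modelfile):
            command, _, args = line.partition(' ')
            # Only FROM and ADAPTER lines may reference local files.
            if command.upper() not in ['FROM', 'ADAPTER']:
                print(line, end='', file=out)
                continue
            path = Path(args.strip()).expanduser()
            path = path if path.is_absolute() else base / path
            if path.exists():
                # Swap the local path for an @<digest> blob reference.
                args = f'@{create_blob(path)}\n'
            print(command, args, end='', file=out)
        return out.getvalue()

    # Arguments that do not resolve to local files (e.g. registry names
    # such as "llama2") are passed through unchanged for the server.
    print(rewrite_modelfile('FROM llama2\nPARAMETER temperature 0.7\n',
                            Path.cwd(), lambda p: 'sha256:deadbeef'))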
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def speak(speaker, content): if speaker: p = await asyncio.create_subprocess_exec(speaker, content) await p.communicate() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') <fim_suffix> return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images]
for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images]
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
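The target above restores the validation loop at the top of AsyncClient.chat: each message must be a dict-like object with a role of "system", "user", or "assistant" and a non-empty content field, and any attached images are base64-encoded in place via _encode_image. A dependency-free sketch of the same checks — validate_messages is a hypothetical helper name, and ValueError stands in for the library's RequestError so the sketch runs on its own:

    def validate_messages(messages):
        for message in messages or []:
            if not isinstance(message, dict):
                raise TypeError('messages must be a list of Message or dict-like objects')
            if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
                raise ValueError('messages must contain a role and it must be one of '
                                 '"system", "user", or "assistant"')
            if not message.get('content'):
                raise ValueError('messages must contain content')

    validate_messages([{'role': 'user', 'content': 'hello'}])  # passes silently
    # validate_messages([{'role': 'bot', 'content': 'hi'}])    # would raise ValueError

Note that in this row's target the async variant's TypeError text reads "messages must be a list of strings", even though the check itself, like the sync client's, requires dict-like objects.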
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None <fim_suffix> def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], 
AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial
for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
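The completion in the row above is the heart of Client._stream(): Ollama's streaming endpoints reply with newline-delimited JSON, so the loop parses each line on its own, promotes an embedded 'error' field to a ResponseError, and yields every other partial object to the caller. The minimal sketch below shows how that loop surfaces through the public API; it assumes a running local Ollama server and an already-pulled 'mistral' model, neither of which comes from this dataset row.

import ollama

# Client() resolves its base URL via _parse_host(), defaulting to
# http://127.0.0.1:11434 when no host argument or OLLAMA_HOST is set.
client = ollama.Client()

try:
    # With stream=True, chat() returns the generator built by _stream(),
    # yielding one parsed JSON mapping per line of the response body.
    for partial in client.chat(
        model='mistral',  # assumed to be pulled already
        messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
        stream=True,
    ):
        print(partial['message']['content'], end='', flush=True)
except ollama.ResponseError as e:
    # Raised both for HTTP errors and for 'error' fields found mid-stream;
    # per _types.py, status_code defaults to -1 for in-stream errors.
    print(f'request failed ({e.status_code}): {e.error}')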
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you: # ollama-python/examples/async-chat-stream/main.py async def main(): parser = argparse.ArgumentParser() parser.add_argument('--speak', default=False, action='store_true') args = parser.parse_args() speaker = None if not args.speak: ... elif say := shutil.which('say'): speaker = say elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')): speaker = espeak client = ollama.AsyncClient() messages = [] while True: if content_in := input('>>> '): messages.append({'role': 'user', 'content': content_in}) content_out = '' message = {'role': 'assistant', 'content': ''} async for response in await client.chat(model='mistral', messages=messages, stream=True): if response['done']: messages.append(message) content = response['message']['content'] print(content, end='', flush=True) content_out += content if content in ['.', '!', '?', '\n']: await speak(speaker, content_out) content_out = '' message['content'] += content if content_out: await speak(speaker, content_out) print() # ollama-python/ollama/_types.py def __init__(self, error: str): super().__init__(error) self.error = error 'Reason for the error.' # ollama-python/ollama/_types.py def __init__(self, error: str, status_code: int = -1): try: # try to parse content as JSON and extract 'error' # fallback to raw content if JSON parsing fails error = json.loads(error).get('error', error) except json.JSONDecodeError: ... super().__init__(error) self.error = error 'Reason for the error.' self.status_code = status_code 'HTTP status code of the response.' """ import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. 
""" headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() <fim_suffix> return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. 
""" if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... 
return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}' <fim_middle>for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out)
for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out)
FOR
prefix_suffix_full_complete_current_block_with_repo_rag_oracle
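The completion in the row above is the body of the synchronous _parse_modelfile(): any FROM or ADAPTER line whose argument resolves to an existing local file is swapped for an @sha256:<digest> reference after _create_blob() uploads the file, and every other Modelfile line passes through untouched. A minimal usage sketch follows; the model name 'my-mistral' and the GGUF filename are illustrative assumptions, not values from this dataset row.

import ollama

client = ollama.Client()

# Relative paths in a Modelfile string are resolved against Path.cwd();
# the SYSTEM line is not a FROM/ADAPTER command, so it passes through as-is.
modelfile = '''FROM ./mistral-7b.Q4_0.gguf
SYSTEM You are a terse assistant.
'''

# create() runs the Modelfile through _parse_modelfile() before POSTing to
# /api/create, so an existing weights file is uploaded as a blob and
# referenced by digest. With stream=True it yields progress mappings.
for progress in client.create(model='my-mistral', modelfile=modelfile, stream=True):
    print(progress.get('status', ''))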